2d035d795f02ad860848c7133aee5aefe839b7af
[safe/jmp/linux-2.6] / drivers / net / bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
58
59 #define DRV_MODULE_VERSION      "1.48.114-1"
60 #define DRV_MODULE_RELDATE      "2009/07/29"
61 #define BNX2X_BC_VER            0x040200
62
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
65 /* FW files */
66 #define FW_FILE_PREFIX_E1               "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"
68
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT              (5*HZ)
71
72 static char version[] __devinitdata =
73         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");
84
85 static int disable_tpa;
86 module_param(disable_tpa, int, 0);
87 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
88
89 static int int_mode;
90 module_param(int_mode, int, 0);
91 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
92
93 static int poll;
94 module_param(poll, int, 0);
95 MODULE_PARM_DESC(poll, " Use polling (for debug)");
96
97 static int mrrs = -1;
98 module_param(mrrs, int, 0);
99 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
100
101 static int debug;
102 module_param(debug, int, 0);
103 MODULE_PARM_DESC(debug, " Default debug msglevel");
104
105 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
106
107 static struct workqueue_struct *bnx2x_wq;
108
109 enum bnx2x_board_type {
110         BCM57710 = 0,
111         BCM57711 = 1,
112         BCM57711E = 2,
113 };
114
115 /* indexed by board_type, above */
116 static struct {
117         char *name;
118 } board_info[] __devinitdata = {
119         { "Broadcom NetXtreme II BCM57710 XGb" },
120         { "Broadcom NetXtreme II BCM57711 XGb" },
121         { "Broadcom NetXtreme II BCM57711E XGb" }
122 };
123
124
125 static const struct pci_device_id bnx2x_pci_tbl[] = {
126         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
127                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
128         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
129                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
130         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
131                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
132         { 0 }
133 };
134
135 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
136
137 /****************************************************************************
138 * General service functions
139 ****************************************************************************/
140
141 /* used only at init
142  * locking is done by mcp
143  */
144 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
145 {
146         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
147         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
148         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
149                                PCICFG_VENDOR_ID_OFFSET);
150 }
151
152 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
153 {
154         u32 val;
155
156         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
157         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
158         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
159                                PCICFG_VENDOR_ID_OFFSET);
160
161         return val;
162 }
163
164 static const u32 dmae_reg_go_c[] = {
165         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
166         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
167         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
168         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
169 };
170
171 /* copy command into DMAE command memory and set DMAE command go */
172 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
173                             int idx)
174 {
175         u32 cmd_offset;
176         int i;
177
178         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
179         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
180                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
181
182                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
183                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
184         }
185         REG_WR(bp, dmae_reg_go_c[idx], 1);
186 }
187
188 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
189                       u32 len32)
190 {
191         struct dmae_command *dmae = &bp->init_dmae;
192         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
193         int cnt = 200;
194
195         if (!bp->dmae_ready) {
196                 u32 *data = bnx2x_sp(bp, wb_data[0]);
197
198                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
199                    "  using indirect\n", dst_addr, len32);
200                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
201                 return;
202         }
203
204         mutex_lock(&bp->dmae_mutex);
205
206         memset(dmae, 0, sizeof(struct dmae_command));
207
208         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
209                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
210                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
211 #ifdef __BIG_ENDIAN
212                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
213 #else
214                         DMAE_CMD_ENDIANITY_DW_SWAP |
215 #endif
216                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
217                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
218         dmae->src_addr_lo = U64_LO(dma_addr);
219         dmae->src_addr_hi = U64_HI(dma_addr);
220         dmae->dst_addr_lo = dst_addr >> 2;
221         dmae->dst_addr_hi = 0;
222         dmae->len = len32;
223         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
224         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
225         dmae->comp_val = DMAE_COMP_VAL;
226
227         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
228            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
229                     "dst_addr [%x:%08x (%08x)]\n"
230            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
231            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
232            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
233            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
234         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
235            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
236            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
237
238         *wb_comp = 0;
239
240         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
241
242         udelay(5);
243
244         while (*wb_comp != DMAE_COMP_VAL) {
245                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
246
247                 if (!cnt) {
248                         BNX2X_ERR("DMAE timeout!\n");
249                         break;
250                 }
251                 cnt--;
252                 /* adjust delay for emulation/FPGA */
253                 if (CHIP_REV_IS_SLOW(bp))
254                         msleep(100);
255                 else
256                         udelay(5);
257         }
258
259         mutex_unlock(&bp->dmae_mutex);
260 }
261
262 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
263 {
264         struct dmae_command *dmae = &bp->init_dmae;
265         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
266         int cnt = 200;
267
268         if (!bp->dmae_ready) {
269                 u32 *data = bnx2x_sp(bp, wb_data[0]);
270                 int i;
271
272                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
273                    "  using indirect\n", src_addr, len32);
274                 for (i = 0; i < len32; i++)
275                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
276                 return;
277         }
278
279         mutex_lock(&bp->dmae_mutex);
280
281         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
282         memset(dmae, 0, sizeof(struct dmae_command));
283
284         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
285                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
286                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
287 #ifdef __BIG_ENDIAN
288                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
289 #else
290                         DMAE_CMD_ENDIANITY_DW_SWAP |
291 #endif
292                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
293                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
294         dmae->src_addr_lo = src_addr >> 2;
295         dmae->src_addr_hi = 0;
296         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
297         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
298         dmae->len = len32;
299         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
300         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
301         dmae->comp_val = DMAE_COMP_VAL;
302
303         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
304            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
305                     "dst_addr [%x:%08x (%08x)]\n"
306            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
307            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
308            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
309            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
310
311         *wb_comp = 0;
312
313         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
314
315         udelay(5);
316
317         while (*wb_comp != DMAE_COMP_VAL) {
318
319                 if (!cnt) {
320                         BNX2X_ERR("DMAE timeout!\n");
321                         break;
322                 }
323                 cnt--;
324                 /* adjust delay for emulation/FPGA */
325                 if (CHIP_REV_IS_SLOW(bp))
326                         msleep(100);
327                 else
328                         udelay(5);
329         }
330         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
331            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
332            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
333
334         mutex_unlock(&bp->dmae_mutex);
335 }
336
337 /* used only for slowpath so not inlined */
338 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
339 {
340         u32 wb_write[2];
341
342         wb_write[0] = val_hi;
343         wb_write[1] = val_lo;
344         REG_WR_DMAE(bp, reg, wb_write, 2);
345 }
346
347 #ifdef USE_WB_RD
348 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
349 {
350         u32 wb_data[2];
351
352         REG_RD_DMAE(bp, reg, wb_data, 2);
353
354         return HILO_U64(wb_data[0], wb_data[1]);
355 }
356 #endif
357
358 static int bnx2x_mc_assert(struct bnx2x *bp)
359 {
360         char last_idx;
361         int i, rc = 0;
362         u32 row0, row1, row2, row3;
363
364         /* XSTORM */
365         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
366                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
367         if (last_idx)
368                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
369
370         /* print the asserts */
371         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
372
373                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
374                               XSTORM_ASSERT_LIST_OFFSET(i));
375                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
376                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
377                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
378                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
379                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
380                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
381
382                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
383                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
384                                   " 0x%08x 0x%08x 0x%08x\n",
385                                   i, row3, row2, row1, row0);
386                         rc++;
387                 } else {
388                         break;
389                 }
390         }
391
392         /* TSTORM */
393         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
394                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
395         if (last_idx)
396                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
397
398         /* print the asserts */
399         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
400
401                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
402                               TSTORM_ASSERT_LIST_OFFSET(i));
403                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
404                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
405                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
406                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
407                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
408                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
409
410                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
411                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
412                                   " 0x%08x 0x%08x 0x%08x\n",
413                                   i, row3, row2, row1, row0);
414                         rc++;
415                 } else {
416                         break;
417                 }
418         }
419
420         /* CSTORM */
421         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
422                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
423         if (last_idx)
424                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
425
426         /* print the asserts */
427         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
428
429                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
430                               CSTORM_ASSERT_LIST_OFFSET(i));
431                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
432                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
433                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
434                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
435                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
436                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
437
438                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
439                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
440                                   " 0x%08x 0x%08x 0x%08x\n",
441                                   i, row3, row2, row1, row0);
442                         rc++;
443                 } else {
444                         break;
445                 }
446         }
447
448         /* USTORM */
449         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
450                            USTORM_ASSERT_LIST_INDEX_OFFSET);
451         if (last_idx)
452                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
453
454         /* print the asserts */
455         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
456
457                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
458                               USTORM_ASSERT_LIST_OFFSET(i));
459                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
460                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
461                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
462                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
463                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
464                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
465
466                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
467                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
468                                   " 0x%08x 0x%08x 0x%08x\n",
469                                   i, row3, row2, row1, row0);
470                         rc++;
471                 } else {
472                         break;
473                 }
474         }
475
476         return rc;
477 }
478
479 static void bnx2x_fw_dump(struct bnx2x *bp)
480 {
481         u32 mark, offset;
482         __be32 data[9];
483         int word;
484
485         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
486         mark = ((mark + 0x3) & ~0x3);
487         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
488
489         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
490                 for (word = 0; word < 8; word++)
491                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
492                                                   offset + 4*word));
493                 data[8] = 0x0;
494                 printk(KERN_CONT "%s", (char *)data);
495         }
496         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
497                 for (word = 0; word < 8; word++)
498                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
499                                                   offset + 4*word));
500                 data[8] = 0x0;
501                 printk(KERN_CONT "%s", (char *)data);
502         }
503         printk("\n" KERN_ERR PFX "end of fw dump\n");
504 }
505
506 static void bnx2x_panic_dump(struct bnx2x *bp)
507 {
508         int i;
509         u16 j, start, end;
510
511         bp->stats_state = STATS_STATE_DISABLED;
512         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
513
514         BNX2X_ERR("begin crash dump -----------------\n");
515
516         /* Indices */
517         /* Common */
518         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
519                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
520                   "  spq_prod_idx(%u)\n",
521                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
522                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
523
524         /* Rx */
525         for_each_rx_queue(bp, i) {
526                 struct bnx2x_fastpath *fp = &bp->fp[i];
527
528                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
529                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
530                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
531                           i, fp->rx_bd_prod, fp->rx_bd_cons,
532                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
533                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
534                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
535                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
536                           fp->rx_sge_prod, fp->last_max_sge,
537                           le16_to_cpu(fp->fp_u_idx),
538                           fp->status_blk->u_status_block.status_block_index);
539         }
540
541         /* Tx */
542         for_each_tx_queue(bp, i) {
543                 struct bnx2x_fastpath *fp = &bp->fp[i];
544                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
545
546                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
547                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
548                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
549                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
550                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
551                           "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
552                           fp->status_blk->c_status_block.status_block_index,
553                           hw_prods->packets_prod, hw_prods->bds_prod);
554         }
555
556         /* Rings */
557         /* Rx */
558         for_each_rx_queue(bp, i) {
559                 struct bnx2x_fastpath *fp = &bp->fp[i];
560
561                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
562                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
563                 for (j = start; j != end; j = RX_BD(j + 1)) {
564                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
565                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
566
567                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
568                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
569                 }
570
571                 start = RX_SGE(fp->rx_sge_prod);
572                 end = RX_SGE(fp->last_max_sge);
573                 for (j = start; j != end; j = RX_SGE(j + 1)) {
574                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
575                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
576
577                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
578                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
579                 }
580
581                 start = RCQ_BD(fp->rx_comp_cons - 10);
582                 end = RCQ_BD(fp->rx_comp_cons + 503);
583                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
584                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
585
586                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
587                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
588                 }
589         }
590
591         /* Tx */
592         for_each_tx_queue(bp, i) {
593                 struct bnx2x_fastpath *fp = &bp->fp[i];
594
595                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
596                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
597                 for (j = start; j != end; j = TX_BD(j + 1)) {
598                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
599
600                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
601                                   i, j, sw_bd->skb, sw_bd->first_bd);
602                 }
603
604                 start = TX_BD(fp->tx_bd_cons - 10);
605                 end = TX_BD(fp->tx_bd_cons + 254);
606                 for (j = start; j != end; j = TX_BD(j + 1)) {
607                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
608
609                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
610                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
611                 }
612         }
613
614         bnx2x_fw_dump(bp);
615         bnx2x_mc_assert(bp);
616         BNX2X_ERR("end crash dump -----------------\n");
617 }
618
619 static void bnx2x_int_enable(struct bnx2x *bp)
620 {
621         int port = BP_PORT(bp);
622         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
623         u32 val = REG_RD(bp, addr);
624         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
625         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
626
627         if (msix) {
628                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
629                          HC_CONFIG_0_REG_INT_LINE_EN_0);
630                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
631                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
632         } else if (msi) {
633                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
634                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
635                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
636                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
637         } else {
638                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
639                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
640                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
641                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
642
643                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
644                    val, port, addr);
645
646                 REG_WR(bp, addr, val);
647
648                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
649         }
650
651         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
652            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
653
654         REG_WR(bp, addr, val);
655         /*
656          * Ensure that HC_CONFIG is written before leading/trailing edge config
657          */
658         mmiowb();
659         barrier();
660
661         if (CHIP_IS_E1H(bp)) {
662                 /* init leading/trailing edge */
663                 if (IS_E1HMF(bp)) {
664                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
665                         if (bp->port.pmf)
666                                 /* enable nig and gpio3 attention */
667                                 val |= 0x1100;
668                 } else
669                         val = 0xffff;
670
671                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
672                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
673         }
674
675         /* Make sure that interrupts are indeed enabled from here on */
676         mmiowb();
677 }
678
679 static void bnx2x_int_disable(struct bnx2x *bp)
680 {
681         int port = BP_PORT(bp);
682         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
683         u32 val = REG_RD(bp, addr);
684
685         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
686                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
687                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
688                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
689
690         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
691            val, port, addr);
692
693         /* flush all outstanding writes */
694         mmiowb();
695
696         REG_WR(bp, addr, val);
697         if (REG_RD(bp, addr) != val)
698                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
699
700 }
701
702 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
703 {
704         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
705         int i, offset;
706
707         /* disable interrupt handling */
708         atomic_inc(&bp->intr_sem);
709         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
710
711         if (disable_hw)
712                 /* prevent the HW from sending interrupts */
713                 bnx2x_int_disable(bp);
714
715         /* make sure all ISRs are done */
716         if (msix) {
717                 synchronize_irq(bp->msix_table[0].vector);
718                 offset = 1;
719                 for_each_queue(bp, i)
720                         synchronize_irq(bp->msix_table[i + offset].vector);
721         } else
722                 synchronize_irq(bp->pdev->irq);
723
724         /* make sure sp_task is not running */
725         cancel_delayed_work(&bp->sp_task);
726         flush_workqueue(bnx2x_wq);
727 }
728
729 /* fast path */
730
731 /*
732  * General service functions
733  */
734
735 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
736                                 u8 storm, u16 index, u8 op, u8 update)
737 {
738         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
739                        COMMAND_REG_INT_ACK);
740         struct igu_ack_register igu_ack;
741
742         igu_ack.status_block_index = index;
743         igu_ack.sb_id_and_flags =
744                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
745                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
746                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
747                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
748
749         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
750            (*(u32 *)&igu_ack), hc_addr);
751         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
752
753         /* Make sure that ACK is written */
754         mmiowb();
755         barrier();
756 }
757
758 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
759 {
760         struct host_status_block *fpsb = fp->status_blk;
761         u16 rc = 0;
762
763         barrier(); /* status block is written to by the chip */
764         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
765                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
766                 rc |= 1;
767         }
768         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
769                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
770                 rc |= 2;
771         }
772         return rc;
773 }
774
775 static u16 bnx2x_ack_int(struct bnx2x *bp)
776 {
777         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
778                        COMMAND_REG_SIMD_MASK);
779         u32 result = REG_RD(bp, hc_addr);
780
781         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
782            result, hc_addr);
783
784         return result;
785 }
786
787
788 /*
789  * fast path service functions
790  */
791
792 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
793 {
794         u16 tx_cons_sb;
795
796         /* Tell compiler that status block fields can change */
797         barrier();
798         tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
799         return (fp->tx_pkt_cons != tx_cons_sb);
800 }
801
802 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
803 {
804         /* Tell compiler that consumer and producer can change */
805         barrier();
806         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
807 }
808
809 /* free skb in the packet ring at pos idx
810  * return idx of last bd freed
811  */
812 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
813                              u16 idx)
814 {
815         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
816         struct eth_tx_bd *tx_bd;
817         struct sk_buff *skb = tx_buf->skb;
818         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
819         int nbd;
820
821         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
822            idx, tx_buf, skb);
823
824         /* unmap first bd */
825         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
826         tx_bd = &fp->tx_desc_ring[bd_idx];
827         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
828                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
829
830         nbd = le16_to_cpu(tx_bd->nbd) - 1;
831         new_cons = nbd + tx_buf->first_bd;
832 #ifdef BNX2X_STOP_ON_ERROR
833         if (nbd > (MAX_SKB_FRAGS + 2)) {
834                 BNX2X_ERR("BAD nbd!\n");
835                 bnx2x_panic();
836         }
837 #endif
838
839         /* Skip a parse bd and the TSO split header bd
840            since they have no mapping */
841         if (nbd)
842                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
843
844         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
845                                            ETH_TX_BD_FLAGS_TCP_CSUM |
846                                            ETH_TX_BD_FLAGS_SW_LSO)) {
847                 if (--nbd)
848                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
849                 tx_bd = &fp->tx_desc_ring[bd_idx];
850                 /* is this a TSO split header bd? */
851                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
852                         if (--nbd)
853                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
854                 }
855         }
856
857         /* now free frags */
858         while (nbd > 0) {
859
860                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
861                 tx_bd = &fp->tx_desc_ring[bd_idx];
862                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
863                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
864                 if (--nbd)
865                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
866         }
867
868         /* release skb */
869         WARN_ON(!skb);
870         dev_kfree_skb(skb);
871         tx_buf->first_bd = 0;
872         tx_buf->skb = NULL;
873
874         return new_cons;
875 }
876
877 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
878 {
879         s16 used;
880         u16 prod;
881         u16 cons;
882
883         barrier(); /* Tell compiler that prod and cons can change */
884         prod = fp->tx_bd_prod;
885         cons = fp->tx_bd_cons;
886
887         /* NUM_TX_RINGS = number of "next-page" entries
888            It will be used as a threshold */
889         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
890
891 #ifdef BNX2X_STOP_ON_ERROR
892         WARN_ON(used < 0);
893         WARN_ON(used > fp->bp->tx_ring_size);
894         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
895 #endif
896
897         return (s16)(fp->bp->tx_ring_size) - used;
898 }
899
900 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
901 {
902         struct bnx2x *bp = fp->bp;
903         struct netdev_queue *txq;
904         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
905         int done = 0;
906
907 #ifdef BNX2X_STOP_ON_ERROR
908         if (unlikely(bp->panic))
909                 return;
910 #endif
911
912         txq = netdev_get_tx_queue(bp->dev, fp->index);
913         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
914         sw_cons = fp->tx_pkt_cons;
915
916         while (sw_cons != hw_cons) {
917                 u16 pkt_cons;
918
919                 pkt_cons = TX_BD(sw_cons);
920
921                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
922
923                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
924                    hw_cons, sw_cons, pkt_cons);
925
926 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
927                         rmb();
928                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
929                 }
930 */
931                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
932                 sw_cons++;
933                 done++;
934         }
935
936         fp->tx_pkt_cons = sw_cons;
937         fp->tx_bd_cons = bd_cons;
938
939         /* TBD need a thresh? */
940         if (unlikely(netif_tx_queue_stopped(txq))) {
941
942                 __netif_tx_lock(txq, smp_processor_id());
943
944                 /* Need to make the tx_bd_cons update visible to start_xmit()
945                  * before checking for netif_tx_queue_stopped().  Without the
946                  * memory barrier, there is a small possibility that
947                  * start_xmit() will miss it and cause the queue to be stopped
948                  * forever.
949                  */
950                 smp_mb();
951
952                 if ((netif_tx_queue_stopped(txq)) &&
953                     (bp->state == BNX2X_STATE_OPEN) &&
954                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
955                         netif_tx_wake_queue(txq);
956
957                 __netif_tx_unlock(txq);
958         }
959 }
960
961
962 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
963                            union eth_rx_cqe *rr_cqe)
964 {
965         struct bnx2x *bp = fp->bp;
966         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
967         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
968
969         DP(BNX2X_MSG_SP,
970            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
971            fp->index, cid, command, bp->state,
972            rr_cqe->ramrod_cqe.ramrod_type);
973
974         bp->spq_left++;
975
976         if (fp->index) {
977                 switch (command | fp->state) {
978                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
979                                                 BNX2X_FP_STATE_OPENING):
980                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
981                            cid);
982                         fp->state = BNX2X_FP_STATE_OPEN;
983                         break;
984
985                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
986                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
987                            cid);
988                         fp->state = BNX2X_FP_STATE_HALTED;
989                         break;
990
991                 default:
992                         BNX2X_ERR("unexpected MC reply (%d)  "
993                                   "fp->state is %x\n", command, fp->state);
994                         break;
995                 }
996                 mb(); /* force bnx2x_wait_ramrod() to see the change */
997                 return;
998         }
999
1000         switch (command | bp->state) {
1001         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1002                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1003                 bp->state = BNX2X_STATE_OPEN;
1004                 break;
1005
1006         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1007                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1008                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1009                 fp->state = BNX2X_FP_STATE_HALTED;
1010                 break;
1011
1012         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1013                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1014                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1015                 break;
1016
1017
1018         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1019         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1020                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1021                 bp->set_mac_pending = 0;
1022                 break;
1023
1024         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1025                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1026                 break;
1027
1028         default:
1029                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1030                           command, bp->state);
1031                 break;
1032         }
1033         mb(); /* force bnx2x_wait_ramrod() to see the change */
1034 }
1035
1036 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1037                                      struct bnx2x_fastpath *fp, u16 index)
1038 {
1039         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1040         struct page *page = sw_buf->page;
1041         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1042
1043         /* Skip "next page" elements */
1044         if (!page)
1045                 return;
1046
1047         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1048                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1049         __free_pages(page, PAGES_PER_SGE_SHIFT);
1050
1051         sw_buf->page = NULL;
1052         sge->addr_hi = 0;
1053         sge->addr_lo = 0;
1054 }
1055
1056 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1057                                            struct bnx2x_fastpath *fp, int last)
1058 {
1059         int i;
1060
1061         for (i = 0; i < last; i++)
1062                 bnx2x_free_rx_sge(bp, fp, i);
1063 }
1064
1065 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1066                                      struct bnx2x_fastpath *fp, u16 index)
1067 {
1068         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1069         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1070         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1071         dma_addr_t mapping;
1072
1073         if (unlikely(page == NULL))
1074                 return -ENOMEM;
1075
1076         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1077                                PCI_DMA_FROMDEVICE);
1078         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1079                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1080                 return -ENOMEM;
1081         }
1082
1083         sw_buf->page = page;
1084         pci_unmap_addr_set(sw_buf, mapping, mapping);
1085
1086         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1087         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1088
1089         return 0;
1090 }
1091
1092 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1093                                      struct bnx2x_fastpath *fp, u16 index)
1094 {
1095         struct sk_buff *skb;
1096         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1097         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1098         dma_addr_t mapping;
1099
1100         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1101         if (unlikely(skb == NULL))
1102                 return -ENOMEM;
1103
1104         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1105                                  PCI_DMA_FROMDEVICE);
1106         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1107                 dev_kfree_skb(skb);
1108                 return -ENOMEM;
1109         }
1110
1111         rx_buf->skb = skb;
1112         pci_unmap_addr_set(rx_buf, mapping, mapping);
1113
1114         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1115         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1116
1117         return 0;
1118 }
1119
1120 /* note that we are not allocating a new skb,
1121  * we are just moving one from cons to prod
1122  * we are not creating a new mapping,
1123  * so there is no need to check for dma_mapping_error().
1124  */
1125 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1126                                struct sk_buff *skb, u16 cons, u16 prod)
1127 {
1128         struct bnx2x *bp = fp->bp;
1129         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1130         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1131         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1132         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1133
1134         pci_dma_sync_single_for_device(bp->pdev,
1135                                        pci_unmap_addr(cons_rx_buf, mapping),
1136                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1137
1138         prod_rx_buf->skb = cons_rx_buf->skb;
1139         pci_unmap_addr_set(prod_rx_buf, mapping,
1140                            pci_unmap_addr(cons_rx_buf, mapping));
1141         *prod_bd = *cons_bd;
1142 }
1143
1144 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1145                                              u16 idx)
1146 {
1147         u16 last_max = fp->last_max_sge;
1148
1149         if (SUB_S16(idx, last_max) > 0)
1150                 fp->last_max_sge = idx;
1151 }
1152
1153 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1154 {
1155         int i, j;
1156
1157         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1158                 int idx = RX_SGE_CNT * i - 1;
1159
1160                 for (j = 0; j < 2; j++) {
1161                         SGE_MASK_CLEAR_BIT(fp, idx);
1162                         idx--;
1163                 }
1164         }
1165 }
1166
1167 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1168                                   struct eth_fast_path_rx_cqe *fp_cqe)
1169 {
1170         struct bnx2x *bp = fp->bp;
1171         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1172                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1173                       SGE_PAGE_SHIFT;
1174         u16 last_max, last_elem, first_elem;
1175         u16 delta = 0;
1176         u16 i;
1177
1178         if (!sge_len)
1179                 return;
1180
1181         /* First mark all used pages */
1182         for (i = 0; i < sge_len; i++)
1183                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1184
1185         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1186            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1187
1188         /* Here we assume that the last SGE index is the biggest */
1189         prefetch((void *)(fp->sge_mask));
1190         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1191
1192         last_max = RX_SGE(fp->last_max_sge);
1193         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1194         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1195
1196         /* If ring is not full */
1197         if (last_elem + 1 != first_elem)
1198                 last_elem++;
1199
1200         /* Now update the prod */
1201         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1202                 if (likely(fp->sge_mask[i]))
1203                         break;
1204
1205                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1206                 delta += RX_SGE_MASK_ELEM_SZ;
1207         }
1208
1209         if (delta > 0) {
1210                 fp->rx_sge_prod += delta;
1211                 /* clear page-end entries */
1212                 bnx2x_clear_sge_mask_next_elems(fp);
1213         }
1214
1215         DP(NETIF_MSG_RX_STATUS,
1216            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1217            fp->last_max_sge, fp->rx_sge_prod);
1218 }
1219
1220 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1221 {
1222         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1223         memset(fp->sge_mask, 0xff,
1224                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1225
1226         /* Clear the two last indices in the page to 1:
1227            these are the indices that correspond to the "next" element,
1228            hence will never be indicated and should be removed from
1229            the calculations. */
1230         bnx2x_clear_sge_mask_next_elems(fp);
1231 }
1232
1233 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1234                             struct sk_buff *skb, u16 cons, u16 prod)
1235 {
1236         struct bnx2x *bp = fp->bp;
1237         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1238         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1239         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1240         dma_addr_t mapping;
1241
1242         /* move empty skb from pool to prod and map it */
1243         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1244         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1245                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1246         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1247
1248         /* move partial skb from cons to pool (don't unmap yet) */
1249         fp->tpa_pool[queue] = *cons_rx_buf;
1250
1251         /* mark bin state as start - print error if current state != stop */
1252         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1253                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1254
1255         fp->tpa_state[queue] = BNX2X_TPA_START;
1256
1257         /* point prod_bd to new skb */
1258         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1259         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1260
1261 #ifdef BNX2X_STOP_ON_ERROR
1262         fp->tpa_queue_used |= (1 << queue);
1263 #ifdef __powerpc64__
1264         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1265 #else
1266         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1267 #endif
1268            fp->tpa_queue_used);
1269 #endif
1270 }
1271
1272 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1273                                struct sk_buff *skb,
1274                                struct eth_fast_path_rx_cqe *fp_cqe,
1275                                u16 cqe_idx)
1276 {
1277         struct sw_rx_page *rx_pg, old_rx_pg;
1278         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1279         u32 i, frag_len, frag_size, pages;
1280         int err;
1281         int j;
1282
1283         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1284         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1285
1286         /* This is needed in order to enable forwarding support */
1287         if (frag_size)
1288                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1289                                                max(frag_size, (u32)len_on_bd));
1290
1291 #ifdef BNX2X_STOP_ON_ERROR
1292         if (pages >
1293             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1294                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1295                           pages, cqe_idx);
1296                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1297                           fp_cqe->pkt_len, len_on_bd);
1298                 bnx2x_panic();
1299                 return -EINVAL;
1300         }
1301 #endif
1302
1303         /* Run through the SGL and compose the fragmented skb */
1304         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1305                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1306
1307                 /* FW gives the indices of the SGE as if the ring is an array
1308                    (meaning that "next" element will consume 2 indices) */
1309                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1310                 rx_pg = &fp->rx_page_ring[sge_idx];
1311                 old_rx_pg = *rx_pg;
1312
1313                 /* If we fail to allocate a substitute page, we simply stop
1314                    where we are and drop the whole packet */
1315                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1316                 if (unlikely(err)) {
1317                         fp->eth_q_stats.rx_skb_alloc_failed++;
1318                         return err;
1319                 }
1320
1321                 /* Unmap the page as we r going to pass it to the stack */
1322                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1323                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1324
1325                 /* Add one frag and update the appropriate fields in the skb */
1326                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1327
1328                 skb->data_len += frag_len;
1329                 skb->truesize += frag_len;
1330                 skb->len += frag_len;
1331
1332                 frag_size -= frag_len;
1333         }
1334
1335         return 0;
1336 }
1337
1338 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1339                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1340                            u16 cqe_idx)
1341 {
1342         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1343         struct sk_buff *skb = rx_buf->skb;
1344         /* alloc new skb */
1345         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1346
1347         /* Unmap skb in the pool anyway, as we are going to change
1348            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1349            fails. */
1350         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1351                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1352
1353         if (likely(new_skb)) {
1354                 /* fix ip xsum and give it to the stack */
1355                 /* (no need to map the new skb) */
1356 #ifdef BCM_VLAN
1357                 int is_vlan_cqe =
1358                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1359                          PARSING_FLAGS_VLAN);
1360                 int is_not_hwaccel_vlan_cqe =
1361                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1362 #endif
1363
1364                 prefetch(skb);
1365                 prefetch(((char *)(skb)) + 128);
1366
1367 #ifdef BNX2X_STOP_ON_ERROR
1368                 if (pad + len > bp->rx_buf_size) {
1369                         BNX2X_ERR("skb_put is about to fail...  "
1370                                   "pad %d  len %d  rx_buf_size %d\n",
1371                                   pad, len, bp->rx_buf_size);
1372                         bnx2x_panic();
1373                         return;
1374                 }
1375 #endif
1376
1377                 skb_reserve(skb, pad);
1378                 skb_put(skb, len);
1379
1380                 skb->protocol = eth_type_trans(skb, bp->dev);
1381                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1382
1383                 {
1384                         struct iphdr *iph;
1385
1386                         iph = (struct iphdr *)skb->data;
1387 #ifdef BCM_VLAN
1388                         /* If there is no Rx VLAN offloading -
1389                            take VLAN tag into an account */
1390                         if (unlikely(is_not_hwaccel_vlan_cqe))
1391                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1392 #endif
1393                         iph->check = 0;
1394                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1395                 }
1396
1397                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1398                                          &cqe->fast_path_cqe, cqe_idx)) {
1399 #ifdef BCM_VLAN
1400                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1401                             (!is_not_hwaccel_vlan_cqe))
1402                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1403                                                 le16_to_cpu(cqe->fast_path_cqe.
1404                                                             vlan_tag));
1405                         else
1406 #endif
1407                                 netif_receive_skb(skb);
1408                 } else {
1409                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1410                            " - dropping packet!\n");
1411                         dev_kfree_skb(skb);
1412                 }
1413
1414
1415                 /* put new skb in bin */
1416                 fp->tpa_pool[queue].skb = new_skb;
1417
1418         } else {
1419                 /* else drop the packet and keep the buffer in the bin */
1420                 DP(NETIF_MSG_RX_STATUS,
1421                    "Failed to allocate new skb - dropping packet!\n");
1422                 fp->eth_q_stats.rx_skb_alloc_failed++;
1423         }
1424
1425         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1426 }
1427
1428 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1429                                         struct bnx2x_fastpath *fp,
1430                                         u16 bd_prod, u16 rx_comp_prod,
1431                                         u16 rx_sge_prod)
1432 {
1433         struct ustorm_eth_rx_producers rx_prods = {0};
1434         int i;
1435
1436         /* Update producers */
1437         rx_prods.bd_prod = bd_prod;
1438         rx_prods.cqe_prod = rx_comp_prod;
1439         rx_prods.sge_prod = rx_sge_prod;
1440
1441         /*
1442          * Make sure that the BD and SGE data is updated before updating the
1443          * producers since FW might read the BD/SGE right after the producer
1444          * is updated.
1445          * This is only applicable for weak-ordered memory model archs such
1446          * as IA-64. The following barrier is also mandatory since FW will
1447          * assumes BDs must have buffers.
1448          */
1449         wmb();
1450
1451         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1452                 REG_WR(bp, BAR_USTRORM_INTMEM +
1453                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1454                        ((u32 *)&rx_prods)[i]);
1455
1456         mmiowb(); /* keep prod updates ordered */
1457
1458         DP(NETIF_MSG_RX_STATUS,
1459            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1460            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1461 }
1462
1463 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1464 {
1465         struct bnx2x *bp = fp->bp;
1466         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1467         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1468         int rx_pkt = 0;
1469
1470 #ifdef BNX2X_STOP_ON_ERROR
1471         if (unlikely(bp->panic))
1472                 return 0;
1473 #endif
1474
1475         /* CQ "next element" is of the size of the regular element,
1476            that's why it's ok here */
1477         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1478         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1479                 hw_comp_cons++;
1480
1481         bd_cons = fp->rx_bd_cons;
1482         bd_prod = fp->rx_bd_prod;
1483         bd_prod_fw = bd_prod;
1484         sw_comp_cons = fp->rx_comp_cons;
1485         sw_comp_prod = fp->rx_comp_prod;
1486
1487         /* Memory barrier necessary as speculative reads of the rx
1488          * buffer can be ahead of the index in the status block
1489          */
1490         rmb();
1491
1492         DP(NETIF_MSG_RX_STATUS,
1493            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1494            fp->index, hw_comp_cons, sw_comp_cons);
1495
1496         while (sw_comp_cons != hw_comp_cons) {
1497                 struct sw_rx_bd *rx_buf = NULL;
1498                 struct sk_buff *skb;
1499                 union eth_rx_cqe *cqe;
1500                 u8 cqe_fp_flags;
1501                 u16 len, pad;
1502
1503                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1504                 bd_prod = RX_BD(bd_prod);
1505                 bd_cons = RX_BD(bd_cons);
1506
1507                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1508                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1509
1510                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1511                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1512                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1513                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1514                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1515                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1516
1517                 /* is this a slowpath msg? */
1518                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1519                         bnx2x_sp_event(fp, cqe);
1520                         goto next_cqe;
1521
1522                 /* this is an rx packet */
1523                 } else {
1524                         rx_buf = &fp->rx_buf_ring[bd_cons];
1525                         skb = rx_buf->skb;
1526                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1527                         pad = cqe->fast_path_cqe.placement_offset;
1528
1529                         /* If CQE is marked both TPA_START and TPA_END
1530                            it is a non-TPA CQE */
1531                         if ((!fp->disable_tpa) &&
1532                             (TPA_TYPE(cqe_fp_flags) !=
1533                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1534                                 u16 queue = cqe->fast_path_cqe.queue_index;
1535
1536                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1537                                         DP(NETIF_MSG_RX_STATUS,
1538                                            "calling tpa_start on queue %d\n",
1539                                            queue);
1540
1541                                         bnx2x_tpa_start(fp, queue, skb,
1542                                                         bd_cons, bd_prod);
1543                                         goto next_rx;
1544                                 }
1545
1546                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1547                                         DP(NETIF_MSG_RX_STATUS,
1548                                            "calling tpa_stop on queue %d\n",
1549                                            queue);
1550
1551                                         if (!BNX2X_RX_SUM_FIX(cqe))
1552                                                 BNX2X_ERR("STOP on none TCP "
1553                                                           "data\n");
1554
1555                                         /* This is a size of the linear data
1556                                            on this skb */
1557                                         len = le16_to_cpu(cqe->fast_path_cqe.
1558                                                                 len_on_bd);
1559                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1560                                                     len, cqe, comp_ring_cons);
1561 #ifdef BNX2X_STOP_ON_ERROR
1562                                         if (bp->panic)
1563                                                 return 0;
1564 #endif
1565
1566                                         bnx2x_update_sge_prod(fp,
1567                                                         &cqe->fast_path_cqe);
1568                                         goto next_cqe;
1569                                 }
1570                         }
1571
1572                         pci_dma_sync_single_for_device(bp->pdev,
1573                                         pci_unmap_addr(rx_buf, mapping),
1574                                                        pad + RX_COPY_THRESH,
1575                                                        PCI_DMA_FROMDEVICE);
1576                         prefetch(skb);
1577                         prefetch(((char *)(skb)) + 128);
1578
1579                         /* is this an error packet? */
1580                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1581                                 DP(NETIF_MSG_RX_ERR,
1582                                    "ERROR  flags %x  rx packet %u\n",
1583                                    cqe_fp_flags, sw_comp_cons);
1584                                 fp->eth_q_stats.rx_err_discard_pkt++;
1585                                 goto reuse_rx;
1586                         }
1587
1588                         /* Since we don't have a jumbo ring
1589                          * copy small packets if mtu > 1500
1590                          */
1591                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1592                             (len <= RX_COPY_THRESH)) {
1593                                 struct sk_buff *new_skb;
1594
1595                                 new_skb = netdev_alloc_skb(bp->dev,
1596                                                            len + pad);
1597                                 if (new_skb == NULL) {
1598                                         DP(NETIF_MSG_RX_ERR,
1599                                            "ERROR  packet dropped "
1600                                            "because of alloc failure\n");
1601                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1602                                         goto reuse_rx;
1603                                 }
1604
1605                                 /* aligned copy */
1606                                 skb_copy_from_linear_data_offset(skb, pad,
1607                                                     new_skb->data + pad, len);
1608                                 skb_reserve(new_skb, pad);
1609                                 skb_put(new_skb, len);
1610
1611                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1612
1613                                 skb = new_skb;
1614
1615                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1616                                 pci_unmap_single(bp->pdev,
1617                                         pci_unmap_addr(rx_buf, mapping),
1618                                                  bp->rx_buf_size,
1619                                                  PCI_DMA_FROMDEVICE);
1620                                 skb_reserve(skb, pad);
1621                                 skb_put(skb, len);
1622
1623                         } else {
1624                                 DP(NETIF_MSG_RX_ERR,
1625                                    "ERROR  packet dropped because "
1626                                    "of alloc failure\n");
1627                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1628 reuse_rx:
1629                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1630                                 goto next_rx;
1631                         }
1632
1633                         skb->protocol = eth_type_trans(skb, bp->dev);
1634
1635                         skb->ip_summed = CHECKSUM_NONE;
1636                         if (bp->rx_csum) {
1637                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1638                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1639                                 else
1640                                         fp->eth_q_stats.hw_csum_err++;
1641                         }
1642                 }
1643
1644                 skb_record_rx_queue(skb, fp->index);
1645 #ifdef BCM_VLAN
1646                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1647                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1648                      PARSING_FLAGS_VLAN))
1649                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1650                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1651                 else
1652 #endif
1653                         netif_receive_skb(skb);
1654
1655
1656 next_rx:
1657                 rx_buf->skb = NULL;
1658
1659                 bd_cons = NEXT_RX_IDX(bd_cons);
1660                 bd_prod = NEXT_RX_IDX(bd_prod);
1661                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1662                 rx_pkt++;
1663 next_cqe:
1664                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1665                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1666
1667                 if (rx_pkt == budget)
1668                         break;
1669         } /* while */
1670
1671         fp->rx_bd_cons = bd_cons;
1672         fp->rx_bd_prod = bd_prod_fw;
1673         fp->rx_comp_cons = sw_comp_cons;
1674         fp->rx_comp_prod = sw_comp_prod;
1675
1676         /* Update producers */
1677         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1678                              fp->rx_sge_prod);
1679
1680         fp->rx_pkt += rx_pkt;
1681         fp->rx_calls++;
1682
1683         return rx_pkt;
1684 }
1685
1686 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1687 {
1688         struct bnx2x_fastpath *fp = fp_cookie;
1689         struct bnx2x *bp = fp->bp;
1690         int index = fp->index;
1691
1692         /* Return here if interrupt is disabled */
1693         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1694                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1695                 return IRQ_HANDLED;
1696         }
1697
1698         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1699            index, fp->sb_id);
1700         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1701
1702 #ifdef BNX2X_STOP_ON_ERROR
1703         if (unlikely(bp->panic))
1704                 return IRQ_HANDLED;
1705 #endif
1706
1707         prefetch(fp->rx_cons_sb);
1708         prefetch(fp->tx_cons_sb);
1709         prefetch(&fp->status_blk->c_status_block.status_block_index);
1710         prefetch(&fp->status_blk->u_status_block.status_block_index);
1711
1712         napi_schedule(&bnx2x_fp(bp, index, napi));
1713
1714         return IRQ_HANDLED;
1715 }
1716
1717 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1718 {
1719         struct bnx2x *bp = netdev_priv(dev_instance);
1720         u16 status = bnx2x_ack_int(bp);
1721         u16 mask;
1722
1723         /* Return here if interrupt is shared and it's not for us */
1724         if (unlikely(status == 0)) {
1725                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1726                 return IRQ_NONE;
1727         }
1728         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1729
1730         /* Return here if interrupt is disabled */
1731         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1732                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1733                 return IRQ_HANDLED;
1734         }
1735
1736 #ifdef BNX2X_STOP_ON_ERROR
1737         if (unlikely(bp->panic))
1738                 return IRQ_HANDLED;
1739 #endif
1740
1741         mask = 0x2 << bp->fp[0].sb_id;
1742         if (status & mask) {
1743                 struct bnx2x_fastpath *fp = &bp->fp[0];
1744
1745                 prefetch(fp->rx_cons_sb);
1746                 prefetch(fp->tx_cons_sb);
1747                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1748                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1749
1750                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1751
1752                 status &= ~mask;
1753         }
1754
1755
1756         if (unlikely(status & 0x1)) {
1757                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1758
1759                 status &= ~0x1;
1760                 if (!status)
1761                         return IRQ_HANDLED;
1762         }
1763
1764         if (status)
1765                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1766                    status);
1767
1768         return IRQ_HANDLED;
1769 }
1770
1771 /* end of fast path */
1772
1773 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1774
1775 /* Link */
1776
1777 /*
1778  * General service functions
1779  */
1780
1781 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1782 {
1783         u32 lock_status;
1784         u32 resource_bit = (1 << resource);
1785         int func = BP_FUNC(bp);
1786         u32 hw_lock_control_reg;
1787         int cnt;
1788
1789         /* Validating that the resource is within range */
1790         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1791                 DP(NETIF_MSG_HW,
1792                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1794                 return -EINVAL;
1795         }
1796
1797         if (func <= 5) {
1798                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1799         } else {
1800                 hw_lock_control_reg =
1801                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1802         }
1803
1804         /* Validating that the resource is not already taken */
1805         lock_status = REG_RD(bp, hw_lock_control_reg);
1806         if (lock_status & resource_bit) {
1807                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1808                    lock_status, resource_bit);
1809                 return -EEXIST;
1810         }
1811
1812         /* Try for 5 second every 5ms */
1813         for (cnt = 0; cnt < 1000; cnt++) {
1814                 /* Try to acquire the lock */
1815                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1816                 lock_status = REG_RD(bp, hw_lock_control_reg);
1817                 if (lock_status & resource_bit)
1818                         return 0;
1819
1820                 msleep(5);
1821         }
1822         DP(NETIF_MSG_HW, "Timeout\n");
1823         return -EAGAIN;
1824 }
1825
1826 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1827 {
1828         u32 lock_status;
1829         u32 resource_bit = (1 << resource);
1830         int func = BP_FUNC(bp);
1831         u32 hw_lock_control_reg;
1832
1833         /* Validating that the resource is within range */
1834         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1835                 DP(NETIF_MSG_HW,
1836                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1837                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1838                 return -EINVAL;
1839         }
1840
1841         if (func <= 5) {
1842                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1843         } else {
1844                 hw_lock_control_reg =
1845                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1846         }
1847
1848         /* Validating that the resource is currently taken */
1849         lock_status = REG_RD(bp, hw_lock_control_reg);
1850         if (!(lock_status & resource_bit)) {
1851                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1852                    lock_status, resource_bit);
1853                 return -EFAULT;
1854         }
1855
1856         REG_WR(bp, hw_lock_control_reg, resource_bit);
1857         return 0;
1858 }
1859
1860 /* HW Lock for shared dual port PHYs */
1861 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1862 {
1863         mutex_lock(&bp->port.phy_mutex);
1864
1865         if (bp->port.need_hw_lock)
1866                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1867 }
1868
1869 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1870 {
1871         if (bp->port.need_hw_lock)
1872                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1873
1874         mutex_unlock(&bp->port.phy_mutex);
1875 }
1876
1877 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1878 {
1879         /* The GPIO should be swapped if swap register is set and active */
1880         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1881                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1882         int gpio_shift = gpio_num +
1883                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1884         u32 gpio_mask = (1 << gpio_shift);
1885         u32 gpio_reg;
1886         int value;
1887
1888         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1889                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1890                 return -EINVAL;
1891         }
1892
1893         /* read GPIO value */
1894         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1895
1896         /* get the requested pin value */
1897         if ((gpio_reg & gpio_mask) == gpio_mask)
1898                 value = 1;
1899         else
1900                 value = 0;
1901
1902         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1903
1904         return value;
1905 }
1906
1907 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1908 {
1909         /* The GPIO should be swapped if swap register is set and active */
1910         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1911                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1912         int gpio_shift = gpio_num +
1913                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1914         u32 gpio_mask = (1 << gpio_shift);
1915         u32 gpio_reg;
1916
1917         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1918                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1919                 return -EINVAL;
1920         }
1921
1922         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1923         /* read GPIO and mask except the float bits */
1924         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1925
1926         switch (mode) {
1927         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1928                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1929                    gpio_num, gpio_shift);
1930                 /* clear FLOAT and set CLR */
1931                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1932                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1933                 break;
1934
1935         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1936                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1937                    gpio_num, gpio_shift);
1938                 /* clear FLOAT and set SET */
1939                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1940                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1941                 break;
1942
1943         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1944                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1945                    gpio_num, gpio_shift);
1946                 /* set FLOAT */
1947                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1948                 break;
1949
1950         default:
1951                 break;
1952         }
1953
1954         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1955         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1956
1957         return 0;
1958 }
1959
1960 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1961 {
1962         /* The GPIO should be swapped if swap register is set and active */
1963         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1964                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1965         int gpio_shift = gpio_num +
1966                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1967         u32 gpio_mask = (1 << gpio_shift);
1968         u32 gpio_reg;
1969
1970         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1971                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1972                 return -EINVAL;
1973         }
1974
1975         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1976         /* read GPIO int */
1977         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1978
1979         switch (mode) {
1980         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1981                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1982                                    "output low\n", gpio_num, gpio_shift);
1983                 /* clear SET and set CLR */
1984                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1985                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1986                 break;
1987
1988         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1989                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1990                                    "output high\n", gpio_num, gpio_shift);
1991                 /* clear CLR and set SET */
1992                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1993                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1994                 break;
1995
1996         default:
1997                 break;
1998         }
1999
2000         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2001         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2002
2003         return 0;
2004 }
2005
2006 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2007 {
2008         u32 spio_mask = (1 << spio_num);
2009         u32 spio_reg;
2010
2011         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2012             (spio_num > MISC_REGISTERS_SPIO_7)) {
2013                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2014                 return -EINVAL;
2015         }
2016
2017         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2018         /* read SPIO and mask except the float bits */
2019         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2020
2021         switch (mode) {
2022         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2023                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2024                 /* clear FLOAT and set CLR */
2025                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2026                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2027                 break;
2028
2029         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2030                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2031                 /* clear FLOAT and set SET */
2032                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2033                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2034                 break;
2035
2036         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2037                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2038                 /* set FLOAT */
2039                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2040                 break;
2041
2042         default:
2043                 break;
2044         }
2045
2046         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2047         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2048
2049         return 0;
2050 }
2051
2052 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2053 {
2054         switch (bp->link_vars.ieee_fc &
2055                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2056         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2057                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2058                                           ADVERTISED_Pause);
2059                 break;
2060
2061         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2062                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2063                                          ADVERTISED_Pause);
2064                 break;
2065
2066         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2067                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2068                 break;
2069
2070         default:
2071                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2072                                           ADVERTISED_Pause);
2073                 break;
2074         }
2075 }
2076
2077 static void bnx2x_link_report(struct bnx2x *bp)
2078 {
2079         if (bp->link_vars.link_up) {
2080                 if (bp->state == BNX2X_STATE_OPEN)
2081                         netif_carrier_on(bp->dev);
2082                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2083
2084                 printk("%d Mbps ", bp->link_vars.line_speed);
2085
2086                 if (bp->link_vars.duplex == DUPLEX_FULL)
2087                         printk("full duplex");
2088                 else
2089                         printk("half duplex");
2090
2091                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2092                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2093                                 printk(", receive ");
2094                                 if (bp->link_vars.flow_ctrl &
2095                                     BNX2X_FLOW_CTRL_TX)
2096                                         printk("& transmit ");
2097                         } else {
2098                                 printk(", transmit ");
2099                         }
2100                         printk("flow control ON");
2101                 }
2102                 printk("\n");
2103
2104         } else { /* link_down */
2105                 netif_carrier_off(bp->dev);
2106                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2107         }
2108 }
2109
2110 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2111 {
2112         if (!BP_NOMCP(bp)) {
2113                 u8 rc;
2114
2115                 /* Initialize link parameters structure variables */
2116                 /* It is recommended to turn off RX FC for jumbo frames
2117                    for better performance */
2118                 if (IS_E1HMF(bp))
2119                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2120                 else if (bp->dev->mtu > 5000)
2121                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2122                 else
2123                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2124
2125                 bnx2x_acquire_phy_lock(bp);
2126
2127                 if (load_mode == LOAD_DIAG)
2128                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2129
2130                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2131
2132                 bnx2x_release_phy_lock(bp);
2133
2134                 bnx2x_calc_fc_adv(bp);
2135
2136                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2137                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2138                         bnx2x_link_report(bp);
2139                 }
2140
2141                 return rc;
2142         }
2143         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2144         return -EINVAL;
2145 }
2146
2147 static void bnx2x_link_set(struct bnx2x *bp)
2148 {
2149         if (!BP_NOMCP(bp)) {
2150                 bnx2x_acquire_phy_lock(bp);
2151                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2152                 bnx2x_release_phy_lock(bp);
2153
2154                 bnx2x_calc_fc_adv(bp);
2155         } else
2156                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2157 }
2158
2159 static void bnx2x__link_reset(struct bnx2x *bp)
2160 {
2161         if (!BP_NOMCP(bp)) {
2162                 bnx2x_acquire_phy_lock(bp);
2163                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2164                 bnx2x_release_phy_lock(bp);
2165         } else
2166                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2167 }
2168
2169 static u8 bnx2x_link_test(struct bnx2x *bp)
2170 {
2171         u8 rc;
2172
2173         bnx2x_acquire_phy_lock(bp);
2174         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2175         bnx2x_release_phy_lock(bp);
2176
2177         return rc;
2178 }
2179
2180 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2181 {
2182         u32 r_param = bp->link_vars.line_speed / 8;
2183         u32 fair_periodic_timeout_usec;
2184         u32 t_fair;
2185
2186         memset(&(bp->cmng.rs_vars), 0,
2187                sizeof(struct rate_shaping_vars_per_port));
2188         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2189
2190         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2191         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2192
2193         /* this is the threshold below which no timer arming will occur
2194            1.25 coefficient is for the threshold to be a little bigger
2195            than the real time, to compensate for timer in-accuracy */
2196         bp->cmng.rs_vars.rs_threshold =
2197                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2198
2199         /* resolution of fairness timer */
2200         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2201         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2202         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2203
2204         /* this is the threshold below which we won't arm the timer anymore */
2205         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2206
2207         /* we multiply by 1e3/8 to get bytes/msec.
2208            We don't want the credits to pass a credit
2209            of the t_fair*FAIR_MEM (algorithm resolution) */
2210         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2211         /* since each tick is 4 usec */
2212         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2213 }
2214
2215 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2216 {
2217         struct rate_shaping_vars_per_vn m_rs_vn;
2218         struct fairness_vars_per_vn m_fair_vn;
2219         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2220         u16 vn_min_rate, vn_max_rate;
2221         int i;
2222
2223         /* If function is hidden - set min and max to zeroes */
2224         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2225                 vn_min_rate = 0;
2226                 vn_max_rate = 0;
2227
2228         } else {
2229                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2230                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2231                 /* If fairness is enabled (not all min rates are zeroes) and
2232                    if current min rate is zero - set it to 1.
2233                    This is a requirement of the algorithm. */
2234                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2235                         vn_min_rate = DEF_MIN_RATE;
2236                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2237                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2238         }
2239
2240         DP(NETIF_MSG_IFUP,
2241            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2242            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2243
2244         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2245         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2246
2247         /* global vn counter - maximal Mbps for this vn */
2248         m_rs_vn.vn_counter.rate = vn_max_rate;
2249
2250         /* quota - number of bytes transmitted in this period */
2251         m_rs_vn.vn_counter.quota =
2252                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2253
2254         if (bp->vn_weight_sum) {
2255                 /* credit for each period of the fairness algorithm:
2256                    number of bytes in T_FAIR (the vn share the port rate).
2257                    vn_weight_sum should not be larger than 10000, thus
2258                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2259                    than zero */
2260                 m_fair_vn.vn_credit_delta =
2261                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2262                                                  (8 * bp->vn_weight_sum))),
2263                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2264                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2265                    m_fair_vn.vn_credit_delta);
2266         }
2267
2268         /* Store it to internal memory */
2269         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2270                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2271                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2272                        ((u32 *)(&m_rs_vn))[i]);
2273
2274         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2275                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2276                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2277                        ((u32 *)(&m_fair_vn))[i]);
2278 }
2279
2280
2281 /* This function is called upon link interrupt */
2282 static void bnx2x_link_attn(struct bnx2x *bp)
2283 {
2284         /* Make sure that we are synced with the current statistics */
2285         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2286
2287         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2288
2289         if (bp->link_vars.link_up) {
2290
2291                 /* dropless flow control */
2292                 if (CHIP_IS_E1H(bp)) {
2293                         int port = BP_PORT(bp);
2294                         u32 pause_enabled = 0;
2295
2296                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2297                                 pause_enabled = 1;
2298
2299                         REG_WR(bp, BAR_USTRORM_INTMEM +
2300                                USTORM_PAUSE_ENABLED_OFFSET(port),
2301                                pause_enabled);
2302                 }
2303
2304                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2305                         struct host_port_stats *pstats;
2306
2307                         pstats = bnx2x_sp(bp, port_stats);
2308                         /* reset old bmac stats */
2309                         memset(&(pstats->mac_stx[0]), 0,
2310                                sizeof(struct mac_stx));
2311                 }
2312                 if ((bp->state == BNX2X_STATE_OPEN) ||
2313                     (bp->state == BNX2X_STATE_DISABLED))
2314                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2315         }
2316
2317         /* indicate link status */
2318         bnx2x_link_report(bp);
2319
2320         if (IS_E1HMF(bp)) {
2321                 int port = BP_PORT(bp);
2322                 int func;
2323                 int vn;
2324
2325                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2326                         if (vn == BP_E1HVN(bp))
2327                                 continue;
2328
2329                         func = ((vn << 1) | port);
2330
2331                         /* Set the attention towards other drivers
2332                            on the same port */
2333                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2334                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2335                 }
2336
2337                 if (bp->link_vars.link_up) {
2338                         int i;
2339
2340                         /* Init rate shaping and fairness contexts */
2341                         bnx2x_init_port_minmax(bp);
2342
2343                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2344                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2345
2346                         /* Store it to internal memory */
2347                         for (i = 0;
2348                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2349                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2350                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2351                                        ((u32 *)(&bp->cmng))[i]);
2352                 }
2353         }
2354 }
2355
2356 static void bnx2x__link_status_update(struct bnx2x *bp)
2357 {
2358         if (bp->state != BNX2X_STATE_OPEN)
2359                 return;
2360
2361         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2362
2363         if (bp->link_vars.link_up)
2364                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2365         else
2366                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2367
2368         /* indicate link status */
2369         bnx2x_link_report(bp);
2370 }
2371
2372 static void bnx2x_pmf_update(struct bnx2x *bp)
2373 {
2374         int port = BP_PORT(bp);
2375         u32 val;
2376
2377         bp->port.pmf = 1;
2378         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2379
2380         /* enable nig attention */
2381         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2382         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2383         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2384
2385         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2386 }
2387
2388 /* end of Link */
2389
2390 /* slow path */
2391
2392 /*
2393  * General service functions
2394  */
2395
2396 /* the slow path queue is odd since completions arrive on the fastpath ring */
2397 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2398                          u32 data_hi, u32 data_lo, int common)
2399 {
2400         int func = BP_FUNC(bp);
2401
2402         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2403            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2404            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2405            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2406            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2407
2408 #ifdef BNX2X_STOP_ON_ERROR
2409         if (unlikely(bp->panic))
2410                 return -EIO;
2411 #endif
2412
2413         spin_lock_bh(&bp->spq_lock);
2414
2415         if (!bp->spq_left) {
2416                 BNX2X_ERR("BUG! SPQ ring full!\n");
2417                 spin_unlock_bh(&bp->spq_lock);
2418                 bnx2x_panic();
2419                 return -EBUSY;
2420         }
2421
2422         /* CID needs port number to be encoded int it */
2423         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2424                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2425                                      HW_CID(bp, cid)));
2426         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2427         if (common)
2428                 bp->spq_prod_bd->hdr.type |=
2429                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2430
2431         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2432         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2433
2434         bp->spq_left--;
2435
2436         if (bp->spq_prod_bd == bp->spq_last_bd) {
2437                 bp->spq_prod_bd = bp->spq;
2438                 bp->spq_prod_idx = 0;
2439                 DP(NETIF_MSG_TIMER, "end of spq\n");
2440
2441         } else {
2442                 bp->spq_prod_bd++;
2443                 bp->spq_prod_idx++;
2444         }
2445
2446         /* Make sure that BD data is updated before writing the producer */
2447         wmb();
2448
2449         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2450                bp->spq_prod_idx);
2451
2452         mmiowb();
2453
2454         spin_unlock_bh(&bp->spq_lock);
2455         return 0;
2456 }
2457
2458 /* acquire split MCP access lock register */
2459 static int bnx2x_acquire_alr(struct bnx2x *bp)
2460 {
2461         u32 i, j, val;
2462         int rc = 0;
2463
2464         might_sleep();
2465         i = 100;
2466         for (j = 0; j < i*10; j++) {
2467                 val = (1UL << 31);
2468                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2469                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2470                 if (val & (1L << 31))
2471                         break;
2472
2473                 msleep(5);
2474         }
2475         if (!(val & (1L << 31))) {
2476                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2477                 rc = -EBUSY;
2478         }
2479
2480         return rc;
2481 }
2482
2483 /* release split MCP access lock register */
2484 static void bnx2x_release_alr(struct bnx2x *bp)
2485 {
2486         u32 val = 0;
2487
2488         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2489 }
2490
2491 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2492 {
2493         struct host_def_status_block *def_sb = bp->def_status_blk;
2494         u16 rc = 0;
2495
2496         barrier(); /* status block is written to by the chip */
2497         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2498                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2499                 rc |= 1;
2500         }
2501         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2502                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2503                 rc |= 2;
2504         }
2505         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2506                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2507                 rc |= 4;
2508         }
2509         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2510                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2511                 rc |= 8;
2512         }
2513         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2514                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2515                 rc |= 16;
2516         }
2517         return rc;
2518 }
2519
2520 /*
2521  * slow path service functions
2522  */
2523
2524 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2525 {
2526         int port = BP_PORT(bp);
2527         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2528                        COMMAND_REG_ATTN_BITS_SET);
2529         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2530                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2531         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2532                                        NIG_REG_MASK_INTERRUPT_PORT0;
2533         u32 aeu_mask;
2534         u32 nig_mask = 0;
2535
2536         if (bp->attn_state & asserted)
2537                 BNX2X_ERR("IGU ERROR\n");
2538
2539         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2540         aeu_mask = REG_RD(bp, aeu_addr);
2541
2542         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2543            aeu_mask, asserted);
2544         aeu_mask &= ~(asserted & 0xff);
2545         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2546
2547         REG_WR(bp, aeu_addr, aeu_mask);
2548         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2549
2550         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2551         bp->attn_state |= asserted;
2552         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2553
2554         if (asserted & ATTN_HARD_WIRED_MASK) {
2555                 if (asserted & ATTN_NIG_FOR_FUNC) {
2556
2557                         bnx2x_acquire_phy_lock(bp);
2558
2559                         /* save nig interrupt mask */
2560                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2561                         REG_WR(bp, nig_int_mask_addr, 0);
2562
2563                         bnx2x_link_attn(bp);
2564
2565                         /* handle unicore attn? */
2566                 }
2567                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2568                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2569
2570                 if (asserted & GPIO_2_FUNC)
2571                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2572
2573                 if (asserted & GPIO_3_FUNC)
2574                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2575
2576                 if (asserted & GPIO_4_FUNC)
2577                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2578
2579                 if (port == 0) {
2580                         if (asserted & ATTN_GENERAL_ATTN_1) {
2581                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2582                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2583                         }
2584                         if (asserted & ATTN_GENERAL_ATTN_2) {
2585                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2586                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2587                         }
2588                         if (asserted & ATTN_GENERAL_ATTN_3) {
2589                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2590                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2591                         }
2592                 } else {
2593                         if (asserted & ATTN_GENERAL_ATTN_4) {
2594                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2595                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2596                         }
2597                         if (asserted & ATTN_GENERAL_ATTN_5) {
2598                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2599                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2600                         }
2601                         if (asserted & ATTN_GENERAL_ATTN_6) {
2602                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2603                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2604                         }
2605                 }
2606
2607         } /* if hardwired */
2608
2609         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2610            asserted, hc_addr);
2611         REG_WR(bp, hc_addr, asserted);
2612
2613         /* now set back the mask */
2614         if (asserted & ATTN_NIG_FOR_FUNC) {
2615                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2616                 bnx2x_release_phy_lock(bp);
2617         }
2618 }
2619
2620 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2621 {
2622         int port = BP_PORT(bp);
2623
2624         /* mark the failure */
2625         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2626         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2627         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2628                  bp->link_params.ext_phy_config);
2629
2630         /* log the failure */
2631         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2632                " the driver to shutdown the card to prevent permanent"
2633                " damage.  Please contact Dell Support for assistance\n",
2634                bp->dev->name);
2635 }
2636 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2637 {
2638         int port = BP_PORT(bp);
2639         int reg_offset;
2640         u32 val, swap_val, swap_override;
2641
2642         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2643                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2644
2645         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2646
2647                 val = REG_RD(bp, reg_offset);
2648                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2649                 REG_WR(bp, reg_offset, val);
2650
2651                 BNX2X_ERR("SPIO5 hw attention\n");
2652
2653                 /* Fan failure attention */
2654                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2655                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2656                         /* Low power mode is controlled by GPIO 2 */
2657                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2658                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2659                         /* The PHY reset is controlled by GPIO 1 */
2660                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2661                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2662                         break;
2663
2664                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2665                         /* The PHY reset is controlled by GPIO 1 */
2666                         /* fake the port number to cancel the swap done in
2667                            set_gpio() */
2668                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2669                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2670                         port = (swap_val && swap_override) ^ 1;
2671                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2672                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2673                         break;
2674
2675                 default:
2676                         break;
2677                 }
2678                 bnx2x_fan_failure(bp);
2679         }
2680
2681         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2682                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2683                 bnx2x_acquire_phy_lock(bp);
2684                 bnx2x_handle_module_detect_int(&bp->link_params);
2685                 bnx2x_release_phy_lock(bp);
2686         }
2687
2688         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2689
2690                 val = REG_RD(bp, reg_offset);
2691                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2692                 REG_WR(bp, reg_offset, val);
2693
2694                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2695                           (attn & HW_INTERRUT_ASSERT_SET_0));
2696                 bnx2x_panic();
2697         }
2698 }
2699
2700 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2701 {
2702         u32 val;
2703
2704         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2705
2706                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2707                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2708                 /* DORQ discard attention */
2709                 if (val & 0x2)
2710                         BNX2X_ERR("FATAL error from DORQ\n");
2711         }
2712
2713         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2714
2715                 int port = BP_PORT(bp);
2716                 int reg_offset;
2717
2718                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2719                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2720
2721                 val = REG_RD(bp, reg_offset);
2722                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2723                 REG_WR(bp, reg_offset, val);
2724
2725                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2726                           (attn & HW_INTERRUT_ASSERT_SET_1));
2727                 bnx2x_panic();
2728         }
2729 }
2730
2731 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2732 {
2733         u32 val;
2734
2735         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2736
2737                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2738                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2739                 /* CFC error attention */
2740                 if (val & 0x2)
2741                         BNX2X_ERR("FATAL error from CFC\n");
2742         }
2743
2744         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2745
2746                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2747                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2748                 /* RQ_USDMDP_FIFO_OVERFLOW */
2749                 if (val & 0x18000)
2750                         BNX2X_ERR("FATAL error from PXP\n");
2751         }
2752
2753         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2754
2755                 int port = BP_PORT(bp);
2756                 int reg_offset;
2757
2758                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2759                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2760
2761                 val = REG_RD(bp, reg_offset);
2762                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2763                 REG_WR(bp, reg_offset, val);
2764
2765                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2766                           (attn & HW_INTERRUT_ASSERT_SET_2));
2767                 bnx2x_panic();
2768         }
2769 }
2770
2771 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2772 {
2773         u32 val;
2774
2775         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2776
2777                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2778                         int func = BP_FUNC(bp);
2779
2780                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2781                         bnx2x__link_status_update(bp);
2782                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2783                                                         DRV_STATUS_PMF)
2784                                 bnx2x_pmf_update(bp);
2785
2786                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2787
2788                         BNX2X_ERR("MC assert!\n");
2789                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2790                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2791                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2792                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2793                         bnx2x_panic();
2794
2795                 } else if (attn & BNX2X_MCP_ASSERT) {
2796
2797                         BNX2X_ERR("MCP assert!\n");
2798                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2799                         bnx2x_fw_dump(bp);
2800
2801                 } else
2802                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2803         }
2804
2805         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2806                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2807                 if (attn & BNX2X_GRC_TIMEOUT) {
2808                         val = CHIP_IS_E1H(bp) ?
2809                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2810                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2811                 }
2812                 if (attn & BNX2X_GRC_RSV) {
2813                         val = CHIP_IS_E1H(bp) ?
2814                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2815                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2816                 }
2817                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2818         }
2819 }
2820
2821 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2822 {
2823         struct attn_route attn;
2824         struct attn_route group_mask;
2825         int port = BP_PORT(bp);
2826         int index;
2827         u32 reg_addr;
2828         u32 val;
2829         u32 aeu_mask;
2830
2831         /* need to take HW lock because MCP or other port might also
2832            try to handle this event */
2833         bnx2x_acquire_alr(bp);
2834
2835         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2836         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2837         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2838         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2839         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2840            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2841
2842         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2843                 if (deasserted & (1 << index)) {
2844                         group_mask = bp->attn_group[index];
2845
2846                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2847                            index, group_mask.sig[0], group_mask.sig[1],
2848                            group_mask.sig[2], group_mask.sig[3]);
2849
2850                         bnx2x_attn_int_deasserted3(bp,
2851                                         attn.sig[3] & group_mask.sig[3]);
2852                         bnx2x_attn_int_deasserted1(bp,
2853                                         attn.sig[1] & group_mask.sig[1]);
2854                         bnx2x_attn_int_deasserted2(bp,
2855                                         attn.sig[2] & group_mask.sig[2]);
2856                         bnx2x_attn_int_deasserted0(bp,
2857                                         attn.sig[0] & group_mask.sig[0]);
2858
2859                         if ((attn.sig[0] & group_mask.sig[0] &
2860                                                 HW_PRTY_ASSERT_SET_0) ||
2861                             (attn.sig[1] & group_mask.sig[1] &
2862                                                 HW_PRTY_ASSERT_SET_1) ||
2863                             (attn.sig[2] & group_mask.sig[2] &
2864                                                 HW_PRTY_ASSERT_SET_2))
2865                                 BNX2X_ERR("FATAL HW block parity attention\n");
2866                 }
2867         }
2868
2869         bnx2x_release_alr(bp);
2870
2871         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2872
2873         val = ~deasserted;
2874         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2875            val, reg_addr);
2876         REG_WR(bp, reg_addr, val);
2877
2878         if (~bp->attn_state & deasserted)
2879                 BNX2X_ERR("IGU ERROR\n");
2880
2881         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2882                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2883
2884         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2885         aeu_mask = REG_RD(bp, reg_addr);
2886
2887         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2888            aeu_mask, deasserted);
2889         aeu_mask |= (deasserted & 0xff);
2890         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2891
2892         REG_WR(bp, reg_addr, aeu_mask);
2893         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2894
2895         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2896         bp->attn_state &= ~deasserted;
2897         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2898 }
2899
2900 static void bnx2x_attn_int(struct bnx2x *bp)
2901 {
2902         /* read local copy of bits */
2903         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2904                                                                 attn_bits);
2905         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2906                                                                 attn_bits_ack);
2907         u32 attn_state = bp->attn_state;
2908
2909         /* look for changed bits */
2910         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2911         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2912
2913         DP(NETIF_MSG_HW,
2914            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2915            attn_bits, attn_ack, asserted, deasserted);
2916
2917         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2918                 BNX2X_ERR("BAD attention state\n");
2919
2920         /* handle bits that were raised */
2921         if (asserted)
2922                 bnx2x_attn_int_asserted(bp, asserted);
2923
2924         if (deasserted)
2925                 bnx2x_attn_int_deasserted(bp, deasserted);
2926 }
2927
2928 static void bnx2x_sp_task(struct work_struct *work)
2929 {
2930         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2931         u16 status;
2932
2933
2934         /* Return here if interrupt is disabled */
2935         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2936                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2937                 return;
2938         }
2939
2940         status = bnx2x_update_dsb_idx(bp);
2941 /*      if (status == 0)                                     */
2942 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2943
2944         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2945
2946         /* HW attentions */
2947         if (status & 0x1)
2948                 bnx2x_attn_int(bp);
2949
2950         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2951                      IGU_INT_NOP, 1);
2952         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2953                      IGU_INT_NOP, 1);
2954         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2955                      IGU_INT_NOP, 1);
2956         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2957                      IGU_INT_NOP, 1);
2958         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2959                      IGU_INT_ENABLE, 1);
2960
2961 }
2962
2963 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2964 {
2965         struct net_device *dev = dev_instance;
2966         struct bnx2x *bp = netdev_priv(dev);
2967
2968         /* Return here if interrupt is disabled */
2969         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2970                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2971                 return IRQ_HANDLED;
2972         }
2973
2974         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2975
2976 #ifdef BNX2X_STOP_ON_ERROR
2977         if (unlikely(bp->panic))
2978                 return IRQ_HANDLED;
2979 #endif
2980
2981         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2982
2983         return IRQ_HANDLED;
2984 }
2985
2986 /* end of slow path */
2987
2988 /* Statistics */
2989
2990 /****************************************************************************
2991 * Macros
2992 ****************************************************************************/
2993
2994 /* sum[hi:lo] += add[hi:lo] */
2995 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2996         do { \
2997                 s_lo += a_lo; \
2998                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2999         } while (0)
3000
3001 /* difference = minuend - subtrahend */
3002 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3003         do { \
3004                 if (m_lo < s_lo) { \
3005                         /* underflow */ \
3006                         d_hi = m_hi - s_hi; \
3007                         if (d_hi > 0) { \
3008                                 /* we can 'loan' 1 */ \
3009                                 d_hi--; \
3010                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3011                         } else { \
3012                                 /* m_hi <= s_hi */ \
3013                                 d_hi = 0; \
3014                                 d_lo = 0; \
3015                         } \
3016                 } else { \
3017                         /* m_lo >= s_lo */ \
3018                         if (m_hi < s_hi) { \
3019                                 d_hi = 0; \
3020                                 d_lo = 0; \
3021                         } else { \
3022                                 /* m_hi >= s_hi */ \
3023                                 d_hi = m_hi - s_hi; \
3024                                 d_lo = m_lo - s_lo; \
3025                         } \
3026                 } \
3027         } while (0)
3028
3029 #define UPDATE_STAT64(s, t) \
3030         do { \
3031                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3032                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3033                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3034                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3035                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3036                        pstats->mac_stx[1].t##_lo, diff.lo); \
3037         } while (0)
3038
3039 #define UPDATE_STAT64_NIG(s, t) \
3040         do { \
3041                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3042                         diff.lo, new->s##_lo, old->s##_lo); \
3043                 ADD_64(estats->t##_hi, diff.hi, \
3044                        estats->t##_lo, diff.lo); \
3045         } while (0)
3046
3047 /* sum[hi:lo] += add */
3048 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3049         do { \
3050                 s_lo += a; \
3051                 s_hi += (s_lo < a) ? 1 : 0; \
3052         } while (0)
3053
3054 #define UPDATE_EXTEND_STAT(s) \
3055         do { \
3056                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3057                               pstats->mac_stx[1].s##_lo, \
3058                               new->s); \
3059         } while (0)
3060
3061 #define UPDATE_EXTEND_TSTAT(s, t) \
3062         do { \
3063                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3064                 old_tclient->s = tclient->s; \
3065                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3066         } while (0)
3067
3068 #define UPDATE_EXTEND_USTAT(s, t) \
3069         do { \
3070                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3071                 old_uclient->s = uclient->s; \
3072                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3073         } while (0)
3074
3075 #define UPDATE_EXTEND_XSTAT(s, t) \
3076         do { \
3077                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3078                 old_xclient->s = xclient->s; \
3079                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3080         } while (0)
3081
3082 /* minuend -= subtrahend */
3083 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3084         do { \
3085                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3086         } while (0)
3087
3088 /* minuend[hi:lo] -= subtrahend */
3089 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3090         do { \
3091                 SUB_64(m_hi, 0, m_lo, s); \
3092         } while (0)
3093
3094 #define SUB_EXTEND_USTAT(s, t) \
3095         do { \
3096                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3097                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3098         } while (0)
3099
3100 /*
3101  * General service functions
3102  */
3103
3104 static inline long bnx2x_hilo(u32 *hiref)
3105 {
3106         u32 lo = *(hiref + 1);
3107 #if (BITS_PER_LONG == 64)
3108         u32 hi = *hiref;
3109
3110         return HILO_U64(hi, lo);
3111 #else
3112         return lo;
3113 #endif
3114 }
3115
3116 /*
3117  * Init service functions
3118  */
3119
3120 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3121 {
3122         if (!bp->stats_pending) {
3123                 struct eth_query_ramrod_data ramrod_data = {0};
3124                 int i, rc;
3125
3126                 ramrod_data.drv_counter = bp->stats_counter++;
3127                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3128                 for_each_queue(bp, i)
3129                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3130
3131                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3132                                    ((u32 *)&ramrod_data)[1],
3133                                    ((u32 *)&ramrod_data)[0], 0);
3134                 if (rc == 0) {
3135                         /* stats ramrod has it's own slot on the spq */
3136                         bp->spq_left++;
3137                         bp->stats_pending = 1;
3138                 }
3139         }
3140 }
3141
3142 static void bnx2x_stats_init(struct bnx2x *bp)
3143 {
3144         int port = BP_PORT(bp);
3145         int i;
3146
3147         bp->stats_pending = 0;
3148         bp->executer_idx = 0;
3149         bp->stats_counter = 0;
3150
3151         /* port stats */
3152         if (!BP_NOMCP(bp))
3153                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3154         else
3155                 bp->port.port_stx = 0;
3156         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3157
3158         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3159         bp->port.old_nig_stats.brb_discard =
3160                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3161         bp->port.old_nig_stats.brb_truncate =
3162                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3163         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3164                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3165         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3166                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3167
3168         /* function stats */
3169         for_each_queue(bp, i) {
3170                 struct bnx2x_fastpath *fp = &bp->fp[i];
3171
3172                 memset(&fp->old_tclient, 0,
3173                        sizeof(struct tstorm_per_client_stats));
3174                 memset(&fp->old_uclient, 0,
3175                        sizeof(struct ustorm_per_client_stats));
3176                 memset(&fp->old_xclient, 0,
3177                        sizeof(struct xstorm_per_client_stats));
3178                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3179         }
3180
3181         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3182         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3183
3184         bp->stats_state = STATS_STATE_DISABLED;
3185         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3186                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3187 }
3188
3189 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3190 {
3191         struct dmae_command *dmae = &bp->stats_dmae;
3192         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3193
3194         *stats_comp = DMAE_COMP_VAL;
3195         if (CHIP_REV_IS_SLOW(bp))
3196                 return;
3197
3198         /* loader */
3199         if (bp->executer_idx) {
3200                 int loader_idx = PMF_DMAE_C(bp);
3201
3202                 memset(dmae, 0, sizeof(struct dmae_command));
3203
3204                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3205                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3206                                 DMAE_CMD_DST_RESET |
3207 #ifdef __BIG_ENDIAN
3208                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3209 #else
3210                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3211 #endif
3212                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3213                                                DMAE_CMD_PORT_0) |
3214                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3215                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3216                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3217                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3218                                      sizeof(struct dmae_command) *
3219                                      (loader_idx + 1)) >> 2;
3220                 dmae->dst_addr_hi = 0;
3221                 dmae->len = sizeof(struct dmae_command) >> 2;
3222                 if (CHIP_IS_E1(bp))
3223                         dmae->len--;
3224                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3225                 dmae->comp_addr_hi = 0;
3226                 dmae->comp_val = 1;
3227
3228                 *stats_comp = 0;
3229                 bnx2x_post_dmae(bp, dmae, loader_idx);
3230
3231         } else if (bp->func_stx) {
3232                 *stats_comp = 0;
3233                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3234         }
3235 }
3236
3237 static int bnx2x_stats_comp(struct bnx2x *bp)
3238 {
3239         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3240         int cnt = 10;
3241
3242         might_sleep();
3243         while (*stats_comp != DMAE_COMP_VAL) {
3244                 if (!cnt) {
3245                         BNX2X_ERR("timeout waiting for stats finished\n");
3246                         break;
3247                 }
3248                 cnt--;
3249                 msleep(1);
3250         }
3251         return 1;
3252 }
3253
3254 /*
3255  * Statistics service functions
3256  */
3257
3258 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3259 {
3260         struct dmae_command *dmae;
3261         u32 opcode;
3262         int loader_idx = PMF_DMAE_C(bp);
3263         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3264
3265         /* sanity */
3266         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3267                 BNX2X_ERR("BUG!\n");
3268                 return;
3269         }
3270
3271         bp->executer_idx = 0;
3272
3273         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3274                   DMAE_CMD_C_ENABLE |
3275                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3276 #ifdef __BIG_ENDIAN
3277                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3278 #else
3279                   DMAE_CMD_ENDIANITY_DW_SWAP |
3280 #endif
3281                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3282                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3283
3284         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3286         dmae->src_addr_lo = bp->port.port_stx >> 2;
3287         dmae->src_addr_hi = 0;
3288         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3289         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3290         dmae->len = DMAE_LEN32_RD_MAX;
3291         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3292         dmae->comp_addr_hi = 0;
3293         dmae->comp_val = 1;
3294
3295         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3296         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3297         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3298         dmae->src_addr_hi = 0;
3299         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3300                                    DMAE_LEN32_RD_MAX * 4);
3301         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3302                                    DMAE_LEN32_RD_MAX * 4);
3303         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3304         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3305         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3306         dmae->comp_val = DMAE_COMP_VAL;
3307
3308         *stats_comp = 0;
3309         bnx2x_hw_stats_post(bp);
3310         bnx2x_stats_comp(bp);
3311 }
3312
3313 static void bnx2x_port_stats_init(struct bnx2x *bp)
3314 {
3315         struct dmae_command *dmae;
3316         int port = BP_PORT(bp);
3317         int vn = BP_E1HVN(bp);
3318         u32 opcode;
3319         int loader_idx = PMF_DMAE_C(bp);
3320         u32 mac_addr;
3321         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3322
3323         /* sanity */
3324         if (!bp->link_vars.link_up || !bp->port.pmf) {
3325                 BNX2X_ERR("BUG!\n");
3326                 return;
3327         }
3328
3329         bp->executer_idx = 0;
3330
3331         /* MCP */
3332         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3333                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3334                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3335 #ifdef __BIG_ENDIAN
3336                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3337 #else
3338                   DMAE_CMD_ENDIANITY_DW_SWAP |
3339 #endif
3340                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3341                   (vn << DMAE_CMD_E1HVN_SHIFT));
3342
3343         if (bp->port.port_stx) {
3344
3345                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3346                 dmae->opcode = opcode;
3347                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3348                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3349                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3350                 dmae->dst_addr_hi = 0;
3351                 dmae->len = sizeof(struct host_port_stats) >> 2;
3352                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3353                 dmae->comp_addr_hi = 0;
3354                 dmae->comp_val = 1;
3355         }
3356
3357         if (bp->func_stx) {
3358
3359                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3360                 dmae->opcode = opcode;
3361                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3362                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3363                 dmae->dst_addr_lo = bp->func_stx >> 2;
3364                 dmae->dst_addr_hi = 0;
3365                 dmae->len = sizeof(struct host_func_stats) >> 2;
3366                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3367                 dmae->comp_addr_hi = 0;
3368                 dmae->comp_val = 1;
3369         }
3370
3371         /* MAC */
3372         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3373                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3374                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3375 #ifdef __BIG_ENDIAN
3376                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3377 #else
3378                   DMAE_CMD_ENDIANITY_DW_SWAP |
3379 #endif
3380                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3381                   (vn << DMAE_CMD_E1HVN_SHIFT));
3382
3383         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3384
3385                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3386                                    NIG_REG_INGRESS_BMAC0_MEM);
3387
3388                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3389                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3390                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3391                 dmae->opcode = opcode;
3392                 dmae->src_addr_lo = (mac_addr +
3393                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3394                 dmae->src_addr_hi = 0;
3395                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3396                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3397                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3398                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3399                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3400                 dmae->comp_addr_hi = 0;
3401                 dmae->comp_val = 1;
3402
3403                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3404                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3405                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3406                 dmae->opcode = opcode;
3407                 dmae->src_addr_lo = (mac_addr +
3408                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3409                 dmae->src_addr_hi = 0;
3410                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3411                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3412                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3413                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3414                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3415                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3416                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3417                 dmae->comp_addr_hi = 0;
3418                 dmae->comp_val = 1;
3419
3420         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3421
3422                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3423
3424                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3425                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3426                 dmae->opcode = opcode;
3427                 dmae->src_addr_lo = (mac_addr +
3428                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3429                 dmae->src_addr_hi = 0;
3430                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3431                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3432                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3433                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3434                 dmae->comp_addr_hi = 0;
3435                 dmae->comp_val = 1;
3436
3437                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3438                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3439                 dmae->opcode = opcode;
3440                 dmae->src_addr_lo = (mac_addr +
3441                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3442                 dmae->src_addr_hi = 0;
3443                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3444                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3445                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3446                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3447                 dmae->len = 1;
3448                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3449                 dmae->comp_addr_hi = 0;
3450                 dmae->comp_val = 1;
3451
3452                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3453                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3454                 dmae->opcode = opcode;
3455                 dmae->src_addr_lo = (mac_addr +
3456                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3457                 dmae->src_addr_hi = 0;
3458                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3459                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3460                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3461                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3462                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3463                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3464                 dmae->comp_addr_hi = 0;
3465                 dmae->comp_val = 1;
3466         }
3467
3468         /* NIG */
3469         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3470         dmae->opcode = opcode;
3471         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3472                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3473         dmae->src_addr_hi = 0;
3474         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3475         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3476         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3477         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3478         dmae->comp_addr_hi = 0;
3479         dmae->comp_val = 1;
3480
3481         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3482         dmae->opcode = opcode;
3483         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3484                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3485         dmae->src_addr_hi = 0;
3486         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3487                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3488         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3489                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3490         dmae->len = (2*sizeof(u32)) >> 2;
3491         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3492         dmae->comp_addr_hi = 0;
3493         dmae->comp_val = 1;
3494
3495         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3496         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3497                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3498                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3499 #ifdef __BIG_ENDIAN
3500                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3501 #else
3502                         DMAE_CMD_ENDIANITY_DW_SWAP |
3503 #endif
3504                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3505                         (vn << DMAE_CMD_E1HVN_SHIFT));
3506         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3507                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3508         dmae->src_addr_hi = 0;
3509         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3510                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3511         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3512                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3513         dmae->len = (2*sizeof(u32)) >> 2;
3514         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3515         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3516         dmae->comp_val = DMAE_COMP_VAL;
3517
3518         *stats_comp = 0;
3519 }
3520
3521 static void bnx2x_func_stats_init(struct bnx2x *bp)
3522 {
3523         struct dmae_command *dmae = &bp->stats_dmae;
3524         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3525
3526         /* sanity */
3527         if (!bp->func_stx) {
3528                 BNX2X_ERR("BUG!\n");
3529                 return;
3530         }
3531
3532         bp->executer_idx = 0;
3533         memset(dmae, 0, sizeof(struct dmae_command));
3534
3535         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3536                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3537                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3538 #ifdef __BIG_ENDIAN
3539                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3540 #else
3541                         DMAE_CMD_ENDIANITY_DW_SWAP |
3542 #endif
3543                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3544                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3545         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3546         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3547         dmae->dst_addr_lo = bp->func_stx >> 2;
3548         dmae->dst_addr_hi = 0;
3549         dmae->len = sizeof(struct host_func_stats) >> 2;
3550         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3551         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3552         dmae->comp_val = DMAE_COMP_VAL;
3553
3554         *stats_comp = 0;
3555 }
3556
3557 static void bnx2x_stats_start(struct bnx2x *bp)
3558 {
3559         if (bp->port.pmf)
3560                 bnx2x_port_stats_init(bp);
3561
3562         else if (bp->func_stx)
3563                 bnx2x_func_stats_init(bp);
3564
3565         bnx2x_hw_stats_post(bp);
3566         bnx2x_storm_stats_post(bp);
3567 }
3568
3569 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3570 {
3571         bnx2x_stats_comp(bp);
3572         bnx2x_stats_pmf_update(bp);
3573         bnx2x_stats_start(bp);
3574 }
3575
3576 static void bnx2x_stats_restart(struct bnx2x *bp)
3577 {
3578         bnx2x_stats_comp(bp);
3579         bnx2x_stats_start(bp);
3580 }
3581
3582 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3583 {
3584         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3585         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3586         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3587         struct {
3588                 u32 lo;
3589                 u32 hi;
3590         } diff;
3591
3592         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3593         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3594         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3595         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3596         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3597         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3598         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3599         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3600         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3601         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3602         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3603         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3604         UPDATE_STAT64(tx_stat_gt127,
3605                                 tx_stat_etherstatspkts65octetsto127octets);
3606         UPDATE_STAT64(tx_stat_gt255,
3607                                 tx_stat_etherstatspkts128octetsto255octets);
3608         UPDATE_STAT64(tx_stat_gt511,
3609                                 tx_stat_etherstatspkts256octetsto511octets);
3610         UPDATE_STAT64(tx_stat_gt1023,
3611                                 tx_stat_etherstatspkts512octetsto1023octets);
3612         UPDATE_STAT64(tx_stat_gt1518,
3613                                 tx_stat_etherstatspkts1024octetsto1522octets);
3614         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3615         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3616         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3617         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3618         UPDATE_STAT64(tx_stat_gterr,
3619                                 tx_stat_dot3statsinternalmactransmiterrors);
3620         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3621
3622         estats->pause_frames_received_hi =
3623                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3624         estats->pause_frames_received_lo =
3625                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3626
3627         estats->pause_frames_sent_hi =
3628                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3629         estats->pause_frames_sent_lo =
3630                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3631 }
3632
3633 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3634 {
3635         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3636         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3637         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3638
3639         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3640         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3641         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3642         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3643         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3644         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3645         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3646         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3647         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3648         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3649         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3650         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3651         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3652         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3653         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3654         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3655         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3656         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3657         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3658         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3659         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3660         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3661         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3662         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3663         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3664         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3665         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3666         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3667         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3668         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3669         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3670
3671         estats->pause_frames_received_hi =
3672                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3673         estats->pause_frames_received_lo =
3674                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3675         ADD_64(estats->pause_frames_received_hi,
3676                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3677                estats->pause_frames_received_lo,
3678                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3679
3680         estats->pause_frames_sent_hi =
3681                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3682         estats->pause_frames_sent_lo =
3683                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3684         ADD_64(estats->pause_frames_sent_hi,
3685                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3686                estats->pause_frames_sent_lo,
3687                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3688 }
3689
3690 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3691 {
3692         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3693         struct nig_stats *old = &(bp->port.old_nig_stats);
3694         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3695         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3696         struct {
3697                 u32 lo;
3698                 u32 hi;
3699         } diff;
3700         u32 nig_timer_max;
3701
3702         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3703                 bnx2x_bmac_stats_update(bp);
3704
3705         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3706                 bnx2x_emac_stats_update(bp);
3707
3708         else { /* unreached */
3709                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3710                 return -1;
3711         }
3712
3713         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3714                       new->brb_discard - old->brb_discard);
3715         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3716                       new->brb_truncate - old->brb_truncate);
3717
3718         UPDATE_STAT64_NIG(egress_mac_pkt0,
3719                                         etherstatspkts1024octetsto1522octets);
3720         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3721
3722         memcpy(old, new, sizeof(struct nig_stats));
3723
3724         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3725                sizeof(struct mac_stx));
3726         estats->brb_drop_hi = pstats->brb_drop_hi;
3727         estats->brb_drop_lo = pstats->brb_drop_lo;
3728
3729         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3730
3731         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3732         if (nig_timer_max != estats->nig_timer_max) {
3733                 estats->nig_timer_max = nig_timer_max;
3734                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3735         }
3736
3737         return 0;
3738 }
3739
3740 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3741 {
3742         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3743         struct tstorm_per_port_stats *tport =
3744                                         &stats->tstorm_common.port_statistics;
3745         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3746         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3747         int i;
3748
3749         memset(&(fstats->total_bytes_received_hi), 0,
3750                sizeof(struct host_func_stats) - 2*sizeof(u32));
3751         estats->error_bytes_received_hi = 0;
3752         estats->error_bytes_received_lo = 0;
3753         estats->etherstatsoverrsizepkts_hi = 0;
3754         estats->etherstatsoverrsizepkts_lo = 0;
3755         estats->no_buff_discard_hi = 0;
3756         estats->no_buff_discard_lo = 0;
3757
3758         for_each_queue(bp, i) {
3759                 struct bnx2x_fastpath *fp = &bp->fp[i];
3760                 int cl_id = fp->cl_id;
3761                 struct tstorm_per_client_stats *tclient =
3762                                 &stats->tstorm_common.client_statistics[cl_id];
3763                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3764                 struct ustorm_per_client_stats *uclient =
3765                                 &stats->ustorm_common.client_statistics[cl_id];
3766                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3767                 struct xstorm_per_client_stats *xclient =
3768                                 &stats->xstorm_common.client_statistics[cl_id];
3769                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3770                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3771                 u32 diff;
3772
3773                 /* are storm stats valid? */
3774                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3775                                                         bp->stats_counter) {
3776                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3777                            "  xstorm counter (%d) != stats_counter (%d)\n",
3778                            i, xclient->stats_counter, bp->stats_counter);
3779                         return -1;
3780                 }
3781                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3782                                                         bp->stats_counter) {
3783                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3784                            "  tstorm counter (%d) != stats_counter (%d)\n",
3785                            i, tclient->stats_counter, bp->stats_counter);
3786                         return -2;
3787                 }
3788                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3789                                                         bp->stats_counter) {
3790                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3791                            "  ustorm counter (%d) != stats_counter (%d)\n",
3792                            i, uclient->stats_counter, bp->stats_counter);
3793                         return -4;
3794                 }
3795
3796                 qstats->total_bytes_received_hi =
3797                 qstats->valid_bytes_received_hi =
3798                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3799                 qstats->total_bytes_received_lo =
3800                 qstats->valid_bytes_received_lo =
3801                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3802
3803                 qstats->error_bytes_received_hi =
3804                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3805                 qstats->error_bytes_received_lo =
3806                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3807
3808                 ADD_64(qstats->total_bytes_received_hi,
3809                        qstats->error_bytes_received_hi,
3810                        qstats->total_bytes_received_lo,
3811                        qstats->error_bytes_received_lo);
3812
3813                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3814                                         total_unicast_packets_received);
3815                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3816                                         total_multicast_packets_received);
3817                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3818                                         total_broadcast_packets_received);
3819                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3820                                         etherstatsoverrsizepkts);
3821                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3822
3823                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3824                                         total_unicast_packets_received);
3825                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3826                                         total_multicast_packets_received);
3827                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3828                                         total_broadcast_packets_received);
3829                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3830                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3831                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3832
3833                 qstats->total_bytes_transmitted_hi =
3834                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3835                 qstats->total_bytes_transmitted_lo =
3836                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3837
3838                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3839                                         total_unicast_packets_transmitted);
3840                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3841                                         total_multicast_packets_transmitted);
3842                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3843                                         total_broadcast_packets_transmitted);
3844
3845                 old_tclient->checksum_discard = tclient->checksum_discard;
3846                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3847
3848                 ADD_64(fstats->total_bytes_received_hi,
3849                        qstats->total_bytes_received_hi,
3850                        fstats->total_bytes_received_lo,
3851                        qstats->total_bytes_received_lo);
3852                 ADD_64(fstats->total_bytes_transmitted_hi,
3853                        qstats->total_bytes_transmitted_hi,
3854                        fstats->total_bytes_transmitted_lo,
3855                        qstats->total_bytes_transmitted_lo);
3856                 ADD_64(fstats->total_unicast_packets_received_hi,
3857                        qstats->total_unicast_packets_received_hi,
3858                        fstats->total_unicast_packets_received_lo,
3859                        qstats->total_unicast_packets_received_lo);
3860                 ADD_64(fstats->total_multicast_packets_received_hi,
3861                        qstats->total_multicast_packets_received_hi,
3862                        fstats->total_multicast_packets_received_lo,
3863                        qstats->total_multicast_packets_received_lo);
3864                 ADD_64(fstats->total_broadcast_packets_received_hi,
3865                        qstats->total_broadcast_packets_received_hi,
3866                        fstats->total_broadcast_packets_received_lo,
3867                        qstats->total_broadcast_packets_received_lo);
3868                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3869                        qstats->total_unicast_packets_transmitted_hi,
3870                        fstats->total_unicast_packets_transmitted_lo,
3871                        qstats->total_unicast_packets_transmitted_lo);
3872                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3873                        qstats->total_multicast_packets_transmitted_hi,
3874                        fstats->total_multicast_packets_transmitted_lo,
3875                        qstats->total_multicast_packets_transmitted_lo);
3876                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3877                        qstats->total_broadcast_packets_transmitted_hi,
3878                        fstats->total_broadcast_packets_transmitted_lo,
3879                        qstats->total_broadcast_packets_transmitted_lo);
3880                 ADD_64(fstats->valid_bytes_received_hi,
3881                        qstats->valid_bytes_received_hi,
3882                        fstats->valid_bytes_received_lo,
3883                        qstats->valid_bytes_received_lo);
3884
3885                 ADD_64(estats->error_bytes_received_hi,
3886                        qstats->error_bytes_received_hi,
3887                        estats->error_bytes_received_lo,
3888                        qstats->error_bytes_received_lo);
3889                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3890                        qstats->etherstatsoverrsizepkts_hi,
3891                        estats->etherstatsoverrsizepkts_lo,
3892                        qstats->etherstatsoverrsizepkts_lo);
3893                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3894                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3895         }
3896
3897         ADD_64(fstats->total_bytes_received_hi,
3898                estats->rx_stat_ifhcinbadoctets_hi,
3899                fstats->total_bytes_received_lo,
3900                estats->rx_stat_ifhcinbadoctets_lo);
3901
3902         memcpy(estats, &(fstats->total_bytes_received_hi),
3903                sizeof(struct host_func_stats) - 2*sizeof(u32));
3904
3905         ADD_64(estats->etherstatsoverrsizepkts_hi,
3906                estats->rx_stat_dot3statsframestoolong_hi,
3907                estats->etherstatsoverrsizepkts_lo,
3908                estats->rx_stat_dot3statsframestoolong_lo);
3909         ADD_64(estats->error_bytes_received_hi,
3910                estats->rx_stat_ifhcinbadoctets_hi,
3911                estats->error_bytes_received_lo,
3912                estats->rx_stat_ifhcinbadoctets_lo);
3913
3914         if (bp->port.pmf) {
3915                 estats->mac_filter_discard =
3916                                 le32_to_cpu(tport->mac_filter_discard);
3917                 estats->xxoverflow_discard =
3918                                 le32_to_cpu(tport->xxoverflow_discard);
3919                 estats->brb_truncate_discard =
3920                                 le32_to_cpu(tport->brb_truncate_discard);
3921                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3922         }
3923
3924         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3925
3926         bp->stats_pending = 0;
3927
3928         return 0;
3929 }
3930
3931 static void bnx2x_net_stats_update(struct bnx2x *bp)
3932 {
3933         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3934         struct net_device_stats *nstats = &bp->dev->stats;
3935         int i;
3936
3937         nstats->rx_packets =
3938                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3939                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3940                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3941
3942         nstats->tx_packets =
3943                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3944                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3945                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3946
3947         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3948
3949         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3950
3951         nstats->rx_dropped = estats->mac_discard;
3952         for_each_queue(bp, i)
3953                 nstats->rx_dropped +=
3954                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3955
3956         nstats->tx_dropped = 0;
3957
3958         nstats->multicast =
3959                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3960
3961         nstats->collisions =
3962                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3963
3964         nstats->rx_length_errors =
3965                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3966                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3967         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3968                                  bnx2x_hilo(&estats->brb_truncate_hi);
3969         nstats->rx_crc_errors =
3970                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3971         nstats->rx_frame_errors =
3972                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3973         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3974         nstats->rx_missed_errors = estats->xxoverflow_discard;
3975
3976         nstats->rx_errors = nstats->rx_length_errors +
3977                             nstats->rx_over_errors +
3978                             nstats->rx_crc_errors +
3979                             nstats->rx_frame_errors +
3980                             nstats->rx_fifo_errors +
3981                             nstats->rx_missed_errors;
3982
3983         nstats->tx_aborted_errors =
3984                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3985                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3986         nstats->tx_carrier_errors =
3987                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3988         nstats->tx_fifo_errors = 0;
3989         nstats->tx_heartbeat_errors = 0;
3990         nstats->tx_window_errors = 0;
3991
3992         nstats->tx_errors = nstats->tx_aborted_errors +
3993                             nstats->tx_carrier_errors +
3994             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3995 }
3996
3997 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3998 {
3999         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4000         int i;
4001
4002         estats->driver_xoff = 0;
4003         estats->rx_err_discard_pkt = 0;
4004         estats->rx_skb_alloc_failed = 0;
4005         estats->hw_csum_err = 0;
4006         for_each_queue(bp, i) {
4007                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4008
4009                 estats->driver_xoff += qstats->driver_xoff;
4010                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4011                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4012                 estats->hw_csum_err += qstats->hw_csum_err;
4013         }
4014 }
4015
4016 static void bnx2x_stats_update(struct bnx2x *bp)
4017 {
4018         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4019
4020         if (*stats_comp != DMAE_COMP_VAL)
4021                 return;
4022
4023         if (bp->port.pmf)
4024                 bnx2x_hw_stats_update(bp);
4025
4026         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4027                 BNX2X_ERR("storm stats were not updated for 3 times\n");
4028                 bnx2x_panic();
4029                 return;
4030         }
4031
4032         bnx2x_net_stats_update(bp);
4033         bnx2x_drv_stats_update(bp);
4034
4035         if (bp->msglevel & NETIF_MSG_TIMER) {
4036                 struct tstorm_per_client_stats *old_tclient =
4037                                                         &bp->fp->old_tclient;
4038                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4039                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4040                 struct net_device_stats *nstats = &bp->dev->stats;
4041                 int i;
4042
4043                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4044                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4045                                   "  tx pkt (%lx)\n",
4046                        bnx2x_tx_avail(bp->fp),
4047                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4048                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4049                                   "  rx pkt (%lx)\n",
4050                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4051                              bp->fp->rx_comp_cons),
4052                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4053                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4054                                   "brb truncate %u\n",
4055                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4056                        qstats->driver_xoff,
4057                        estats->brb_drop_lo, estats->brb_truncate_lo);
4058                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4059                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4060                         "mac_discard %u  mac_filter_discard %u  "
4061                         "xxovrflow_discard %u  brb_truncate_discard %u  "
4062                         "ttl0_discard %u\n",
4063                        le32_to_cpu(old_tclient->checksum_discard),
4064                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4065                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4066                        estats->mac_discard, estats->mac_filter_discard,
4067                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4068                        le32_to_cpu(old_tclient->ttl0_discard));
4069
4070                 for_each_queue(bp, i) {
4071                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4072                                bnx2x_fp(bp, i, tx_pkt),
4073                                bnx2x_fp(bp, i, rx_pkt),
4074                                bnx2x_fp(bp, i, rx_calls));
4075                 }
4076         }
4077
4078         bnx2x_hw_stats_post(bp);
4079         bnx2x_storm_stats_post(bp);
4080 }
4081
4082 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4083 {
4084         struct dmae_command *dmae;
4085         u32 opcode;
4086         int loader_idx = PMF_DMAE_C(bp);
4087         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4088
4089         bp->executer_idx = 0;
4090
4091         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4092                   DMAE_CMD_C_ENABLE |
4093                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4094 #ifdef __BIG_ENDIAN
4095                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4096 #else
4097                   DMAE_CMD_ENDIANITY_DW_SWAP |
4098 #endif
4099                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4100                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4101
4102         if (bp->port.port_stx) {
4103
4104                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4105                 if (bp->func_stx)
4106                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4107                 else
4108                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4109                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4110                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4111                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4112                 dmae->dst_addr_hi = 0;
4113                 dmae->len = sizeof(struct host_port_stats) >> 2;
4114                 if (bp->func_stx) {
4115                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4116                         dmae->comp_addr_hi = 0;
4117                         dmae->comp_val = 1;
4118                 } else {
4119                         dmae->comp_addr_lo =
4120                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4121                         dmae->comp_addr_hi =
4122                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4123                         dmae->comp_val = DMAE_COMP_VAL;
4124
4125                         *stats_comp = 0;
4126                 }
4127         }
4128
4129         if (bp->func_stx) {
4130
4131                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4132                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4133                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4134                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4135                 dmae->dst_addr_lo = bp->func_stx >> 2;
4136                 dmae->dst_addr_hi = 0;
4137                 dmae->len = sizeof(struct host_func_stats) >> 2;
4138                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4139                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4140                 dmae->comp_val = DMAE_COMP_VAL;
4141
4142                 *stats_comp = 0;
4143         }
4144 }
4145
4146 static void bnx2x_stats_stop(struct bnx2x *bp)
4147 {
4148         int update = 0;
4149
4150         bnx2x_stats_comp(bp);
4151
4152         if (bp->port.pmf)
4153                 update = (bnx2x_hw_stats_update(bp) == 0);
4154
4155         update |= (bnx2x_storm_stats_update(bp) == 0);
4156
4157         if (update) {
4158                 bnx2x_net_stats_update(bp);
4159
4160                 if (bp->port.pmf)
4161                         bnx2x_port_stats_stop(bp);
4162
4163                 bnx2x_hw_stats_post(bp);
4164                 bnx2x_stats_comp(bp);
4165         }
4166 }
4167
4168 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4169 {
4170 }
4171
4172 static const struct {
4173         void (*action)(struct bnx2x *bp);
4174         enum bnx2x_stats_state next_state;
4175 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4176 /* state        event   */
4177 {
4178 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4179 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4180 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4181 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4182 },
4183 {
4184 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4185 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4186 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4187 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4188 }
4189 };
4190
4191 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4192 {
4193         enum bnx2x_stats_state state = bp->stats_state;
4194
4195         bnx2x_stats_stm[state][event].action(bp);
4196         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4197
4198         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4199                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4200                    state, event, bp->stats_state);
4201 }
4202
4203 static void bnx2x_timer(unsigned long data)
4204 {
4205         struct bnx2x *bp = (struct bnx2x *) data;
4206
4207         if (!netif_running(bp->dev))
4208                 return;
4209
4210         if (atomic_read(&bp->intr_sem) != 0)
4211                 goto timer_restart;
4212
4213         if (poll) {
4214                 struct bnx2x_fastpath *fp = &bp->fp[0];
4215                 int rc;
4216
4217                 bnx2x_tx_int(fp);
4218                 rc = bnx2x_rx_int(fp, 1000);
4219         }
4220
4221         if (!BP_NOMCP(bp)) {
4222                 int func = BP_FUNC(bp);
4223                 u32 drv_pulse;
4224                 u32 mcp_pulse;
4225
4226                 ++bp->fw_drv_pulse_wr_seq;
4227                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4228                 /* TBD - add SYSTEM_TIME */
4229                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4230                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4231
4232                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4233                              MCP_PULSE_SEQ_MASK);
4234                 /* The delta between driver pulse and mcp response
4235                  * should be 1 (before mcp response) or 0 (after mcp response)
4236                  */
4237                 if ((drv_pulse != mcp_pulse) &&
4238                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4239                         /* someone lost a heartbeat... */
4240                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4241                                   drv_pulse, mcp_pulse);
4242                 }
4243         }
4244
4245         if ((bp->state == BNX2X_STATE_OPEN) ||
4246             (bp->state == BNX2X_STATE_DISABLED))
4247                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4248
4249 timer_restart:
4250         mod_timer(&bp->timer, jiffies + bp->current_interval);
4251 }
4252
4253 /* end of Statistics */
4254
4255 /* nic init */
4256
4257 /*
4258  * nic init service functions
4259  */
4260
4261 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4262 {
4263         int port = BP_PORT(bp);
4264
4265         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4266                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4267                         sizeof(struct ustorm_status_block)/4);
4268         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4269                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4270                         sizeof(struct cstorm_status_block)/4);
4271 }
4272
4273 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4274                           dma_addr_t mapping, int sb_id)
4275 {
4276         int port = BP_PORT(bp);
4277         int func = BP_FUNC(bp);
4278         int index;
4279         u64 section;
4280
4281         /* USTORM */
4282         section = ((u64)mapping) + offsetof(struct host_status_block,
4283                                             u_status_block);
4284         sb->u_status_block.status_block_id = sb_id;
4285
4286         REG_WR(bp, BAR_USTRORM_INTMEM +
4287                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4288         REG_WR(bp, BAR_USTRORM_INTMEM +
4289                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4290                U64_HI(section));
4291         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4292                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4293
4294         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4295                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4296                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4297
4298         /* CSTORM */
4299         section = ((u64)mapping) + offsetof(struct host_status_block,
4300                                             c_status_block);
4301         sb->c_status_block.status_block_id = sb_id;
4302
4303         REG_WR(bp, BAR_CSTRORM_INTMEM +
4304                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4305         REG_WR(bp, BAR_CSTRORM_INTMEM +
4306                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4307                U64_HI(section));
4308         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4309                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4310
4311         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4312                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4313                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4314
4315         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4316 }
4317
4318 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4319 {
4320         int func = BP_FUNC(bp);
4321
4322         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4323                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4324                         sizeof(struct tstorm_def_status_block)/4);
4325         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4326                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4327                         sizeof(struct ustorm_def_status_block)/4);
4328         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4329                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4330                         sizeof(struct cstorm_def_status_block)/4);
4331         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4332                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4333                         sizeof(struct xstorm_def_status_block)/4);
4334 }
4335
4336 static void bnx2x_init_def_sb(struct bnx2x *bp,
4337                               struct host_def_status_block *def_sb,
4338                               dma_addr_t mapping, int sb_id)
4339 {
4340         int port = BP_PORT(bp);
4341         int func = BP_FUNC(bp);
4342         int index, val, reg_offset;
4343         u64 section;
4344
4345         /* ATTN */
4346         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4347                                             atten_status_block);
4348         def_sb->atten_status_block.status_block_id = sb_id;
4349
4350         bp->attn_state = 0;
4351
4352         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4353                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4354
4355         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4356                 bp->attn_group[index].sig[0] = REG_RD(bp,
4357                                                      reg_offset + 0x10*index);
4358                 bp->attn_group[index].sig[1] = REG_RD(bp,
4359                                                reg_offset + 0x4 + 0x10*index);
4360                 bp->attn_group[index].sig[2] = REG_RD(bp,
4361                                                reg_offset + 0x8 + 0x10*index);
4362                 bp->attn_group[index].sig[3] = REG_RD(bp,
4363                                                reg_offset + 0xc + 0x10*index);
4364         }
4365
4366         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4367                              HC_REG_ATTN_MSG0_ADDR_L);
4368
4369         REG_WR(bp, reg_offset, U64_LO(section));
4370         REG_WR(bp, reg_offset + 4, U64_HI(section));
4371
4372         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4373
4374         val = REG_RD(bp, reg_offset);
4375         val |= sb_id;
4376         REG_WR(bp, reg_offset, val);
4377
4378         /* USTORM */
4379         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4380                                             u_def_status_block);
4381         def_sb->u_def_status_block.status_block_id = sb_id;
4382
4383         REG_WR(bp, BAR_USTRORM_INTMEM +
4384                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4385         REG_WR(bp, BAR_USTRORM_INTMEM +
4386                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4387                U64_HI(section));
4388         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4389                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4390
4391         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4392                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4393                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4394
4395         /* CSTORM */
4396         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4397                                             c_def_status_block);
4398         def_sb->c_def_status_block.status_block_id = sb_id;
4399
4400         REG_WR(bp, BAR_CSTRORM_INTMEM +
4401                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4402         REG_WR(bp, BAR_CSTRORM_INTMEM +
4403                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4404                U64_HI(section));
4405         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4406                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4407
4408         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4409                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4410                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4411
4412         /* TSTORM */
4413         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4414                                             t_def_status_block);
4415         def_sb->t_def_status_block.status_block_id = sb_id;
4416
4417         REG_WR(bp, BAR_TSTRORM_INTMEM +
4418                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4419         REG_WR(bp, BAR_TSTRORM_INTMEM +
4420                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4421                U64_HI(section));
4422         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4423                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4424
4425         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4426                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4427                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4428
4429         /* XSTORM */
4430         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4431                                             x_def_status_block);
4432         def_sb->x_def_status_block.status_block_id = sb_id;
4433
4434         REG_WR(bp, BAR_XSTRORM_INTMEM +
4435                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4436         REG_WR(bp, BAR_XSTRORM_INTMEM +
4437                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4438                U64_HI(section));
4439         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4440                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4441
4442         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4443                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4444                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4445
4446         bp->stats_pending = 0;
4447         bp->set_mac_pending = 0;
4448
4449         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4450 }
4451
4452 static void bnx2x_update_coalesce(struct bnx2x *bp)
4453 {
4454         int port = BP_PORT(bp);
4455         int i;
4456
4457         for_each_queue(bp, i) {
4458                 int sb_id = bp->fp[i].sb_id;
4459
4460                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4461                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4462                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4463                                                     U_SB_ETH_RX_CQ_INDEX),
4464                         bp->rx_ticks/12);
4465                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4466                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4467                                                      U_SB_ETH_RX_CQ_INDEX),
4468                          (bp->rx_ticks/12) ? 0 : 1);
4469
4470                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4471                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4472                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4473                                                     C_SB_ETH_TX_CQ_INDEX),
4474                         bp->tx_ticks/12);
4475                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4476                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4477                                                      C_SB_ETH_TX_CQ_INDEX),
4478                          (bp->tx_ticks/12) ? 0 : 1);
4479         }
4480 }
4481
4482 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4483                                        struct bnx2x_fastpath *fp, int last)
4484 {
4485         int i;
4486
4487         for (i = 0; i < last; i++) {
4488                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4489                 struct sk_buff *skb = rx_buf->skb;
4490
4491                 if (skb == NULL) {
4492                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4493                         continue;
4494                 }
4495
4496                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4497                         pci_unmap_single(bp->pdev,
4498                                          pci_unmap_addr(rx_buf, mapping),
4499                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4500
4501                 dev_kfree_skb(skb);
4502                 rx_buf->skb = NULL;
4503         }
4504 }
4505
4506 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4507 {
4508         int func = BP_FUNC(bp);
4509         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4510                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4511         u16 ring_prod, cqe_ring_prod;
4512         int i, j;
4513
4514         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4515         DP(NETIF_MSG_IFUP,
4516            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4517
4518         if (bp->flags & TPA_ENABLE_FLAG) {
4519
4520                 for_each_rx_queue(bp, j) {
4521                         struct bnx2x_fastpath *fp = &bp->fp[j];
4522
4523                         for (i = 0; i < max_agg_queues; i++) {
4524                                 fp->tpa_pool[i].skb =
4525                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4526                                 if (!fp->tpa_pool[i].skb) {
4527                                         BNX2X_ERR("Failed to allocate TPA "
4528                                                   "skb pool for queue[%d] - "
4529                                                   "disabling TPA on this "
4530                                                   "queue!\n", j);
4531                                         bnx2x_free_tpa_pool(bp, fp, i);
4532                                         fp->disable_tpa = 1;
4533                                         break;
4534                                 }
4535                                 pci_unmap_addr_set((struct sw_rx_bd *)
4536                                                         &bp->fp->tpa_pool[i],
4537                                                    mapping, 0);
4538                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4539                         }
4540                 }
4541         }
4542
4543         for_each_rx_queue(bp, j) {
4544                 struct bnx2x_fastpath *fp = &bp->fp[j];
4545
4546                 fp->rx_bd_cons = 0;
4547                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4548                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4549
4550                 /* "next page" elements initialization */
4551                 /* SGE ring */
4552                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4553                         struct eth_rx_sge *sge;
4554
4555                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4556                         sge->addr_hi =
4557                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4558                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4559                         sge->addr_lo =
4560                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4561                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4562                 }
4563
4564                 bnx2x_init_sge_ring_bit_mask(fp);
4565
4566                 /* RX BD ring */
4567                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4568                         struct eth_rx_bd *rx_bd;
4569
4570                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4571                         rx_bd->addr_hi =
4572                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4573                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4574                         rx_bd->addr_lo =
4575                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4576                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4577                 }
4578
4579                 /* CQ ring */
4580                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4581                         struct eth_rx_cqe_next_page *nextpg;
4582
4583                         nextpg = (struct eth_rx_cqe_next_page *)
4584                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4585                         nextpg->addr_hi =
4586                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4587                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4588                         nextpg->addr_lo =
4589                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4590                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4591                 }
4592
4593                 /* Allocate SGEs and initialize the ring elements */
4594                 for (i = 0, ring_prod = 0;
4595                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4596
4597                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4598                                 BNX2X_ERR("was only able to allocate "
4599                                           "%d rx sges\n", i);
4600                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4601                                 /* Cleanup already allocated elements */
4602                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4603                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4604                                 fp->disable_tpa = 1;
4605                                 ring_prod = 0;
4606                                 break;
4607                         }
4608                         ring_prod = NEXT_SGE_IDX(ring_prod);
4609                 }
4610                 fp->rx_sge_prod = ring_prod;
4611
4612                 /* Allocate BDs and initialize BD ring */
4613                 fp->rx_comp_cons = 0;
4614                 cqe_ring_prod = ring_prod = 0;
4615                 for (i = 0; i < bp->rx_ring_size; i++) {
4616                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4617                                 BNX2X_ERR("was only able to allocate "
4618                                           "%d rx skbs on queue[%d]\n", i, j);
4619                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4620                                 break;
4621                         }
4622                         ring_prod = NEXT_RX_IDX(ring_prod);
4623                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4624                         WARN_ON(ring_prod <= i);
4625                 }
4626
4627                 fp->rx_bd_prod = ring_prod;
4628                 /* must not have more available CQEs than BDs */
4629                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4630                                        cqe_ring_prod);
4631                 fp->rx_pkt = fp->rx_calls = 0;
4632
4633                 /* Warning!
4634                  * this will generate an interrupt (to the TSTORM)
4635                  * must only be done after chip is initialized
4636                  */
4637                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4638                                      fp->rx_sge_prod);
4639                 if (j != 0)
4640                         continue;
4641
4642                 REG_WR(bp, BAR_USTRORM_INTMEM +
4643                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4644                        U64_LO(fp->rx_comp_mapping));
4645                 REG_WR(bp, BAR_USTRORM_INTMEM +
4646                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4647                        U64_HI(fp->rx_comp_mapping));
4648         }
4649 }
4650
4651 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4652 {
4653         int i, j;
4654
4655         for_each_tx_queue(bp, j) {
4656                 struct bnx2x_fastpath *fp = &bp->fp[j];
4657
4658                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4659                         struct eth_tx_bd *tx_bd =
4660                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4661
4662                         tx_bd->addr_hi =
4663                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4664                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4665                         tx_bd->addr_lo =
4666                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4667                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4668                 }
4669
4670                 fp->tx_pkt_prod = 0;
4671                 fp->tx_pkt_cons = 0;
4672                 fp->tx_bd_prod = 0;
4673                 fp->tx_bd_cons = 0;
4674                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4675                 fp->tx_pkt = 0;
4676         }
4677 }
4678
4679 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4680 {
4681         int func = BP_FUNC(bp);
4682
4683         spin_lock_init(&bp->spq_lock);
4684
4685         bp->spq_left = MAX_SPQ_PENDING;
4686         bp->spq_prod_idx = 0;
4687         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4688         bp->spq_prod_bd = bp->spq;
4689         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4690
4691         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4692                U64_LO(bp->spq_mapping));
4693         REG_WR(bp,
4694                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4695                U64_HI(bp->spq_mapping));
4696
4697         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4698                bp->spq_prod_idx);
4699 }
4700
4701 static void bnx2x_init_context(struct bnx2x *bp)
4702 {
4703         int i;
4704
4705         for_each_queue(bp, i) {
4706                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4707                 struct bnx2x_fastpath *fp = &bp->fp[i];
4708                 u8 cl_id = fp->cl_id;
4709                 u8 sb_id = fp->sb_id;
4710
4711                 context->ustorm_st_context.common.sb_index_numbers =
4712                                                 BNX2X_RX_SB_INDEX_NUM;
4713                 context->ustorm_st_context.common.clientId = cl_id;
4714                 context->ustorm_st_context.common.status_block_id = sb_id;
4715                 context->ustorm_st_context.common.flags =
4716                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4717                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4718                 context->ustorm_st_context.common.statistics_counter_id =
4719                                                 cl_id;
4720                 context->ustorm_st_context.common.mc_alignment_log_size =
4721                                                 BNX2X_RX_ALIGN_SHIFT;
4722                 context->ustorm_st_context.common.bd_buff_size =
4723                                                 bp->rx_buf_size;
4724                 context->ustorm_st_context.common.bd_page_base_hi =
4725                                                 U64_HI(fp->rx_desc_mapping);
4726                 context->ustorm_st_context.common.bd_page_base_lo =
4727                                                 U64_LO(fp->rx_desc_mapping);
4728                 if (!fp->disable_tpa) {
4729                         context->ustorm_st_context.common.flags |=
4730                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4731                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4732                         context->ustorm_st_context.common.sge_buff_size =
4733                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4734                                          (u32)0xffff);
4735                         context->ustorm_st_context.common.sge_page_base_hi =
4736                                                 U64_HI(fp->rx_sge_mapping);
4737                         context->ustorm_st_context.common.sge_page_base_lo =
4738                                                 U64_LO(fp->rx_sge_mapping);
4739                 }
4740
4741                 context->ustorm_ag_context.cdu_usage =
4742                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4743                                                CDU_REGION_NUMBER_UCM_AG,
4744                                                ETH_CONNECTION_TYPE);
4745
4746                 context->xstorm_st_context.tx_bd_page_base_hi =
4747                                                 U64_HI(fp->tx_desc_mapping);
4748                 context->xstorm_st_context.tx_bd_page_base_lo =
4749                                                 U64_LO(fp->tx_desc_mapping);
4750                 context->xstorm_st_context.db_data_addr_hi =
4751                                                 U64_HI(fp->tx_prods_mapping);
4752                 context->xstorm_st_context.db_data_addr_lo =
4753                                                 U64_LO(fp->tx_prods_mapping);
4754                 context->xstorm_st_context.statistics_data = (cl_id |
4755                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4756                 context->cstorm_st_context.sb_index_number =
4757                                                 C_SB_ETH_TX_CQ_INDEX;
4758                 context->cstorm_st_context.status_block_id = sb_id;
4759
4760                 context->xstorm_ag_context.cdu_reserved =
4761                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4762                                                CDU_REGION_NUMBER_XCM_AG,
4763                                                ETH_CONNECTION_TYPE);
4764         }
4765 }
4766
4767 static void bnx2x_init_ind_table(struct bnx2x *bp)
4768 {
4769         int func = BP_FUNC(bp);
4770         int i;
4771
4772         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4773                 return;
4774
4775         DP(NETIF_MSG_IFUP,
4776            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4777         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4778                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4779                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4780                         bp->fp->cl_id + (i % bp->num_rx_queues));
4781 }
4782
4783 static void bnx2x_set_client_config(struct bnx2x *bp)
4784 {
4785         struct tstorm_eth_client_config tstorm_client = {0};
4786         int port = BP_PORT(bp);
4787         int i;
4788
4789         tstorm_client.mtu = bp->dev->mtu;
4790         tstorm_client.config_flags =
4791                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4792                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4793 #ifdef BCM_VLAN
4794         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4795                 tstorm_client.config_flags |=
4796                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4797                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4798         }
4799 #endif
4800
4801         if (bp->flags & TPA_ENABLE_FLAG) {
4802                 tstorm_client.max_sges_for_packet =
4803                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4804                 tstorm_client.max_sges_for_packet =
4805                         ((tstorm_client.max_sges_for_packet +
4806                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4807                         PAGES_PER_SGE_SHIFT;
4808
4809                 tstorm_client.config_flags |=
4810                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4811         }
4812
4813         for_each_queue(bp, i) {
4814                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4815
4816                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4817                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4818                        ((u32 *)&tstorm_client)[0]);
4819                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4820                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4821                        ((u32 *)&tstorm_client)[1]);
4822         }
4823
4824         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4825            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4826 }
4827
4828 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4829 {
4830         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4831         int mode = bp->rx_mode;
4832         int mask = (1 << BP_L_ID(bp));
4833         int func = BP_FUNC(bp);
4834         int port = BP_PORT(bp);
4835         int i;
4836         /* All but management unicast packets should pass to the host as well */
4837         u32 llh_mask =
4838                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4839                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4840                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4841                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4842
4843         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4844
4845         switch (mode) {
4846         case BNX2X_RX_MODE_NONE: /* no Rx */
4847                 tstorm_mac_filter.ucast_drop_all = mask;
4848                 tstorm_mac_filter.mcast_drop_all = mask;
4849                 tstorm_mac_filter.bcast_drop_all = mask;
4850                 break;
4851
4852         case BNX2X_RX_MODE_NORMAL:
4853                 tstorm_mac_filter.bcast_accept_all = mask;
4854                 break;
4855
4856         case BNX2X_RX_MODE_ALLMULTI:
4857                 tstorm_mac_filter.mcast_accept_all = mask;
4858                 tstorm_mac_filter.bcast_accept_all = mask;
4859                 break;
4860
4861         case BNX2X_RX_MODE_PROMISC:
4862                 tstorm_mac_filter.ucast_accept_all = mask;
4863                 tstorm_mac_filter.mcast_accept_all = mask;
4864                 tstorm_mac_filter.bcast_accept_all = mask;
4865                 /* pass management unicast packets as well */
4866                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4867                 break;
4868
4869         default:
4870                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4871                 break;
4872         }
4873
4874         REG_WR(bp,
4875                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
4876                llh_mask);
4877
4878         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4879                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4880                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4881                        ((u32 *)&tstorm_mac_filter)[i]);
4882
4883 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4884                    ((u32 *)&tstorm_mac_filter)[i]); */
4885         }
4886
4887         if (mode != BNX2X_RX_MODE_NONE)
4888                 bnx2x_set_client_config(bp);
4889 }
4890
4891 static void bnx2x_init_internal_common(struct bnx2x *bp)
4892 {
4893         int i;
4894
4895         if (bp->flags & TPA_ENABLE_FLAG) {
4896                 struct tstorm_eth_tpa_exist tpa = {0};
4897
4898                 tpa.tpa_exist = 1;
4899
4900                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4901                        ((u32 *)&tpa)[0]);
4902                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4903                        ((u32 *)&tpa)[1]);
4904         }
4905
4906         /* Zero this manually as its initialization is
4907            currently missing in the initTool */
4908         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4909                 REG_WR(bp, BAR_USTRORM_INTMEM +
4910                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4911 }
4912
4913 static void bnx2x_init_internal_port(struct bnx2x *bp)
4914 {
4915         int port = BP_PORT(bp);
4916
4917         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4918         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4919         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4920         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4921 }
4922
4923 /* Calculates the sum of vn_min_rates.
4924    It's needed for further normalizing of the min_rates.
4925    Returns:
4926      sum of vn_min_rates.
4927        or
4928      0 - if all the min_rates are 0.
4929      In the later case fainess algorithm should be deactivated.
4930      If not all min_rates are zero then those that are zeroes will be set to 1.
4931  */
4932 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4933 {
4934         int all_zero = 1;
4935         int port = BP_PORT(bp);
4936         int vn;
4937
4938         bp->vn_weight_sum = 0;
4939         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4940                 int func = 2*vn + port;
4941                 u32 vn_cfg =
4942                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4943                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4944                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4945
4946                 /* Skip hidden vns */
4947                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4948                         continue;
4949
4950                 /* If min rate is zero - set it to 1 */
4951                 if (!vn_min_rate)
4952                         vn_min_rate = DEF_MIN_RATE;
4953                 else
4954                         all_zero = 0;
4955
4956                 bp->vn_weight_sum += vn_min_rate;
4957         }
4958
4959         /* ... only if all min rates are zeros - disable fairness */
4960         if (all_zero)
4961                 bp->vn_weight_sum = 0;
4962 }
4963
4964 static void bnx2x_init_internal_func(struct bnx2x *bp)
4965 {
4966         struct tstorm_eth_function_common_config tstorm_config = {0};
4967         struct stats_indication_flags stats_flags = {0};
4968         int port = BP_PORT(bp);
4969         int func = BP_FUNC(bp);
4970         int i, j;
4971         u32 offset;
4972         u16 max_agg_size;
4973
4974         if (is_multi(bp)) {
4975                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4976                 tstorm_config.rss_result_mask = MULTI_MASK;
4977         }
4978         if (IS_E1HMF(bp))
4979                 tstorm_config.config_flags |=
4980                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4981
4982         tstorm_config.leading_client_id = BP_L_ID(bp);
4983
4984         REG_WR(bp, BAR_TSTRORM_INTMEM +
4985                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4986                (*(u32 *)&tstorm_config));
4987
4988         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4989         bnx2x_set_storm_rx_mode(bp);
4990
4991         for_each_queue(bp, i) {
4992                 u8 cl_id = bp->fp[i].cl_id;
4993
4994                 /* reset xstorm per client statistics */
4995                 offset = BAR_XSTRORM_INTMEM +
4996                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4997                 for (j = 0;
4998                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4999                         REG_WR(bp, offset + j*4, 0);
5000
5001                 /* reset tstorm per client statistics */
5002                 offset = BAR_TSTRORM_INTMEM +
5003                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5004                 for (j = 0;
5005                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5006                         REG_WR(bp, offset + j*4, 0);
5007
5008                 /* reset ustorm per client statistics */
5009                 offset = BAR_USTRORM_INTMEM +
5010                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5011                 for (j = 0;
5012                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5013                         REG_WR(bp, offset + j*4, 0);
5014         }
5015
5016         /* Init statistics related context */
5017         stats_flags.collect_eth = 1;
5018
5019         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5020                ((u32 *)&stats_flags)[0]);
5021         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5022                ((u32 *)&stats_flags)[1]);
5023
5024         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5025                ((u32 *)&stats_flags)[0]);
5026         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5027                ((u32 *)&stats_flags)[1]);
5028
5029         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5030                ((u32 *)&stats_flags)[0]);
5031         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5032                ((u32 *)&stats_flags)[1]);
5033
5034         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5035                ((u32 *)&stats_flags)[0]);
5036         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5037                ((u32 *)&stats_flags)[1]);
5038
5039         REG_WR(bp, BAR_XSTRORM_INTMEM +
5040                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5041                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5042         REG_WR(bp, BAR_XSTRORM_INTMEM +
5043                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5044                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5045
5046         REG_WR(bp, BAR_TSTRORM_INTMEM +
5047                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5048                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5049         REG_WR(bp, BAR_TSTRORM_INTMEM +
5050                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5051                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5052
5053         REG_WR(bp, BAR_USTRORM_INTMEM +
5054                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5055                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5056         REG_WR(bp, BAR_USTRORM_INTMEM +
5057                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5058                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5059
5060         if (CHIP_IS_E1H(bp)) {
5061                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5062                         IS_E1HMF(bp));
5063                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5064                         IS_E1HMF(bp));
5065                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5066                         IS_E1HMF(bp));
5067                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5068                         IS_E1HMF(bp));
5069
5070                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5071                          bp->e1hov);
5072         }
5073
5074         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5075         max_agg_size =
5076                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5077                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5078                     (u32)0xffff);
5079         for_each_rx_queue(bp, i) {
5080                 struct bnx2x_fastpath *fp = &bp->fp[i];
5081
5082                 REG_WR(bp, BAR_USTRORM_INTMEM +
5083                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5084                        U64_LO(fp->rx_comp_mapping));
5085                 REG_WR(bp, BAR_USTRORM_INTMEM +
5086                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5087                        U64_HI(fp->rx_comp_mapping));
5088
5089                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5090                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5091                          max_agg_size);
5092         }
5093
5094         /* dropless flow control */
5095         if (CHIP_IS_E1H(bp)) {
5096                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5097
5098                 rx_pause.bd_thr_low = 250;
5099                 rx_pause.cqe_thr_low = 250;
5100                 rx_pause.cos = 1;
5101                 rx_pause.sge_thr_low = 0;
5102                 rx_pause.bd_thr_high = 350;
5103                 rx_pause.cqe_thr_high = 350;
5104                 rx_pause.sge_thr_high = 0;
5105
5106                 for_each_rx_queue(bp, i) {
5107                         struct bnx2x_fastpath *fp = &bp->fp[i];
5108
5109                         if (!fp->disable_tpa) {
5110                                 rx_pause.sge_thr_low = 150;
5111                                 rx_pause.sge_thr_high = 250;
5112                         }
5113
5114
5115                         offset = BAR_USTRORM_INTMEM +
5116                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5117                                                                    fp->cl_id);
5118                         for (j = 0;
5119                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5120                              j++)
5121                                 REG_WR(bp, offset + j*4,
5122                                        ((u32 *)&rx_pause)[j]);
5123                 }
5124         }
5125
5126         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5127
5128         /* Init rate shaping and fairness contexts */
5129         if (IS_E1HMF(bp)) {
5130                 int vn;
5131
5132                 /* During init there is no active link
5133                    Until link is up, set link rate to 10Gbps */
5134                 bp->link_vars.line_speed = SPEED_10000;
5135                 bnx2x_init_port_minmax(bp);
5136
5137                 bnx2x_calc_vn_weight_sum(bp);
5138
5139                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5140                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5141
5142                 /* Enable rate shaping and fairness */
5143                 bp->cmng.flags.cmng_enables =
5144                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5145                 if (bp->vn_weight_sum)
5146                         bp->cmng.flags.cmng_enables |=
5147                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5148                 else
5149                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5150                            "  fairness will be disabled\n");
5151         } else {
5152                 /* rate shaping and fairness are disabled */
5153                 DP(NETIF_MSG_IFUP,
5154                    "single function mode  minmax will be disabled\n");
5155         }
5156
5157
5158         /* Store it to internal memory */
5159         if (bp->port.pmf)
5160                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5161                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5162                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5163                                ((u32 *)(&bp->cmng))[i]);
5164 }
5165
5166 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5167 {
5168         switch (load_code) {
5169         case FW_MSG_CODE_DRV_LOAD_COMMON:
5170                 bnx2x_init_internal_common(bp);
5171                 /* no break */
5172
5173         case FW_MSG_CODE_DRV_LOAD_PORT:
5174                 bnx2x_init_internal_port(bp);
5175                 /* no break */
5176
5177         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5178                 bnx2x_init_internal_func(bp);
5179                 break;
5180
5181         default:
5182                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5183                 break;
5184         }
5185 }
5186
5187 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5188 {
5189         int i;
5190
5191         for_each_queue(bp, i) {
5192                 struct bnx2x_fastpath *fp = &bp->fp[i];
5193
5194                 fp->bp = bp;
5195                 fp->state = BNX2X_FP_STATE_CLOSED;
5196                 fp->index = i;
5197                 fp->cl_id = BP_L_ID(bp) + i;
5198                 fp->sb_id = fp->cl_id;
5199                 DP(NETIF_MSG_IFUP,
5200                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5201                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5202                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5203                               fp->sb_id);
5204                 bnx2x_update_fpsb_idx(fp);
5205         }
5206
5207         /* ensure status block indices were read */
5208         rmb();
5209
5210
5211         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5212                           DEF_SB_ID);
5213         bnx2x_update_dsb_idx(bp);
5214         bnx2x_update_coalesce(bp);
5215         bnx2x_init_rx_rings(bp);
5216         bnx2x_init_tx_ring(bp);
5217         bnx2x_init_sp_ring(bp);
5218         bnx2x_init_context(bp);
5219         bnx2x_init_internal(bp, load_code);
5220         bnx2x_init_ind_table(bp);
5221         bnx2x_stats_init(bp);
5222
5223         /* At this point, we are ready for interrupts */
5224         atomic_set(&bp->intr_sem, 0);
5225
5226         /* flush all before enabling interrupts */
5227         mb();
5228         mmiowb();
5229
5230         bnx2x_int_enable(bp);
5231
5232         /* Check for SPIO5 */
5233         bnx2x_attn_int_deasserted0(bp,
5234                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5235                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5236 }
5237
5238 /* end of nic init */
5239
5240 /*
5241  * gzip service functions
5242  */
5243
5244 static int bnx2x_gunzip_init(struct bnx2x *bp)
5245 {
5246         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5247                                               &bp->gunzip_mapping);
5248         if (bp->gunzip_buf  == NULL)
5249                 goto gunzip_nomem1;
5250
5251         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5252         if (bp->strm  == NULL)
5253                 goto gunzip_nomem2;
5254
5255         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5256                                       GFP_KERNEL);
5257         if (bp->strm->workspace == NULL)
5258                 goto gunzip_nomem3;
5259
5260         return 0;
5261
5262 gunzip_nomem3:
5263         kfree(bp->strm);
5264         bp->strm = NULL;
5265
5266 gunzip_nomem2:
5267         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5268                             bp->gunzip_mapping);
5269         bp->gunzip_buf = NULL;
5270
5271 gunzip_nomem1:
5272         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5273                " un-compression\n", bp->dev->name);
5274         return -ENOMEM;
5275 }
5276
5277 static void bnx2x_gunzip_end(struct bnx2x *bp)
5278 {
5279         kfree(bp->strm->workspace);
5280
5281         kfree(bp->strm);
5282         bp->strm = NULL;
5283
5284         if (bp->gunzip_buf) {
5285                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5286                                     bp->gunzip_mapping);
5287                 bp->gunzip_buf = NULL;
5288         }
5289 }
5290
5291 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5292 {
5293         int n, rc;
5294
5295         /* check gzip header */
5296         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5297                 BNX2X_ERR("Bad gzip header\n");
5298                 return -EINVAL;
5299         }
5300
5301         n = 10;
5302
5303 #define FNAME                           0x8
5304
5305         if (zbuf[3] & FNAME)
5306                 while ((zbuf[n++] != 0) && (n < len));
5307
5308         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5309         bp->strm->avail_in = len - n;
5310         bp->strm->next_out = bp->gunzip_buf;
5311         bp->strm->avail_out = FW_BUF_SIZE;
5312
5313         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5314         if (rc != Z_OK)
5315                 return rc;
5316
5317         rc = zlib_inflate(bp->strm, Z_FINISH);
5318         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5319                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5320                        bp->dev->name, bp->strm->msg);
5321
5322         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5323         if (bp->gunzip_outlen & 0x3)
5324                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5325                                     " gunzip_outlen (%d) not aligned\n",
5326                        bp->dev->name, bp->gunzip_outlen);
5327         bp->gunzip_outlen >>= 2;
5328
5329         zlib_inflateEnd(bp->strm);
5330
5331         if (rc == Z_STREAM_END)
5332                 return 0;
5333
5334         return rc;
5335 }
5336
5337 /* nic load/unload */
5338
5339 /*
5340  * General service functions
5341  */
5342
5343 /* send a NIG loopback debug packet */
5344 static void bnx2x_lb_pckt(struct bnx2x *bp)
5345 {
5346         u32 wb_write[3];
5347
5348         /* Ethernet source and destination addresses */
5349         wb_write[0] = 0x55555555;
5350         wb_write[1] = 0x55555555;
5351         wb_write[2] = 0x20;             /* SOP */
5352         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5353
5354         /* NON-IP protocol */
5355         wb_write[0] = 0x09000000;
5356         wb_write[1] = 0x55555555;
5357         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5358         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5359 }
5360
5361 /* some of the internal memories
5362  * are not directly readable from the driver
5363  * to test them we send debug packets
5364  */
5365 static int bnx2x_int_mem_test(struct bnx2x *bp)
5366 {
5367         int factor;
5368         int count, i;
5369         u32 val = 0;
5370
5371         if (CHIP_REV_IS_FPGA(bp))
5372                 factor = 120;
5373         else if (CHIP_REV_IS_EMUL(bp))
5374                 factor = 200;
5375         else
5376                 factor = 1;
5377
5378         DP(NETIF_MSG_HW, "start part1\n");
5379
5380         /* Disable inputs of parser neighbor blocks */
5381         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5382         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5383         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5384         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5385
5386         /*  Write 0 to parser credits for CFC search request */
5387         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5388
5389         /* send Ethernet packet */
5390         bnx2x_lb_pckt(bp);
5391
5392         /* TODO do i reset NIG statistic? */
5393         /* Wait until NIG register shows 1 packet of size 0x10 */
5394         count = 1000 * factor;
5395         while (count) {
5396
5397                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5398                 val = *bnx2x_sp(bp, wb_data[0]);
5399                 if (val == 0x10)
5400                         break;
5401
5402                 msleep(10);
5403                 count--;
5404         }
5405         if (val != 0x10) {
5406                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5407                 return -1;
5408         }
5409
5410         /* Wait until PRS register shows 1 packet */
5411         count = 1000 * factor;
5412         while (count) {
5413                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5414                 if (val == 1)
5415                         break;
5416
5417                 msleep(10);
5418                 count--;
5419         }
5420         if (val != 0x1) {
5421                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5422                 return -2;
5423         }
5424
5425         /* Reset and init BRB, PRS */
5426         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5427         msleep(50);
5428         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5429         msleep(50);
5430         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5431         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5432
5433         DP(NETIF_MSG_HW, "part2\n");
5434
5435         /* Disable inputs of parser neighbor blocks */
5436         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5437         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5438         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5439         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5440
5441         /* Write 0 to parser credits for CFC search request */
5442         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5443
5444         /* send 10 Ethernet packets */
5445         for (i = 0; i < 10; i++)
5446                 bnx2x_lb_pckt(bp);
5447
5448         /* Wait until NIG register shows 10 + 1
5449            packets of size 11*0x10 = 0xb0 */
5450         count = 1000 * factor;
5451         while (count) {
5452
5453                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5454                 val = *bnx2x_sp(bp, wb_data[0]);
5455                 if (val == 0xb0)
5456                         break;
5457
5458                 msleep(10);
5459                 count--;
5460         }
5461         if (val != 0xb0) {
5462                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5463                 return -3;
5464         }
5465
5466         /* Wait until PRS register shows 2 packets */
5467         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5468         if (val != 2)
5469                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5470
5471         /* Write 1 to parser credits for CFC search request */
5472         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5473
5474         /* Wait until PRS register shows 3 packets */
5475         msleep(10 * factor);
5476         /* Wait until NIG register shows 1 packet of size 0x10 */
5477         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5478         if (val != 3)
5479                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5480
5481         /* clear NIG EOP FIFO */
5482         for (i = 0; i < 11; i++)
5483                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5484         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5485         if (val != 1) {
5486                 BNX2X_ERR("clear of NIG failed\n");
5487                 return -4;
5488         }
5489
5490         /* Reset and init BRB, PRS, NIG */
5491         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5492         msleep(50);
5493         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5494         msleep(50);
5495         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5496         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5497 #ifndef BCM_ISCSI
5498         /* set NIC mode */
5499         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5500 #endif
5501
5502         /* Enable inputs of parser neighbor blocks */
5503         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5504         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5505         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5506         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5507
5508         DP(NETIF_MSG_HW, "done\n");
5509
5510         return 0; /* OK */
5511 }
5512
5513 static void enable_blocks_attention(struct bnx2x *bp)
5514 {
5515         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5516         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5517         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5518         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5519         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5520         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5521         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5522         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5523         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5524 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5525 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5526         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5527         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5528         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5529 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5530 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5531         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5532         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5533         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5534         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5535 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5536 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5537         if (CHIP_REV_IS_FPGA(bp))
5538                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5539         else
5540                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5541         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5542         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5543         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5544 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5545 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5546         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5547         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5548 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5549         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
5550 }
5551
5552
5553 static void bnx2x_reset_common(struct bnx2x *bp)
5554 {
5555         /* reset_common */
5556         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5557                0xd3ffff7f);
5558         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5559 }
5560
5561
5562 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5563 {
5564         u32 val;
5565         u8 port;
5566         u8 is_required = 0;
5567
5568         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5569               SHARED_HW_CFG_FAN_FAILURE_MASK;
5570
5571         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5572                 is_required = 1;
5573
5574         /*
5575          * The fan failure mechanism is usually related to the PHY type since
5576          * the power consumption of the board is affected by the PHY. Currently,
5577          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5578          */
5579         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5580                 for (port = PORT_0; port < PORT_MAX; port++) {
5581                         u32 phy_type =
5582                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
5583                                          external_phy_config) &
5584                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5585                         is_required |=
5586                                 ((phy_type ==
5587                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5588                                  (phy_type ==
5589                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5590                                  (phy_type ==
5591                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5592                 }
5593
5594         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5595
5596         if (is_required == 0)
5597                 return;
5598
5599         /* Fan failure is indicated by SPIO 5 */
5600         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5601                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
5602
5603         /* set to active low mode */
5604         val = REG_RD(bp, MISC_REG_SPIO_INT);
5605         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5606                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5607         REG_WR(bp, MISC_REG_SPIO_INT, val);
5608
5609         /* enable interrupt to signal the IGU */
5610         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5611         val |= (1 << MISC_REGISTERS_SPIO_5);
5612         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5613 }
5614
5615 static int bnx2x_init_common(struct bnx2x *bp)
5616 {
5617         u32 val, i;
5618
5619         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5620
5621         bnx2x_reset_common(bp);
5622         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5623         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5624
5625         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5626         if (CHIP_IS_E1H(bp))
5627                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5628
5629         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5630         msleep(30);
5631         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5632
5633         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5634         if (CHIP_IS_E1(bp)) {
5635                 /* enable HW interrupt from PXP on USDM overflow
5636                    bit 16 on INT_MASK_0 */
5637                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5638         }
5639
5640         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5641         bnx2x_init_pxp(bp);
5642
5643 #ifdef __BIG_ENDIAN
5644         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5645         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5646         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5647         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5648         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5649         /* make sure this value is 0 */
5650         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5651
5652 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5653         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5654         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5655         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5656         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5657 #endif
5658
5659         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5660 #ifdef BCM_ISCSI
5661         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5662         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5663         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5664 #endif
5665
5666         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5667                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5668
5669         /* let the HW do it's magic ... */
5670         msleep(100);
5671         /* finish PXP init */
5672         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5673         if (val != 1) {
5674                 BNX2X_ERR("PXP2 CFG failed\n");
5675                 return -EBUSY;
5676         }
5677         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5678         if (val != 1) {
5679                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5680                 return -EBUSY;
5681         }
5682
5683         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5684         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5685
5686         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5687
5688         /* clean the DMAE memory */
5689         bp->dmae_ready = 1;
5690         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5691
5692         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5693         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5694         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5695         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5696
5697         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5698         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5699         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5700         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5701
5702         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5703         /* soft reset pulse */
5704         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5705         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5706
5707 #ifdef BCM_ISCSI
5708         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5709 #endif
5710
5711         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5712         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5713         if (!CHIP_REV_IS_SLOW(bp)) {
5714                 /* enable hw interrupt from doorbell Q */
5715                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5716         }
5717
5718         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5719         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5720         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5721         /* set NIC mode */
5722         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5723         if (CHIP_IS_E1H(bp))
5724                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5725
5726         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5727         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5728         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5729         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5730
5731         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5732         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5733         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5734         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5735
5736         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5737         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5738         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5739         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5740
5741         /* sync semi rtc */
5742         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5743                0x80000000);
5744         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5745                0x80000000);
5746
5747         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5748         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5749         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5750
5751         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5752         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5753                 REG_WR(bp, i, 0xc0cac01a);
5754                 /* TODO: replace with something meaningful */
5755         }
5756         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5757         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5758
5759         if (sizeof(union cdu_context) != 1024)
5760                 /* we currently assume that a context is 1024 bytes */
5761                 printk(KERN_ALERT PFX "please adjust the size of"
5762                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5763
5764         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5765         val = (4 << 24) + (0 << 12) + 1024;
5766         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5767         if (CHIP_IS_E1(bp)) {
5768                 /* !!! fix pxp client crdit until excel update */
5769                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5770                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5771         }
5772
5773         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5774         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5775         /* enable context validation interrupt from CFC */
5776         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5777
5778         /* set the thresholds to prevent CFC/CDU race */
5779         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5780
5781         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5782         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5783
5784         /* PXPCS COMMON comes here */
5785         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5786         /* Reset PCIE errors for debug */
5787         REG_WR(bp, 0x2814, 0xffffffff);
5788         REG_WR(bp, 0x3820, 0xffffffff);
5789
5790         /* EMAC0 COMMON comes here */
5791         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5792         /* EMAC1 COMMON comes here */
5793         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5794         /* DBU COMMON comes here */
5795         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5796         /* DBG COMMON comes here */
5797         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5798
5799         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5800         if (CHIP_IS_E1H(bp)) {
5801                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5802                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5803         }
5804
5805         if (CHIP_REV_IS_SLOW(bp))
5806                 msleep(200);
5807
5808         /* finish CFC init */
5809         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5810         if (val != 1) {
5811                 BNX2X_ERR("CFC LL_INIT failed\n");
5812                 return -EBUSY;
5813         }
5814         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5815         if (val != 1) {
5816                 BNX2X_ERR("CFC AC_INIT failed\n");
5817                 return -EBUSY;
5818         }
5819         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5820         if (val != 1) {
5821                 BNX2X_ERR("CFC CAM_INIT failed\n");
5822                 return -EBUSY;
5823         }
5824         REG_WR(bp, CFC_REG_DEBUG0, 0);
5825
5826         /* read NIG statistic
5827            to see if this is our first up since powerup */
5828         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5829         val = *bnx2x_sp(bp, wb_data[0]);
5830
5831         /* do internal memory self test */
5832         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5833                 BNX2X_ERR("internal mem self test failed\n");
5834                 return -EBUSY;
5835         }
5836
5837         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5838         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5839         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5840         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5841         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
5842                 bp->port.need_hw_lock = 1;
5843                 break;
5844
5845         default:
5846                 break;
5847         }
5848
5849         bnx2x_setup_fan_failure_detection(bp);
5850
5851         /* clear PXP2 attentions */
5852         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5853
5854         enable_blocks_attention(bp);
5855
5856         if (!BP_NOMCP(bp)) {
5857                 bnx2x_acquire_phy_lock(bp);
5858                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5859                 bnx2x_release_phy_lock(bp);
5860         } else
5861                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5862
5863         return 0;
5864 }
5865
5866 static int bnx2x_init_port(struct bnx2x *bp)
5867 {
5868         int port = BP_PORT(bp);
5869         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5870         u32 low, high;
5871         u32 val;
5872
5873         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5874
5875         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5876
5877         /* Port PXP comes here */
5878         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5879         /* Port PXP2 comes here */
5880         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5881 #ifdef BCM_ISCSI
5882         /* Port0  1
5883          * Port1  385 */
5884         i++;
5885         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5886         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5887         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5888         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5889
5890         /* Port0  2
5891          * Port1  386 */
5892         i++;
5893         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5894         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5895         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5896         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5897
5898         /* Port0  3
5899          * Port1  387 */
5900         i++;
5901         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5902         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5903         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5904         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5905 #endif
5906         /* Port CMs come here */
5907         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5908
5909         /* Port QM comes here */
5910 #ifdef BCM_ISCSI
5911         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5912         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5913
5914         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5915 #endif
5916         /* Port DQ comes here */
5917         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5918
5919         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5920         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5921                 /* no pause for emulation and FPGA */
5922                 low = 0;
5923                 high = 513;
5924         } else {
5925                 if (IS_E1HMF(bp))
5926                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5927                 else if (bp->dev->mtu > 4096) {
5928                         if (bp->flags & ONE_PORT_FLAG)
5929                                 low = 160;
5930                         else {
5931                                 val = bp->dev->mtu;
5932                                 /* (24*1024 + val*4)/256 */
5933                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5934                         }
5935                 } else
5936                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5937                 high = low + 56;        /* 14*1024/256 */
5938         }
5939         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5940         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5941
5942
5943         /* Port PRS comes here */
5944         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5945         /* Port TSDM comes here */
5946         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5947         /* Port CSDM comes here */
5948         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5949         /* Port USDM comes here */
5950         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5951         /* Port XSDM comes here */
5952         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5953
5954         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5955         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5956         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5957         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5958
5959         /* Port UPB comes here */
5960         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5961         /* Port XPB comes here */
5962         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5963
5964         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5965
5966         /* configure PBF to work without PAUSE mtu 9000 */
5967         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5968
5969         /* update threshold */
5970         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5971         /* update init credit */
5972         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5973
5974         /* probe changes */
5975         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5976         msleep(5);
5977         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5978
5979 #ifdef BCM_ISCSI
5980         /* tell the searcher where the T2 table is */
5981         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5982
5983         wb_write[0] = U64_LO(bp->t2_mapping);
5984         wb_write[1] = U64_HI(bp->t2_mapping);
5985         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5986         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5987         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5988         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5989
5990         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5991         /* Port SRCH comes here */
5992 #endif
5993         /* Port CDU comes here */
5994         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5995         /* Port CFC comes here */
5996         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5997
5998         if (CHIP_IS_E1(bp)) {
5999                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6000                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6001         }
6002         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6003
6004         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6005         /* init aeu_mask_attn_func_0/1:
6006          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6007          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6008          *             bits 4-7 are used for "per vn group attention" */
6009         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6010                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6011
6012         /* Port PXPCS comes here */
6013         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6014         /* Port EMAC0 comes here */
6015         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6016         /* Port EMAC1 comes here */
6017         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6018         /* Port DBU comes here */
6019         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6020         /* Port DBG comes here */
6021         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6022
6023         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6024
6025         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6026
6027         if (CHIP_IS_E1H(bp)) {
6028                 /* 0x2 disable e1hov, 0x1 enable */
6029                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6030                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6031
6032                 /* support pause requests from USDM, TSDM and BRB */
6033                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6034
6035                 {
6036                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6037                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6038                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6039                 }
6040         }
6041
6042         /* Port MCP comes here */
6043         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6044         /* Port DMAE comes here */
6045         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6046
6047         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6048         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6049                 {
6050                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6051
6052                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6053                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6054
6055                 /* The GPIO should be swapped if the swap register is
6056                    set and active */
6057                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6058                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6059
6060                 /* Select function upon port-swap configuration */
6061                 if (port == 0) {
6062                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6063                         aeu_gpio_mask = (swap_val && swap_override) ?
6064                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6065                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6066                 } else {
6067                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6068                         aeu_gpio_mask = (swap_val && swap_override) ?
6069                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6070                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6071                 }
6072                 val = REG_RD(bp, offset);
6073                 /* add GPIO3 to group */
6074                 val |= aeu_gpio_mask;
6075                 REG_WR(bp, offset, val);
6076                 }
6077                 break;
6078
6079         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6080         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6081                 /* add SPIO 5 to group 0 */
6082                 {
6083                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6084                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6085                 val = REG_RD(bp, reg_addr);
6086                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6087                 REG_WR(bp, reg_addr, val);
6088                 }
6089                 break;
6090
6091         default:
6092                 break;
6093         }
6094
6095         bnx2x__link_reset(bp);
6096
6097         return 0;
6098 }
6099
6100 #define ILT_PER_FUNC            (768/2)
6101 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6102 /* the phys address is shifted right 12 bits and has an added
6103    1=valid bit added to the 53rd bit
6104    then since this is a wide register(TM)
6105    we split it into two 32 bit writes
6106  */
6107 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6108 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6109 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6110 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
6111
6112 #define CNIC_ILT_LINES          0
6113
6114 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6115 {
6116         int reg;
6117
6118         if (CHIP_IS_E1H(bp))
6119                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6120         else /* E1 */
6121                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6122
6123         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6124 }
6125
6126 static int bnx2x_init_func(struct bnx2x *bp)
6127 {
6128         int port = BP_PORT(bp);
6129         int func = BP_FUNC(bp);
6130         u32 addr, val;
6131         int i;
6132
6133         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6134
6135         /* set MSI reconfigure capability */
6136         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6137         val = REG_RD(bp, addr);
6138         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6139         REG_WR(bp, addr, val);
6140
6141         i = FUNC_ILT_BASE(func);
6142
6143         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6144         if (CHIP_IS_E1H(bp)) {
6145                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6146                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6147         } else /* E1 */
6148                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6149                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6150
6151
6152         if (CHIP_IS_E1H(bp)) {
6153                 for (i = 0; i < 9; i++)
6154                         bnx2x_init_block(bp,
6155                                          cm_blocks[i], FUNC0_STAGE + func);
6156
6157                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6158                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6159         }
6160
6161         /* HC init per function */
6162         if (CHIP_IS_E1H(bp)) {
6163                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6164
6165                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6166                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6167         }
6168         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6169
6170         /* Reset PCIE errors for debug */
6171         REG_WR(bp, 0x2114, 0xffffffff);
6172         REG_WR(bp, 0x2120, 0xffffffff);
6173
6174         return 0;
6175 }
6176
6177 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6178 {
6179         int i, rc = 0;
6180
6181         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6182            BP_FUNC(bp), load_code);
6183
6184         bp->dmae_ready = 0;
6185         mutex_init(&bp->dmae_mutex);
6186         bnx2x_gunzip_init(bp);
6187
6188         switch (load_code) {
6189         case FW_MSG_CODE_DRV_LOAD_COMMON:
6190                 rc = bnx2x_init_common(bp);
6191                 if (rc)
6192                         goto init_hw_err;
6193                 /* no break */
6194
6195         case FW_MSG_CODE_DRV_LOAD_PORT:
6196                 bp->dmae_ready = 1;
6197                 rc = bnx2x_init_port(bp);
6198                 if (rc)
6199                         goto init_hw_err;
6200                 /* no break */
6201
6202         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6203                 bp->dmae_ready = 1;
6204                 rc = bnx2x_init_func(bp);
6205                 if (rc)
6206                         goto init_hw_err;
6207                 break;
6208
6209         default:
6210                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6211                 break;
6212         }
6213
6214         if (!BP_NOMCP(bp)) {
6215                 int func = BP_FUNC(bp);
6216
6217                 bp->fw_drv_pulse_wr_seq =
6218                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6219                                  DRV_PULSE_SEQ_MASK);
6220                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6221                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6222                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6223         } else
6224                 bp->func_stx = 0;
6225
6226         /* this needs to be done before gunzip end */
6227         bnx2x_zero_def_sb(bp);
6228         for_each_queue(bp, i)
6229                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6230
6231 init_hw_err:
6232         bnx2x_gunzip_end(bp);
6233
6234         return rc;
6235 }
6236
6237 /* send the MCP a request, block until there is a reply */
6238 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6239 {
6240         int func = BP_FUNC(bp);
6241         u32 seq = ++bp->fw_seq;
6242         u32 rc = 0;
6243         u32 cnt = 1;
6244         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6245
6246         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6247         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6248
6249         do {
6250                 /* let the FW do it's magic ... */
6251                 msleep(delay);
6252
6253                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6254
6255                 /* Give the FW up to 2 second (200*10ms) */
6256         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6257
6258         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6259            cnt*delay, rc, seq);
6260
6261         /* is this a reply to our command? */
6262         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6263                 rc &= FW_MSG_CODE_MASK;
6264
6265         } else {
6266                 /* FW BUG! */
6267                 BNX2X_ERR("FW failed to respond!\n");
6268                 bnx2x_fw_dump(bp);
6269                 rc = 0;
6270         }
6271
6272         return rc;
6273 }
6274
6275 static void bnx2x_free_mem(struct bnx2x *bp)
6276 {
6277
6278 #define BNX2X_PCI_FREE(x, y, size) \
6279         do { \
6280                 if (x) { \
6281                         pci_free_consistent(bp->pdev, size, x, y); \
6282                         x = NULL; \
6283                         y = 0; \
6284                 } \
6285         } while (0)
6286
6287 #define BNX2X_FREE(x) \
6288         do { \
6289                 if (x) { \
6290                         vfree(x); \
6291                         x = NULL; \
6292                 } \
6293         } while (0)
6294
6295         int i;
6296
6297         /* fastpath */
6298         /* Common */
6299         for_each_queue(bp, i) {
6300
6301                 /* status blocks */
6302                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6303                                bnx2x_fp(bp, i, status_blk_mapping),
6304                                sizeof(struct host_status_block) +
6305                                sizeof(struct eth_tx_db_data));
6306         }
6307         /* Rx */
6308         for_each_rx_queue(bp, i) {
6309
6310                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6311                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6312                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6313                                bnx2x_fp(bp, i, rx_desc_mapping),
6314                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6315
6316                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6317                                bnx2x_fp(bp, i, rx_comp_mapping),
6318                                sizeof(struct eth_fast_path_rx_cqe) *
6319                                NUM_RCQ_BD);
6320
6321                 /* SGE ring */
6322                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6323                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6324                                bnx2x_fp(bp, i, rx_sge_mapping),
6325                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6326         }
6327         /* Tx */
6328         for_each_tx_queue(bp, i) {
6329
6330                 /* fastpath tx rings: tx_buf tx_desc */
6331                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6332                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6333                                bnx2x_fp(bp, i, tx_desc_mapping),
6334                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
6335         }
6336         /* end of fastpath */
6337
6338         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6339                        sizeof(struct host_def_status_block));
6340
6341         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6342                        sizeof(struct bnx2x_slowpath));
6343
6344 #ifdef BCM_ISCSI
6345         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6346         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6347         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6348         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6349 #endif
6350         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6351
6352 #undef BNX2X_PCI_FREE
6353 #undef BNX2X_KFREE
6354 }
6355
6356 static int bnx2x_alloc_mem(struct bnx2x *bp)
6357 {
6358
6359 #define BNX2X_PCI_ALLOC(x, y, size) \
6360         do { \
6361                 x = pci_alloc_consistent(bp->pdev, size, y); \
6362                 if (x == NULL) \
6363                         goto alloc_mem_err; \
6364                 memset(x, 0, size); \
6365         } while (0)
6366
6367 #define BNX2X_ALLOC(x, size) \
6368         do { \
6369                 x = vmalloc(size); \
6370                 if (x == NULL) \
6371                         goto alloc_mem_err; \
6372                 memset(x, 0, size); \
6373         } while (0)
6374
6375         int i;
6376
6377         /* fastpath */
6378         /* Common */
6379         for_each_queue(bp, i) {
6380                 bnx2x_fp(bp, i, bp) = bp;
6381
6382                 /* status blocks */
6383                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6384                                 &bnx2x_fp(bp, i, status_blk_mapping),
6385                                 sizeof(struct host_status_block) +
6386                                 sizeof(struct eth_tx_db_data));
6387         }
6388         /* Rx */
6389         for_each_rx_queue(bp, i) {
6390
6391                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6392                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6393                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6394                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6395                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6396                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6397
6398                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6399                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6400                                 sizeof(struct eth_fast_path_rx_cqe) *
6401                                 NUM_RCQ_BD);
6402
6403                 /* SGE ring */
6404                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6405                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6406                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6407                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6408                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6409         }
6410         /* Tx */
6411         for_each_tx_queue(bp, i) {
6412
6413                 bnx2x_fp(bp, i, hw_tx_prods) =
6414                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6415
6416                 bnx2x_fp(bp, i, tx_prods_mapping) =
6417                                 bnx2x_fp(bp, i, status_blk_mapping) +
6418                                 sizeof(struct host_status_block);
6419
6420                 /* fastpath tx rings: tx_buf tx_desc */
6421                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6422                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6423                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6424                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6425                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6426         }
6427         /* end of fastpath */
6428
6429         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6430                         sizeof(struct host_def_status_block));
6431
6432         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6433                         sizeof(struct bnx2x_slowpath));
6434
6435 #ifdef BCM_ISCSI
6436         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6437
6438         /* Initialize T1 */
6439         for (i = 0; i < 64*1024; i += 64) {
6440                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6441                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6442         }
6443
6444         /* allocate searcher T2 table
6445            we allocate 1/4 of alloc num for T2
6446           (which is not entered into the ILT) */
6447         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6448
6449         /* Initialize T2 */
6450         for (i = 0; i < 16*1024; i += 64)
6451                 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6452
6453         /* now fixup the last line in the block to point to the next block */
6454         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6455
6456         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6457         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6458
6459         /* QM queues (128*MAX_CONN) */
6460         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6461 #endif
6462
6463         /* Slow path ring */
6464         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6465
6466         return 0;
6467
6468 alloc_mem_err:
6469         bnx2x_free_mem(bp);
6470         return -ENOMEM;
6471
6472 #undef BNX2X_PCI_ALLOC
6473 #undef BNX2X_ALLOC
6474 }
6475
6476 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6477 {
6478         int i;
6479
6480         for_each_tx_queue(bp, i) {
6481                 struct bnx2x_fastpath *fp = &bp->fp[i];
6482
6483                 u16 bd_cons = fp->tx_bd_cons;
6484                 u16 sw_prod = fp->tx_pkt_prod;
6485                 u16 sw_cons = fp->tx_pkt_cons;
6486
6487                 while (sw_cons != sw_prod) {
6488                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6489                         sw_cons++;
6490                 }
6491         }
6492 }
6493
6494 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6495 {
6496         int i, j;
6497
6498         for_each_rx_queue(bp, j) {
6499                 struct bnx2x_fastpath *fp = &bp->fp[j];
6500
6501                 for (i = 0; i < NUM_RX_BD; i++) {
6502                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6503                         struct sk_buff *skb = rx_buf->skb;
6504
6505                         if (skb == NULL)
6506                                 continue;
6507
6508                         pci_unmap_single(bp->pdev,
6509                                          pci_unmap_addr(rx_buf, mapping),
6510                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6511
6512                         rx_buf->skb = NULL;
6513                         dev_kfree_skb(skb);
6514                 }
6515                 if (!fp->disable_tpa)
6516                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6517                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6518                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6519         }
6520 }
6521
6522 static void bnx2x_free_skbs(struct bnx2x *bp)
6523 {
6524         bnx2x_free_tx_skbs(bp);
6525         bnx2x_free_rx_skbs(bp);
6526 }
6527
6528 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6529 {
6530         int i, offset = 1;
6531
6532         free_irq(bp->msix_table[0].vector, bp->dev);
6533         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6534            bp->msix_table[0].vector);
6535
6536         for_each_queue(bp, i) {
6537                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6538                    "state %x\n", i, bp->msix_table[i + offset].vector,
6539                    bnx2x_fp(bp, i, state));
6540
6541                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6542         }
6543 }
6544
6545 static void bnx2x_free_irq(struct bnx2x *bp)
6546 {
6547         if (bp->flags & USING_MSIX_FLAG) {
6548                 bnx2x_free_msix_irqs(bp);
6549                 pci_disable_msix(bp->pdev);
6550                 bp->flags &= ~USING_MSIX_FLAG;
6551
6552         } else if (bp->flags & USING_MSI_FLAG) {
6553                 free_irq(bp->pdev->irq, bp->dev);
6554                 pci_disable_msi(bp->pdev);
6555                 bp->flags &= ~USING_MSI_FLAG;
6556
6557         } else
6558                 free_irq(bp->pdev->irq, bp->dev);
6559 }
6560
6561 static int bnx2x_enable_msix(struct bnx2x *bp)
6562 {
6563         int i, rc, offset = 1;
6564         int igu_vec = 0;
6565
6566         bp->msix_table[0].entry = igu_vec;
6567         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6568
6569         for_each_queue(bp, i) {
6570                 igu_vec = BP_L_ID(bp) + offset + i;
6571                 bp->msix_table[i + offset].entry = igu_vec;
6572                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6573                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6574         }
6575
6576         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6577                              BNX2X_NUM_QUEUES(bp) + offset);
6578         if (rc) {
6579                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6580                 return rc;
6581         }
6582
6583         bp->flags |= USING_MSIX_FLAG;
6584
6585         return 0;
6586 }
6587
6588 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6589 {
6590         int i, rc, offset = 1;
6591
6592         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6593                          bp->dev->name, bp->dev);
6594         if (rc) {
6595                 BNX2X_ERR("request sp irq failed\n");
6596                 return -EBUSY;
6597         }
6598
6599         for_each_queue(bp, i) {
6600                 struct bnx2x_fastpath *fp = &bp->fp[i];
6601
6602                 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6603                 rc = request_irq(bp->msix_table[i + offset].vector,
6604                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6605                 if (rc) {
6606                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6607                         bnx2x_free_msix_irqs(bp);
6608                         return -EBUSY;
6609                 }
6610
6611                 fp->state = BNX2X_FP_STATE_IRQ;
6612         }
6613
6614         i = BNX2X_NUM_QUEUES(bp);
6615         if (is_multi(bp))
6616                 printk(KERN_INFO PFX
6617                        "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6618                        bp->dev->name, bp->msix_table[0].vector,
6619                        bp->msix_table[offset].vector,
6620                        bp->msix_table[offset + i - 1].vector);
6621         else
6622                 printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6623                        bp->dev->name, bp->msix_table[0].vector,
6624                        bp->msix_table[offset + i - 1].vector);
6625
6626         return 0;
6627 }
6628
6629 static int bnx2x_enable_msi(struct bnx2x *bp)
6630 {
6631         int rc;
6632
6633         rc = pci_enable_msi(bp->pdev);
6634         if (rc) {
6635                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6636                 return -1;
6637         }
6638         bp->flags |= USING_MSI_FLAG;
6639
6640         return 0;
6641 }
6642
6643 static int bnx2x_req_irq(struct bnx2x *bp)
6644 {
6645         unsigned long flags;
6646         int rc;
6647
6648         if (bp->flags & USING_MSI_FLAG)
6649                 flags = 0;
6650         else
6651                 flags = IRQF_SHARED;
6652
6653         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6654                          bp->dev->name, bp->dev);
6655         if (!rc)
6656                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6657
6658         return rc;
6659 }
6660
6661 static void bnx2x_napi_enable(struct bnx2x *bp)
6662 {
6663         int i;
6664
6665         for_each_rx_queue(bp, i)
6666                 napi_enable(&bnx2x_fp(bp, i, napi));
6667 }
6668
6669 static void bnx2x_napi_disable(struct bnx2x *bp)
6670 {
6671         int i;
6672
6673         for_each_rx_queue(bp, i)
6674                 napi_disable(&bnx2x_fp(bp, i, napi));
6675 }
6676
6677 static void bnx2x_netif_start(struct bnx2x *bp)
6678 {
6679         int intr_sem;
6680
6681         intr_sem = atomic_dec_and_test(&bp->intr_sem);
6682         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6683
6684         if (intr_sem) {
6685                 if (netif_running(bp->dev)) {
6686                         bnx2x_napi_enable(bp);
6687                         bnx2x_int_enable(bp);
6688                         if (bp->state == BNX2X_STATE_OPEN)
6689                                 netif_tx_wake_all_queues(bp->dev);
6690                 }
6691         }
6692 }
6693
6694 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6695 {
6696         bnx2x_int_disable_sync(bp, disable_hw);
6697         bnx2x_napi_disable(bp);
6698         netif_tx_disable(bp->dev);
6699         bp->dev->trans_start = jiffies; /* prevent tx timeout */
6700 }
6701
6702 /*
6703  * Init service functions
6704  */
6705
6706 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6707 {
6708         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6709         int port = BP_PORT(bp);
6710
6711         /* CAM allocation
6712          * unicasts 0-31:port0 32-63:port1
6713          * multicast 64-127:port0 128-191:port1
6714          */
6715         config->hdr.length = 2;
6716         config->hdr.offset = port ? 32 : 0;
6717         config->hdr.client_id = bp->fp->cl_id;
6718         config->hdr.reserved1 = 0;
6719
6720         /* primary MAC */
6721         config->config_table[0].cam_entry.msb_mac_addr =
6722                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6723         config->config_table[0].cam_entry.middle_mac_addr =
6724                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6725         config->config_table[0].cam_entry.lsb_mac_addr =
6726                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6727         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6728         if (set)
6729                 config->config_table[0].target_table_entry.flags = 0;
6730         else
6731                 CAM_INVALIDATE(config->config_table[0]);
6732         config->config_table[0].target_table_entry.client_id = 0;
6733         config->config_table[0].target_table_entry.vlan_id = 0;
6734
6735         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6736            (set ? "setting" : "clearing"),
6737            config->config_table[0].cam_entry.msb_mac_addr,
6738            config->config_table[0].cam_entry.middle_mac_addr,
6739            config->config_table[0].cam_entry.lsb_mac_addr);
6740
6741         /* broadcast */
6742         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6743         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6744         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6745         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6746         if (set)
6747                 config->config_table[1].target_table_entry.flags =
6748                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6749         else
6750                 CAM_INVALIDATE(config->config_table[1]);
6751         config->config_table[1].target_table_entry.client_id = 0;
6752         config->config_table[1].target_table_entry.vlan_id = 0;
6753
6754         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6755                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6756                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6757 }
6758
6759 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6760 {
6761         struct mac_configuration_cmd_e1h *config =
6762                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6763
6764         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6765                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6766                 return;
6767         }
6768
6769         /* CAM allocation for E1H
6770          * unicasts: by func number
6771          * multicast: 20+FUNC*20, 20 each
6772          */
6773         config->hdr.length = 1;
6774         config->hdr.offset = BP_FUNC(bp);
6775         config->hdr.client_id = bp->fp->cl_id;
6776         config->hdr.reserved1 = 0;
6777
6778         /* primary MAC */
6779         config->config_table[0].msb_mac_addr =
6780                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6781         config->config_table[0].middle_mac_addr =
6782                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6783         config->config_table[0].lsb_mac_addr =
6784                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6785         config->config_table[0].client_id = BP_L_ID(bp);
6786         config->config_table[0].vlan_id = 0;
6787         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6788         if (set)
6789                 config->config_table[0].flags = BP_PORT(bp);
6790         else
6791                 config->config_table[0].flags =
6792                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6793
6794         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6795            (set ? "setting" : "clearing"),
6796            config->config_table[0].msb_mac_addr,
6797            config->config_table[0].middle_mac_addr,
6798            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6799
6800         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6801                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6802                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6803 }
6804
6805 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6806                              int *state_p, int poll)
6807 {
6808         /* can take a while if any port is running */
6809         int cnt = 5000;
6810
6811         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6812            poll ? "polling" : "waiting", state, idx);
6813
6814         might_sleep();
6815         while (cnt--) {
6816                 if (poll) {
6817                         bnx2x_rx_int(bp->fp, 10);
6818                         /* if index is different from 0
6819                          * the reply for some commands will
6820                          * be on the non default queue
6821                          */
6822                         if (idx)
6823                                 bnx2x_rx_int(&bp->fp[idx], 10);
6824                 }
6825
6826                 mb(); /* state is changed by bnx2x_sp_event() */
6827                 if (*state_p == state) {
6828 #ifdef BNX2X_STOP_ON_ERROR
6829                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6830 #endif
6831                         return 0;
6832                 }
6833
6834                 msleep(1);
6835         }
6836
6837         /* timeout! */
6838         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6839                   poll ? "polling" : "waiting", state, idx);
6840 #ifdef BNX2X_STOP_ON_ERROR
6841         bnx2x_panic();
6842 #endif
6843
6844         return -EBUSY;
6845 }
6846
6847 static int bnx2x_setup_leading(struct bnx2x *bp)
6848 {
6849         int rc;
6850
6851         /* reset IGU state */
6852         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6853
6854         /* SETUP ramrod */
6855         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6856
6857         /* Wait for completion */
6858         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6859
6860         return rc;
6861 }
6862
6863 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6864 {
6865         struct bnx2x_fastpath *fp = &bp->fp[index];
6866
6867         /* reset IGU state */
6868         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6869
6870         /* SETUP ramrod */
6871         fp->state = BNX2X_FP_STATE_OPENING;
6872         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6873                       fp->cl_id, 0);
6874
6875         /* Wait for completion */
6876         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6877                                  &(fp->state), 0);
6878 }
6879
6880 static int bnx2x_poll(struct napi_struct *napi, int budget);
6881
6882 static void bnx2x_set_int_mode(struct bnx2x *bp)
6883 {
6884         int num_queues;
6885
6886         switch (int_mode) {
6887         case INT_MODE_INTx:
6888         case INT_MODE_MSI:
6889                 num_queues = 1;
6890                 bp->num_rx_queues = num_queues;
6891                 bp->num_tx_queues = num_queues;
6892                 DP(NETIF_MSG_IFUP,
6893                    "set number of queues to %d\n", num_queues);
6894                 break;
6895
6896         case INT_MODE_MSIX:
6897         default:
6898                 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6899                         num_queues = min_t(u32, num_online_cpus(),
6900                                            BNX2X_MAX_QUEUES(bp));
6901                 else
6902                         num_queues = 1;
6903                 bp->num_rx_queues = num_queues;
6904                 bp->num_tx_queues = num_queues;
6905                 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6906                    "  number of tx queues to %d\n",
6907                    bp->num_rx_queues, bp->num_tx_queues);
6908                 /* if we can't use MSI-X we only need one fp,
6909                  * so try to enable MSI-X with the requested number of fp's
6910                  * and fallback to MSI or legacy INTx with one fp
6911                  */
6912                 if (bnx2x_enable_msix(bp)) {
6913                         /* failed to enable MSI-X */
6914                         num_queues = 1;
6915                         bp->num_rx_queues = num_queues;
6916                         bp->num_tx_queues = num_queues;
6917                         if (bp->multi_mode)
6918                                 BNX2X_ERR("Multi requested but failed to "
6919                                           "enable MSI-X  set number of "
6920                                           "queues to %d\n", num_queues);
6921                 }
6922                 break;
6923         }
6924         bp->dev->real_num_tx_queues = bp->num_tx_queues;
6925 }
6926
6927 static void bnx2x_set_rx_mode(struct net_device *dev);
6928
6929 /* must be called with rtnl_lock */
6930 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6931 {
6932         u32 load_code;
6933         int i, rc = 0;
6934 #ifdef BNX2X_STOP_ON_ERROR
6935         DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6936         if (unlikely(bp->panic))
6937                 return -EPERM;
6938 #endif
6939
6940         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6941
6942         bnx2x_set_int_mode(bp);
6943
6944         if (bnx2x_alloc_mem(bp))
6945                 return -ENOMEM;
6946
6947         for_each_rx_queue(bp, i)
6948                 bnx2x_fp(bp, i, disable_tpa) =
6949                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6950
6951         for_each_rx_queue(bp, i)
6952                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6953                                bnx2x_poll, 128);
6954
6955 #ifdef BNX2X_STOP_ON_ERROR
6956         for_each_rx_queue(bp, i) {
6957                 struct bnx2x_fastpath *fp = &bp->fp[i];
6958
6959                 fp->poll_no_work = 0;
6960                 fp->poll_calls = 0;
6961                 fp->poll_max_calls = 0;
6962                 fp->poll_complete = 0;
6963                 fp->poll_exit = 0;
6964         }
6965 #endif
6966         bnx2x_napi_enable(bp);
6967
6968         if (bp->flags & USING_MSIX_FLAG) {
6969                 rc = bnx2x_req_msix_irqs(bp);
6970                 if (rc) {
6971                         pci_disable_msix(bp->pdev);
6972                         goto load_error1;
6973                 }
6974         } else {
6975                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6976                         bnx2x_enable_msi(bp);
6977                 bnx2x_ack_int(bp);
6978                 rc = bnx2x_req_irq(bp);
6979                 if (rc) {
6980                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6981                         if (bp->flags & USING_MSI_FLAG)
6982                                 pci_disable_msi(bp->pdev);
6983                         goto load_error1;
6984                 }
6985                 if (bp->flags & USING_MSI_FLAG) {
6986                         bp->dev->irq = bp->pdev->irq;
6987                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6988                                bp->dev->name, bp->pdev->irq);
6989                 }
6990         }
6991
6992         /* Send LOAD_REQUEST command to MCP
6993            Returns the type of LOAD command:
6994            if it is the first port to be initialized
6995            common blocks should be initialized, otherwise - not
6996         */
6997         if (!BP_NOMCP(bp)) {
6998                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6999                 if (!load_code) {
7000                         BNX2X_ERR("MCP response failure, aborting\n");
7001                         rc = -EBUSY;
7002                         goto load_error2;
7003                 }
7004                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7005                         rc = -EBUSY; /* other port in diagnostic mode */
7006                         goto load_error2;
7007                 }
7008
7009         } else {
7010                 int port = BP_PORT(bp);
7011
7012                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7013                    load_count[0], load_count[1], load_count[2]);
7014                 load_count[0]++;
7015                 load_count[1 + port]++;
7016                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7017                    load_count[0], load_count[1], load_count[2]);
7018                 if (load_count[0] == 1)
7019                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7020                 else if (load_count[1 + port] == 1)
7021                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7022                 else
7023                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7024         }
7025
7026         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7027             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7028                 bp->port.pmf = 1;
7029         else
7030                 bp->port.pmf = 0;
7031         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7032
7033         /* Initialize HW */
7034         rc = bnx2x_init_hw(bp, load_code);
7035         if (rc) {
7036                 BNX2X_ERR("HW init failed, aborting\n");
7037                 goto load_error2;
7038         }
7039
7040         /* Setup NIC internals and enable interrupts */
7041         bnx2x_nic_init(bp, load_code);
7042
7043         /* Send LOAD_DONE command to MCP */
7044         if (!BP_NOMCP(bp)) {
7045                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7046                 if (!load_code) {
7047                         BNX2X_ERR("MCP response failure, aborting\n");
7048                         rc = -EBUSY;
7049                         goto load_error3;
7050                 }
7051         }
7052
7053         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7054
7055         rc = bnx2x_setup_leading(bp);
7056         if (rc) {
7057                 BNX2X_ERR("Setup leading failed!\n");
7058                 goto load_error3;
7059         }
7060
7061         if (CHIP_IS_E1H(bp))
7062                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7063                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7064                         bp->state = BNX2X_STATE_DISABLED;
7065                 }
7066
7067         if (bp->state == BNX2X_STATE_OPEN)
7068                 for_each_nondefault_queue(bp, i) {
7069                         rc = bnx2x_setup_multi(bp, i);
7070                         if (rc)
7071                                 goto load_error3;
7072                 }
7073
7074         if (CHIP_IS_E1(bp))
7075                 bnx2x_set_mac_addr_e1(bp, 1);
7076         else
7077                 bnx2x_set_mac_addr_e1h(bp, 1);
7078
7079         if (bp->port.pmf)
7080                 bnx2x_initial_phy_init(bp, load_mode);
7081
7082         /* Start fast path */
7083         switch (load_mode) {
7084         case LOAD_NORMAL:
7085                 /* Tx queue should be only reenabled */
7086                 netif_tx_wake_all_queues(bp->dev);
7087                 /* Initialize the receive filter. */
7088                 bnx2x_set_rx_mode(bp->dev);
7089                 break;
7090
7091         case LOAD_OPEN:
7092                 netif_tx_start_all_queues(bp->dev);
7093                 /* Initialize the receive filter. */
7094                 bnx2x_set_rx_mode(bp->dev);
7095                 break;
7096
7097         case LOAD_DIAG:
7098                 /* Initialize the receive filter. */
7099                 bnx2x_set_rx_mode(bp->dev);
7100                 bp->state = BNX2X_STATE_DIAG;
7101                 break;
7102
7103         default:
7104                 break;
7105         }
7106
7107         if (!bp->port.pmf)
7108                 bnx2x__link_status_update(bp);
7109
7110         /* start the timer */
7111         mod_timer(&bp->timer, jiffies + bp->current_interval);
7112
7113
7114         return 0;
7115
7116 load_error3:
7117         bnx2x_int_disable_sync(bp, 1);
7118         if (!BP_NOMCP(bp)) {
7119                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7120                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7121         }
7122         bp->port.pmf = 0;
7123         /* Free SKBs, SGEs, TPA pool and driver internals */
7124         bnx2x_free_skbs(bp);
7125         for_each_rx_queue(bp, i)
7126                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7127 load_error2:
7128         /* Release IRQs */
7129         bnx2x_free_irq(bp);
7130 load_error1:
7131         bnx2x_napi_disable(bp);
7132         for_each_rx_queue(bp, i)
7133                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7134         bnx2x_free_mem(bp);
7135
7136         return rc;
7137 }
7138
7139 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7140 {
7141         struct bnx2x_fastpath *fp = &bp->fp[index];
7142         int rc;
7143
7144         /* halt the connection */
7145         fp->state = BNX2X_FP_STATE_HALTING;
7146         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7147
7148         /* Wait for completion */
7149         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7150                                &(fp->state), 1);
7151         if (rc) /* timeout */
7152                 return rc;
7153
7154         /* delete cfc entry */
7155         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7156
7157         /* Wait for completion */
7158         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7159                                &(fp->state), 1);
7160         return rc;
7161 }
7162
7163 static int bnx2x_stop_leading(struct bnx2x *bp)
7164 {
7165         __le16 dsb_sp_prod_idx;
7166         /* if the other port is handling traffic,
7167            this can take a lot of time */
7168         int cnt = 500;
7169         int rc;
7170
7171         might_sleep();
7172
7173         /* Send HALT ramrod */
7174         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7175         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7176
7177         /* Wait for completion */
7178         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7179                                &(bp->fp[0].state), 1);
7180         if (rc) /* timeout */
7181                 return rc;
7182
7183         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7184
7185         /* Send PORT_DELETE ramrod */
7186         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7187
7188         /* Wait for completion to arrive on default status block
7189            we are going to reset the chip anyway
7190            so there is not much to do if this times out
7191          */
7192         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7193                 if (!cnt) {
7194                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7195                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7196                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7197 #ifdef BNX2X_STOP_ON_ERROR
7198                         bnx2x_panic();
7199 #endif
7200                         rc = -EBUSY;
7201                         break;
7202                 }
7203                 cnt--;
7204                 msleep(1);
7205                 rmb(); /* Refresh the dsb_sp_prod */
7206         }
7207         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7208         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7209
7210         return rc;
7211 }
7212
7213 static void bnx2x_reset_func(struct bnx2x *bp)
7214 {
7215         int port = BP_PORT(bp);
7216         int func = BP_FUNC(bp);
7217         int base, i;
7218
7219         /* Configure IGU */
7220         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7221         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7222
7223         /* Clear ILT */
7224         base = FUNC_ILT_BASE(func);
7225         for (i = base; i < base + ILT_PER_FUNC; i++)
7226                 bnx2x_ilt_wr(bp, i, 0);
7227 }
7228
7229 static void bnx2x_reset_port(struct bnx2x *bp)
7230 {
7231         int port = BP_PORT(bp);
7232         u32 val;
7233
7234         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7235
7236         /* Do not rcv packets to BRB */
7237         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7238         /* Do not direct rcv packets that are not for MCP to the BRB */
7239         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7240                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7241
7242         /* Configure AEU */
7243         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7244
7245         msleep(100);
7246         /* Check for BRB port occupancy */
7247         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7248         if (val)
7249                 DP(NETIF_MSG_IFDOWN,
7250                    "BRB1 is not empty  %d blocks are occupied\n", val);
7251
7252         /* TODO: Close Doorbell port? */
7253 }
7254
7255 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7256 {
7257         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7258            BP_FUNC(bp), reset_code);
7259
7260         switch (reset_code) {
7261         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7262                 bnx2x_reset_port(bp);
7263                 bnx2x_reset_func(bp);
7264                 bnx2x_reset_common(bp);
7265                 break;
7266
7267         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7268                 bnx2x_reset_port(bp);
7269                 bnx2x_reset_func(bp);
7270                 break;
7271
7272         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7273                 bnx2x_reset_func(bp);
7274                 break;
7275
7276         default:
7277                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7278                 break;
7279         }
7280 }
7281
7282 /* must be called with rtnl_lock */
7283 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7284 {
7285         int port = BP_PORT(bp);
7286         u32 reset_code = 0;
7287         int i, cnt, rc;
7288
7289         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7290
7291         bp->rx_mode = BNX2X_RX_MODE_NONE;
7292         bnx2x_set_storm_rx_mode(bp);
7293
7294         bnx2x_netif_stop(bp, 1);
7295
7296         del_timer_sync(&bp->timer);
7297         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7298                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7299         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7300
7301         /* Release IRQs */
7302         bnx2x_free_irq(bp);
7303
7304         /* Wait until tx fastpath tasks complete */
7305         for_each_tx_queue(bp, i) {
7306                 struct bnx2x_fastpath *fp = &bp->fp[i];
7307
7308                 cnt = 1000;
7309                 while (bnx2x_has_tx_work_unload(fp)) {
7310
7311                         bnx2x_tx_int(fp);
7312                         if (!cnt) {
7313                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7314                                           i);
7315 #ifdef BNX2X_STOP_ON_ERROR
7316                                 bnx2x_panic();
7317                                 return -EBUSY;
7318 #else
7319                                 break;
7320 #endif
7321                         }
7322                         cnt--;
7323                         msleep(1);
7324                 }
7325         }
7326         /* Give HW time to discard old tx messages */
7327         msleep(1);
7328
7329         if (CHIP_IS_E1(bp)) {
7330                 struct mac_configuration_cmd *config =
7331                                                 bnx2x_sp(bp, mcast_config);
7332
7333                 bnx2x_set_mac_addr_e1(bp, 0);
7334
7335                 for (i = 0; i < config->hdr.length; i++)
7336                         CAM_INVALIDATE(config->config_table[i]);
7337
7338                 config->hdr.length = i;
7339                 if (CHIP_REV_IS_SLOW(bp))
7340                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7341                 else
7342                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7343                 config->hdr.client_id = bp->fp->cl_id;
7344                 config->hdr.reserved1 = 0;
7345
7346                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7347                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7348                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7349
7350         } else { /* E1H */
7351                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7352
7353                 bnx2x_set_mac_addr_e1h(bp, 0);
7354
7355                 for (i = 0; i < MC_HASH_SIZE; i++)
7356                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7357
7358                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7359         }
7360
7361         if (unload_mode == UNLOAD_NORMAL)
7362                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7363
7364         else if (bp->flags & NO_WOL_FLAG)
7365                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7366
7367         else if (bp->wol) {
7368                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7369                 u8 *mac_addr = bp->dev->dev_addr;
7370                 u32 val;
7371                 /* The mac address is written to entries 1-4 to
7372                    preserve entry 0 which is used by the PMF */
7373                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7374
7375                 val = (mac_addr[0] << 8) | mac_addr[1];
7376                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7377
7378                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7379                       (mac_addr[4] << 8) | mac_addr[5];
7380                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7381
7382                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7383
7384         } else
7385                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7386
7387         /* Close multi and leading connections
7388            Completions for ramrods are collected in a synchronous way */
7389         for_each_nondefault_queue(bp, i)
7390                 if (bnx2x_stop_multi(bp, i))
7391                         goto unload_error;
7392
7393         rc = bnx2x_stop_leading(bp);
7394         if (rc) {
7395                 BNX2X_ERR("Stop leading failed!\n");
7396 #ifdef BNX2X_STOP_ON_ERROR
7397                 return -EBUSY;
7398 #else
7399                 goto unload_error;
7400 #endif
7401         }
7402
7403 unload_error:
7404         if (!BP_NOMCP(bp))
7405                 reset_code = bnx2x_fw_command(bp, reset_code);
7406         else {
7407                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7408                    load_count[0], load_count[1], load_count[2]);
7409                 load_count[0]--;
7410                 load_count[1 + port]--;
7411                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7412                    load_count[0], load_count[1], load_count[2]);
7413                 if (load_count[0] == 0)
7414                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7415                 else if (load_count[1 + port] == 0)
7416                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7417                 else
7418                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7419         }
7420
7421         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7422             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7423                 bnx2x__link_reset(bp);
7424
7425         /* Reset the chip */
7426         bnx2x_reset_chip(bp, reset_code);
7427
7428         /* Report UNLOAD_DONE to MCP */
7429         if (!BP_NOMCP(bp))
7430                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7431
7432         bp->port.pmf = 0;
7433
7434         /* Free SKBs, SGEs, TPA pool and driver internals */
7435         bnx2x_free_skbs(bp);
7436         for_each_rx_queue(bp, i)
7437                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7438         for_each_rx_queue(bp, i)
7439                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7440         bnx2x_free_mem(bp);
7441
7442         bp->state = BNX2X_STATE_CLOSED;
7443
7444         netif_carrier_off(bp->dev);
7445
7446         return 0;
7447 }
7448
7449 static void bnx2x_reset_task(struct work_struct *work)
7450 {
7451         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7452
7453 #ifdef BNX2X_STOP_ON_ERROR
7454         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7455                   " so reset not done to allow debug dump,\n"
7456          KERN_ERR " you will need to reboot when done\n");
7457         return;
7458 #endif
7459
7460         rtnl_lock();
7461
7462         if (!netif_running(bp->dev))
7463                 goto reset_task_exit;
7464
7465         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7466         bnx2x_nic_load(bp, LOAD_NORMAL);
7467
7468 reset_task_exit:
7469         rtnl_unlock();
7470 }
7471
7472 /* end of nic load/unload */
7473
7474 /* ethtool_ops */
7475
7476 /*
7477  * Init service functions
7478  */
7479
7480 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7481 {
7482         switch (func) {
7483         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7484         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7485         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7486         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7487         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7488         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7489         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7490         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7491         default:
7492                 BNX2X_ERR("Unsupported function index: %d\n", func);
7493                 return (u32)(-1);
7494         }
7495 }
7496
7497 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7498 {
7499         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7500
7501         /* Flush all outstanding writes */
7502         mmiowb();
7503
7504         /* Pretend to be function 0 */
7505         REG_WR(bp, reg, 0);
7506         /* Flush the GRC transaction (in the chip) */
7507         new_val = REG_RD(bp, reg);
7508         if (new_val != 0) {
7509                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7510                           new_val);
7511                 BUG();
7512         }
7513
7514         /* From now we are in the "like-E1" mode */
7515         bnx2x_int_disable(bp);
7516
7517         /* Flush all outstanding writes */
7518         mmiowb();
7519
7520         /* Restore the original funtion settings */
7521         REG_WR(bp, reg, orig_func);
7522         new_val = REG_RD(bp, reg);
7523         if (new_val != orig_func) {
7524                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7525                           orig_func, new_val);
7526                 BUG();
7527         }
7528 }
7529
7530 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7531 {
7532         if (CHIP_IS_E1H(bp))
7533                 bnx2x_undi_int_disable_e1h(bp, func);
7534         else
7535                 bnx2x_int_disable(bp);
7536 }
7537
7538 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7539 {
7540         u32 val;
7541
7542         /* Check if there is any driver already loaded */
7543         val = REG_RD(bp, MISC_REG_UNPREPARED);
7544         if (val == 0x1) {
7545                 /* Check if it is the UNDI driver
7546                  * UNDI driver initializes CID offset for normal bell to 0x7
7547                  */
7548                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7549                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7550                 if (val == 0x7) {
7551                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7552                         /* save our func */
7553                         int func = BP_FUNC(bp);
7554                         u32 swap_en;
7555                         u32 swap_val;
7556
7557                         /* clear the UNDI indication */
7558                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7559
7560                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7561
7562                         /* try unload UNDI on port 0 */
7563                         bp->func = 0;
7564                         bp->fw_seq =
7565                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7566                                 DRV_MSG_SEQ_NUMBER_MASK);
7567                         reset_code = bnx2x_fw_command(bp, reset_code);
7568
7569                         /* if UNDI is loaded on the other port */
7570                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7571
7572                                 /* send "DONE" for previous unload */
7573                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7574
7575                                 /* unload UNDI on port 1 */
7576                                 bp->func = 1;
7577                                 bp->fw_seq =
7578                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7579                                         DRV_MSG_SEQ_NUMBER_MASK);
7580                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7581
7582                                 bnx2x_fw_command(bp, reset_code);
7583                         }
7584
7585                         /* now it's safe to release the lock */
7586                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7587
7588                         bnx2x_undi_int_disable(bp, func);
7589
7590                         /* close input traffic and wait for it */
7591                         /* Do not rcv packets to BRB */
7592                         REG_WR(bp,
7593                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7594                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7595                         /* Do not direct rcv packets that are not for MCP to
7596                          * the BRB */
7597                         REG_WR(bp,
7598                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7599                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7600                         /* clear AEU */
7601                         REG_WR(bp,
7602                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7603                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7604                         msleep(10);
7605
7606                         /* save NIG port swap info */
7607                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7608                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7609                         /* reset device */
7610                         REG_WR(bp,
7611                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7612                                0xd3ffffff);
7613                         REG_WR(bp,
7614                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7615                                0x1403);
7616                         /* take the NIG out of reset and restore swap values */
7617                         REG_WR(bp,
7618                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7619                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7620                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7621                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7622
7623                         /* send unload done to the MCP */
7624                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7625
7626                         /* restore our func and fw_seq */
7627                         bp->func = func;
7628                         bp->fw_seq =
7629                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7630                                 DRV_MSG_SEQ_NUMBER_MASK);
7631
7632                 } else
7633                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7634         }
7635 }
7636
7637 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7638 {
7639         u32 val, val2, val3, val4, id;
7640         u16 pmc;
7641
7642         /* Get the chip revision id and number. */
7643         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7644         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7645         id = ((val & 0xffff) << 16);
7646         val = REG_RD(bp, MISC_REG_CHIP_REV);
7647         id |= ((val & 0xf) << 12);
7648         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7649         id |= ((val & 0xff) << 4);
7650         val = REG_RD(bp, MISC_REG_BOND_ID);
7651         id |= (val & 0xf);
7652         bp->common.chip_id = id;
7653         bp->link_params.chip_id = bp->common.chip_id;
7654         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7655
7656         val = (REG_RD(bp, 0x2874) & 0x55);
7657         if ((bp->common.chip_id & 0x1) ||
7658             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7659                 bp->flags |= ONE_PORT_FLAG;
7660                 BNX2X_DEV_INFO("single port device\n");
7661         }
7662
7663         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7664         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7665                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7666         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7667                        bp->common.flash_size, bp->common.flash_size);
7668
7669         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7670         bp->link_params.shmem_base = bp->common.shmem_base;
7671         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7672
7673         if (!bp->common.shmem_base ||
7674             (bp->common.shmem_base < 0xA0000) ||
7675             (bp->common.shmem_base >= 0xC0000)) {
7676                 BNX2X_DEV_INFO("MCP not active\n");
7677                 bp->flags |= NO_MCP_FLAG;
7678                 return;
7679         }
7680
7681         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7682         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7683                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7684                 BNX2X_ERR("BAD MCP validity signature\n");
7685
7686         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7687         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7688
7689         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7690                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7691                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7692
7693         bp->link_params.feature_config_flags = 0;
7694         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7695         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7696                 bp->link_params.feature_config_flags |=
7697                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7698         else
7699                 bp->link_params.feature_config_flags &=
7700                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7701
7702         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7703         bp->common.bc_ver = val;
7704         BNX2X_DEV_INFO("bc_ver %X\n", val);
7705         if (val < BNX2X_BC_VER) {
7706                 /* for now only warn
7707                  * later we might need to enforce this */
7708                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7709                           " please upgrade BC\n", BNX2X_BC_VER, val);
7710         }
7711         bp->link_params.feature_config_flags |=
7712                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7713                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7714
7715         if (BP_E1HVN(bp) == 0) {
7716                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7717                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7718         } else {
7719                 /* no WOL capability for E1HVN != 0 */
7720                 bp->flags |= NO_WOL_FLAG;
7721         }
7722         BNX2X_DEV_INFO("%sWoL capable\n",
7723                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7724
7725         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7726         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7727         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7728         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7729
7730         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7731                val, val2, val3, val4);
7732 }
7733
7734 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7735                                                     u32 switch_cfg)
7736 {
7737         int port = BP_PORT(bp);
7738         u32 ext_phy_type;
7739
7740         switch (switch_cfg) {
7741         case SWITCH_CFG_1G:
7742                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7743
7744                 ext_phy_type =
7745                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7746                 switch (ext_phy_type) {
7747                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7748                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7749                                        ext_phy_type);
7750
7751                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7752                                                SUPPORTED_10baseT_Full |
7753                                                SUPPORTED_100baseT_Half |
7754                                                SUPPORTED_100baseT_Full |
7755                                                SUPPORTED_1000baseT_Full |
7756                                                SUPPORTED_2500baseX_Full |
7757                                                SUPPORTED_TP |
7758                                                SUPPORTED_FIBRE |
7759                                                SUPPORTED_Autoneg |
7760                                                SUPPORTED_Pause |
7761                                                SUPPORTED_Asym_Pause);
7762                         break;
7763
7764                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7765                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7766                                        ext_phy_type);
7767
7768                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7769                                                SUPPORTED_10baseT_Full |
7770                                                SUPPORTED_100baseT_Half |
7771                                                SUPPORTED_100baseT_Full |
7772                                                SUPPORTED_1000baseT_Full |
7773                                                SUPPORTED_TP |
7774                                                SUPPORTED_FIBRE |
7775                                                SUPPORTED_Autoneg |
7776                                                SUPPORTED_Pause |
7777                                                SUPPORTED_Asym_Pause);
7778                         break;
7779
7780                 default:
7781                         BNX2X_ERR("NVRAM config error. "
7782                                   "BAD SerDes ext_phy_config 0x%x\n",
7783                                   bp->link_params.ext_phy_config);
7784                         return;
7785                 }
7786
7787                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7788                                            port*0x10);
7789                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7790                 break;
7791
7792         case SWITCH_CFG_10G:
7793                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7794
7795                 ext_phy_type =
7796                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7797                 switch (ext_phy_type) {
7798                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7799                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7800                                        ext_phy_type);
7801
7802                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7803                                                SUPPORTED_10baseT_Full |
7804                                                SUPPORTED_100baseT_Half |
7805                                                SUPPORTED_100baseT_Full |
7806                                                SUPPORTED_1000baseT_Full |
7807                                                SUPPORTED_2500baseX_Full |
7808                                                SUPPORTED_10000baseT_Full |
7809                                                SUPPORTED_TP |
7810                                                SUPPORTED_FIBRE |
7811                                                SUPPORTED_Autoneg |
7812                                                SUPPORTED_Pause |
7813                                                SUPPORTED_Asym_Pause);
7814                         break;
7815
7816                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7817                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7818                                        ext_phy_type);
7819
7820                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7821                                                SUPPORTED_1000baseT_Full |
7822                                                SUPPORTED_FIBRE |
7823                                                SUPPORTED_Autoneg |
7824                                                SUPPORTED_Pause |
7825                                                SUPPORTED_Asym_Pause);
7826                         break;
7827
7828                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7829                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7830                                        ext_phy_type);
7831
7832                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7833                                                SUPPORTED_2500baseX_Full |
7834                                                SUPPORTED_1000baseT_Full |
7835                                                SUPPORTED_FIBRE |
7836                                                SUPPORTED_Autoneg |
7837                                                SUPPORTED_Pause |
7838                                                SUPPORTED_Asym_Pause);
7839                         break;
7840
7841                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7842                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7843                                        ext_phy_type);
7844
7845                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7846                                                SUPPORTED_FIBRE |
7847                                                SUPPORTED_Pause |
7848                                                SUPPORTED_Asym_Pause);
7849                         break;
7850
7851                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7852                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7853                                        ext_phy_type);
7854
7855                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7856                                                SUPPORTED_1000baseT_Full |
7857                                                SUPPORTED_FIBRE |
7858                                                SUPPORTED_Pause |
7859                                                SUPPORTED_Asym_Pause);
7860                         break;
7861
7862                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7863                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7864                                        ext_phy_type);
7865
7866                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7867                                                SUPPORTED_1000baseT_Full |
7868                                                SUPPORTED_Autoneg |
7869                                                SUPPORTED_FIBRE |
7870                                                SUPPORTED_Pause |
7871                                                SUPPORTED_Asym_Pause);
7872                         break;
7873
7874                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7875                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
7876                                        ext_phy_type);
7877
7878                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7879                                                SUPPORTED_1000baseT_Full |
7880                                                SUPPORTED_Autoneg |
7881                                                SUPPORTED_FIBRE |
7882                                                SUPPORTED_Pause |
7883                                                SUPPORTED_Asym_Pause);
7884                         break;
7885
7886                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7887                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7888                                        ext_phy_type);
7889
7890                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7891                                                SUPPORTED_TP |
7892                                                SUPPORTED_Autoneg |
7893                                                SUPPORTED_Pause |
7894                                                SUPPORTED_Asym_Pause);
7895                         break;
7896
7897                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7898                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7899                                        ext_phy_type);
7900
7901                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7902                                                SUPPORTED_10baseT_Full |
7903                                                SUPPORTED_100baseT_Half |
7904                                                SUPPORTED_100baseT_Full |
7905                                                SUPPORTED_1000baseT_Full |
7906                                                SUPPORTED_10000baseT_Full |
7907                                                SUPPORTED_TP |
7908                                                SUPPORTED_Autoneg |
7909                                                SUPPORTED_Pause |
7910                                                SUPPORTED_Asym_Pause);
7911                         break;
7912
7913                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7914                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7915                                   bp->link_params.ext_phy_config);
7916                         break;
7917
7918                 default:
7919                         BNX2X_ERR("NVRAM config error. "
7920                                   "BAD XGXS ext_phy_config 0x%x\n",
7921                                   bp->link_params.ext_phy_config);
7922                         return;
7923                 }
7924
7925                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7926                                            port*0x18);
7927                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7928
7929                 break;
7930
7931         default:
7932                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7933                           bp->port.link_config);
7934                 return;
7935         }
7936         bp->link_params.phy_addr = bp->port.phy_addr;
7937
7938         /* mask what we support according to speed_cap_mask */
7939         if (!(bp->link_params.speed_cap_mask &
7940                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7941                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7942
7943         if (!(bp->link_params.speed_cap_mask &
7944                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7945                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7946
7947         if (!(bp->link_params.speed_cap_mask &
7948                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7949                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7950
7951         if (!(bp->link_params.speed_cap_mask &
7952                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7953                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7954
7955         if (!(bp->link_params.speed_cap_mask &
7956                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7957                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7958                                         SUPPORTED_1000baseT_Full);
7959
7960         if (!(bp->link_params.speed_cap_mask &
7961                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7962                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7963
7964         if (!(bp->link_params.speed_cap_mask &
7965                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7966                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7967
7968         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7969 }
7970
7971 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7972 {
7973         bp->link_params.req_duplex = DUPLEX_FULL;
7974
7975         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7976         case PORT_FEATURE_LINK_SPEED_AUTO:
7977                 if (bp->port.supported & SUPPORTED_Autoneg) {
7978                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7979                         bp->port.advertising = bp->port.supported;
7980                 } else {
7981                         u32 ext_phy_type =
7982                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7983
7984                         if ((ext_phy_type ==
7985                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7986                             (ext_phy_type ==
7987                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7988                                 /* force 10G, no AN */
7989                                 bp->link_params.req_line_speed = SPEED_10000;
7990                                 bp->port.advertising =
7991                                                 (ADVERTISED_10000baseT_Full |
7992                                                  ADVERTISED_FIBRE);
7993                                 break;
7994                         }
7995                         BNX2X_ERR("NVRAM config error. "
7996                                   "Invalid link_config 0x%x"
7997                                   "  Autoneg not supported\n",
7998                                   bp->port.link_config);
7999                         return;
8000                 }
8001                 break;
8002
8003         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8004                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8005                         bp->link_params.req_line_speed = SPEED_10;
8006                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8007                                                 ADVERTISED_TP);
8008                 } else {
8009                         BNX2X_ERR("NVRAM config error. "
8010                                   "Invalid link_config 0x%x"
8011                                   "  speed_cap_mask 0x%x\n",
8012                                   bp->port.link_config,
8013                                   bp->link_params.speed_cap_mask);
8014                         return;
8015                 }
8016                 break;
8017
8018         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8019                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8020                         bp->link_params.req_line_speed = SPEED_10;
8021                         bp->link_params.req_duplex = DUPLEX_HALF;
8022                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8023                                                 ADVERTISED_TP);
8024                 } else {
8025                         BNX2X_ERR("NVRAM config error. "
8026                                   "Invalid link_config 0x%x"
8027                                   "  speed_cap_mask 0x%x\n",
8028                                   bp->port.link_config,
8029                                   bp->link_params.speed_cap_mask);
8030                         return;
8031                 }
8032                 break;
8033
8034         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8035                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8036                         bp->link_params.req_line_speed = SPEED_100;
8037                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8038                                                 ADVERTISED_TP);
8039                 } else {
8040                         BNX2X_ERR("NVRAM config error. "
8041                                   "Invalid link_config 0x%x"
8042                                   "  speed_cap_mask 0x%x\n",
8043                                   bp->port.link_config,
8044                                   bp->link_params.speed_cap_mask);
8045                         return;
8046                 }
8047                 break;
8048
8049         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8050                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8051                         bp->link_params.req_line_speed = SPEED_100;
8052                         bp->link_params.req_duplex = DUPLEX_HALF;
8053                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8054                                                 ADVERTISED_TP);
8055                 } else {
8056                         BNX2X_ERR("NVRAM config error. "
8057                                   "Invalid link_config 0x%x"
8058                                   "  speed_cap_mask 0x%x\n",
8059                                   bp->port.link_config,
8060                                   bp->link_params.speed_cap_mask);
8061                         return;
8062                 }
8063                 break;
8064
8065         case PORT_FEATURE_LINK_SPEED_1G:
8066                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8067                         bp->link_params.req_line_speed = SPEED_1000;
8068                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8069                                                 ADVERTISED_TP);
8070                 } else {
8071                         BNX2X_ERR("NVRAM config error. "
8072                                   "Invalid link_config 0x%x"
8073                                   "  speed_cap_mask 0x%x\n",
8074                                   bp->port.link_config,
8075                                   bp->link_params.speed_cap_mask);
8076                         return;
8077                 }
8078                 break;
8079
8080         case PORT_FEATURE_LINK_SPEED_2_5G:
8081                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8082                         bp->link_params.req_line_speed = SPEED_2500;
8083                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8084                                                 ADVERTISED_TP);
8085                 } else {
8086                         BNX2X_ERR("NVRAM config error. "
8087                                   "Invalid link_config 0x%x"
8088                                   "  speed_cap_mask 0x%x\n",
8089                                   bp->port.link_config,
8090                                   bp->link_params.speed_cap_mask);
8091                         return;
8092                 }
8093                 break;
8094
8095         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8096         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8097         case PORT_FEATURE_LINK_SPEED_10G_KR:
8098                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8099                         bp->link_params.req_line_speed = SPEED_10000;
8100                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8101                                                 ADVERTISED_FIBRE);
8102                 } else {
8103                         BNX2X_ERR("NVRAM config error. "
8104                                   "Invalid link_config 0x%x"
8105                                   "  speed_cap_mask 0x%x\n",
8106                                   bp->port.link_config,
8107                                   bp->link_params.speed_cap_mask);
8108                         return;
8109                 }
8110                 break;
8111
8112         default:
8113                 BNX2X_ERR("NVRAM config error. "
8114                           "BAD link speed link_config 0x%x\n",
8115                           bp->port.link_config);
8116                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8117                 bp->port.advertising = bp->port.supported;
8118                 break;
8119         }
8120
8121         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8122                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8123         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8124             !(bp->port.supported & SUPPORTED_Autoneg))
8125                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8126
8127         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8128                        "  advertising 0x%x\n",
8129                        bp->link_params.req_line_speed,
8130                        bp->link_params.req_duplex,
8131                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8132 }
8133
8134 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8135 {
8136         int port = BP_PORT(bp);
8137         u32 val, val2;
8138         u32 config;
8139         u16 i;
8140
8141         bp->link_params.bp = bp;
8142         bp->link_params.port = port;
8143
8144         bp->link_params.lane_config =
8145                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8146         bp->link_params.ext_phy_config =
8147                 SHMEM_RD(bp,
8148                          dev_info.port_hw_config[port].external_phy_config);
8149         /* BCM8727_NOC => BCM8727 no over current */
8150         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8151             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8152                 bp->link_params.ext_phy_config &=
8153                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8154                 bp->link_params.ext_phy_config |=
8155                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8156                 bp->link_params.feature_config_flags |=
8157                         FEATURE_CONFIG_BCM8727_NOC;
8158         }
8159
8160         bp->link_params.speed_cap_mask =
8161                 SHMEM_RD(bp,
8162                          dev_info.port_hw_config[port].speed_capability_mask);
8163
8164         bp->port.link_config =
8165                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8166
8167         /* Get the 4 lanes xgxs config rx and tx */
8168         for (i = 0; i < 2; i++) {
8169                 val = SHMEM_RD(bp,
8170                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8171                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8172                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8173
8174                 val = SHMEM_RD(bp,
8175                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8176                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8177                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8178         }
8179
8180         /* If the device is capable of WoL, set the default state according
8181          * to the HW
8182          */
8183         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8184         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8185                    (config & PORT_FEATURE_WOL_ENABLED));
8186
8187         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8188                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8189                        bp->link_params.lane_config,
8190                        bp->link_params.ext_phy_config,
8191                        bp->link_params.speed_cap_mask, bp->port.link_config);
8192
8193         bp->link_params.switch_cfg |= (bp->port.link_config &
8194                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8195         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8196
8197         bnx2x_link_settings_requested(bp);
8198
8199         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8200         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8201         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8202         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8203         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8204         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8205         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8206         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8207         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8208         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8209 }
8210
8211 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8212 {
8213         int func = BP_FUNC(bp);
8214         u32 val, val2;
8215         int rc = 0;
8216
8217         bnx2x_get_common_hwinfo(bp);
8218
8219         bp->e1hov = 0;
8220         bp->e1hmf = 0;
8221         if (CHIP_IS_E1H(bp)) {
8222                 bp->mf_config =
8223                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8224
8225                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8226                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8227                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8228
8229                         bp->e1hov = val;
8230                         bp->e1hmf = 1;
8231                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
8232                                        "(0x%04x)\n",
8233                                        func, bp->e1hov, bp->e1hov);
8234                 } else {
8235                         BNX2X_DEV_INFO("single function mode\n");
8236                         if (BP_E1HVN(bp)) {
8237                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8238                                           "  aborting\n", func);
8239                                 rc = -EPERM;
8240                         }
8241                 }
8242         }
8243
8244         if (!BP_NOMCP(bp)) {
8245                 bnx2x_get_port_hwinfo(bp);
8246
8247                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8248                               DRV_MSG_SEQ_NUMBER_MASK);
8249                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8250         }
8251
8252         if (IS_E1HMF(bp)) {
8253                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8254                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8255                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8256                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8257                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8258                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8259                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8260                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8261                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8262                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8263                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8264                                ETH_ALEN);
8265                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8266                                ETH_ALEN);
8267                 }
8268
8269                 return rc;
8270         }
8271
8272         if (BP_NOMCP(bp)) {
8273                 /* only supposed to happen on emulation/FPGA */
8274                 BNX2X_ERR("warning random MAC workaround active\n");
8275                 random_ether_addr(bp->dev->dev_addr);
8276                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8277         }
8278
8279         return rc;
8280 }
8281
8282 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8283 {
8284         int func = BP_FUNC(bp);
8285         int timer_interval;
8286         int rc;
8287
8288         /* Disable interrupt handling until HW is initialized */
8289         atomic_set(&bp->intr_sem, 1);
8290         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8291
8292         mutex_init(&bp->port.phy_mutex);
8293
8294         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8295         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8296
8297         rc = bnx2x_get_hwinfo(bp);
8298
8299         /* need to reset chip if undi was active */
8300         if (!BP_NOMCP(bp))
8301                 bnx2x_undi_unload(bp);
8302
8303         if (CHIP_REV_IS_FPGA(bp))
8304                 printk(KERN_ERR PFX "FPGA detected\n");
8305
8306         if (BP_NOMCP(bp) && (func == 0))
8307                 printk(KERN_ERR PFX
8308                        "MCP disabled, must load devices in order!\n");
8309
8310         /* Set multi queue mode */
8311         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8312             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8313                 printk(KERN_ERR PFX
8314                       "Multi disabled since int_mode requested is not MSI-X\n");
8315                 multi_mode = ETH_RSS_MODE_DISABLED;
8316         }
8317         bp->multi_mode = multi_mode;
8318
8319
8320         /* Set TPA flags */
8321         if (disable_tpa) {
8322                 bp->flags &= ~TPA_ENABLE_FLAG;
8323                 bp->dev->features &= ~NETIF_F_LRO;
8324         } else {
8325                 bp->flags |= TPA_ENABLE_FLAG;
8326                 bp->dev->features |= NETIF_F_LRO;
8327         }
8328
8329         bp->mrrs = mrrs;
8330
8331         bp->tx_ring_size = MAX_TX_AVAIL;
8332         bp->rx_ring_size = MAX_RX_AVAIL;
8333
8334         bp->rx_csum = 1;
8335
8336         bp->tx_ticks = 50;
8337         bp->rx_ticks = 25;
8338
8339         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8340         bp->current_interval = (poll ? poll : timer_interval);
8341
8342         init_timer(&bp->timer);
8343         bp->timer.expires = jiffies + bp->current_interval;
8344         bp->timer.data = (unsigned long) bp;
8345         bp->timer.function = bnx2x_timer;
8346
8347         return rc;
8348 }
8349
8350 /*
8351  * ethtool service functions
8352  */
8353
8354 /* All ethtool functions called with rtnl_lock */
8355
8356 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8357 {
8358         struct bnx2x *bp = netdev_priv(dev);
8359
8360         cmd->supported = bp->port.supported;
8361         cmd->advertising = bp->port.advertising;
8362
8363         if (netif_carrier_ok(dev)) {
8364                 cmd->speed = bp->link_vars.line_speed;
8365                 cmd->duplex = bp->link_vars.duplex;
8366         } else {
8367                 cmd->speed = bp->link_params.req_line_speed;
8368                 cmd->duplex = bp->link_params.req_duplex;
8369         }
8370         if (IS_E1HMF(bp)) {
8371                 u16 vn_max_rate;
8372
8373                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8374                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8375                 if (vn_max_rate < cmd->speed)
8376                         cmd->speed = vn_max_rate;
8377         }
8378
8379         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8380                 u32 ext_phy_type =
8381                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8382
8383                 switch (ext_phy_type) {
8384                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8385                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8386                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8387                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8388                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8389                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8390                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8391                         cmd->port = PORT_FIBRE;
8392                         break;
8393
8394                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8395                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8396                         cmd->port = PORT_TP;
8397                         break;
8398
8399                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8400                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8401                                   bp->link_params.ext_phy_config);
8402                         break;
8403
8404                 default:
8405                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8406                            bp->link_params.ext_phy_config);
8407                         break;
8408                 }
8409         } else
8410                 cmd->port = PORT_TP;
8411
8412         cmd->phy_address = bp->port.phy_addr;
8413         cmd->transceiver = XCVR_INTERNAL;
8414
8415         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8416                 cmd->autoneg = AUTONEG_ENABLE;
8417         else
8418                 cmd->autoneg = AUTONEG_DISABLE;
8419
8420         cmd->maxtxpkt = 0;
8421         cmd->maxrxpkt = 0;
8422
8423         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8424            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8425            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8426            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8427            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8428            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8429            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8430
8431         return 0;
8432 }
8433
8434 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8435 {
8436         struct bnx2x *bp = netdev_priv(dev);
8437         u32 advertising;
8438
8439         if (IS_E1HMF(bp))
8440                 return 0;
8441
8442         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8443            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8444            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8445            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8446            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8447            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8448            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8449
8450         if (cmd->autoneg == AUTONEG_ENABLE) {
8451                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8452                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8453                         return -EINVAL;
8454                 }
8455
8456                 /* advertise the requested speed and duplex if supported */
8457                 cmd->advertising &= bp->port.supported;
8458
8459                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8460                 bp->link_params.req_duplex = DUPLEX_FULL;
8461                 bp->port.advertising |= (ADVERTISED_Autoneg |
8462                                          cmd->advertising);
8463
8464         } else { /* forced speed */
8465                 /* advertise the requested speed and duplex if supported */
8466                 switch (cmd->speed) {
8467                 case SPEED_10:
8468                         if (cmd->duplex == DUPLEX_FULL) {
8469                                 if (!(bp->port.supported &
8470                                       SUPPORTED_10baseT_Full)) {
8471                                         DP(NETIF_MSG_LINK,
8472                                            "10M full not supported\n");
8473                                         return -EINVAL;
8474                                 }
8475
8476                                 advertising = (ADVERTISED_10baseT_Full |
8477                                                ADVERTISED_TP);
8478                         } else {
8479                                 if (!(bp->port.supported &
8480                                       SUPPORTED_10baseT_Half)) {
8481                                         DP(NETIF_MSG_LINK,
8482                                            "10M half not supported\n");
8483                                         return -EINVAL;
8484                                 }
8485
8486                                 advertising = (ADVERTISED_10baseT_Half |
8487                                                ADVERTISED_TP);
8488                         }
8489                         break;
8490
8491                 case SPEED_100:
8492                         if (cmd->duplex == DUPLEX_FULL) {
8493                                 if (!(bp->port.supported &
8494                                                 SUPPORTED_100baseT_Full)) {
8495                                         DP(NETIF_MSG_LINK,
8496                                            "100M full not supported\n");
8497                                         return -EINVAL;
8498                                 }
8499
8500                                 advertising = (ADVERTISED_100baseT_Full |
8501                                                ADVERTISED_TP);
8502                         } else {
8503                                 if (!(bp->port.supported &
8504                                                 SUPPORTED_100baseT_Half)) {
8505                                         DP(NETIF_MSG_LINK,
8506                                            "100M half not supported\n");
8507                                         return -EINVAL;
8508                                 }
8509
8510                                 advertising = (ADVERTISED_100baseT_Half |
8511                                                ADVERTISED_TP);
8512                         }
8513                         break;
8514
8515                 case SPEED_1000:
8516                         if (cmd->duplex != DUPLEX_FULL) {
8517                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8518                                 return -EINVAL;
8519                         }
8520
8521                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8522                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8523                                 return -EINVAL;
8524                         }
8525
8526                         advertising = (ADVERTISED_1000baseT_Full |
8527                                        ADVERTISED_TP);
8528                         break;
8529
8530                 case SPEED_2500:
8531                         if (cmd->duplex != DUPLEX_FULL) {
8532                                 DP(NETIF_MSG_LINK,
8533                                    "2.5G half not supported\n");
8534                                 return -EINVAL;
8535                         }
8536
8537                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8538                                 DP(NETIF_MSG_LINK,
8539                                    "2.5G full not supported\n");
8540                                 return -EINVAL;
8541                         }
8542
8543                         advertising = (ADVERTISED_2500baseX_Full |
8544                                        ADVERTISED_TP);
8545                         break;
8546
8547                 case SPEED_10000:
8548                         if (cmd->duplex != DUPLEX_FULL) {
8549                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8550                                 return -EINVAL;
8551                         }
8552
8553                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8554                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8555                                 return -EINVAL;
8556                         }
8557
8558                         advertising = (ADVERTISED_10000baseT_Full |
8559                                        ADVERTISED_FIBRE);
8560                         break;
8561
8562                 default:
8563                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8564                         return -EINVAL;
8565                 }
8566
8567                 bp->link_params.req_line_speed = cmd->speed;
8568                 bp->link_params.req_duplex = cmd->duplex;
8569                 bp->port.advertising = advertising;
8570         }
8571
8572         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8573            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8574            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8575            bp->port.advertising);
8576
8577         if (netif_running(dev)) {
8578                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8579                 bnx2x_link_set(bp);
8580         }
8581
8582         return 0;
8583 }
8584
8585 #define PHY_FW_VER_LEN                  10
8586
8587 static void bnx2x_get_drvinfo(struct net_device *dev,
8588                               struct ethtool_drvinfo *info)
8589 {
8590         struct bnx2x *bp = netdev_priv(dev);
8591         u8 phy_fw_ver[PHY_FW_VER_LEN];
8592
8593         strcpy(info->driver, DRV_MODULE_NAME);
8594         strcpy(info->version, DRV_MODULE_VERSION);
8595
8596         phy_fw_ver[0] = '\0';
8597         if (bp->port.pmf) {
8598                 bnx2x_acquire_phy_lock(bp);
8599                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8600                                              (bp->state != BNX2X_STATE_CLOSED),
8601                                              phy_fw_ver, PHY_FW_VER_LEN);
8602                 bnx2x_release_phy_lock(bp);
8603         }
8604
8605         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8606                  (bp->common.bc_ver & 0xff0000) >> 16,
8607                  (bp->common.bc_ver & 0xff00) >> 8,
8608                  (bp->common.bc_ver & 0xff),
8609                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8610         strcpy(info->bus_info, pci_name(bp->pdev));
8611         info->n_stats = BNX2X_NUM_STATS;
8612         info->testinfo_len = BNX2X_NUM_TESTS;
8613         info->eedump_len = bp->common.flash_size;
8614         info->regdump_len = 0;
8615 }
8616
8617 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8618 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8619
8620 static int bnx2x_get_regs_len(struct net_device *dev)
8621 {
8622         static u32 regdump_len;
8623         struct bnx2x *bp = netdev_priv(dev);
8624         int i;
8625
8626         if (regdump_len)
8627                 return regdump_len;
8628
8629         if (CHIP_IS_E1(bp)) {
8630                 for (i = 0; i < REGS_COUNT; i++)
8631                         if (IS_E1_ONLINE(reg_addrs[i].info))
8632                                 regdump_len += reg_addrs[i].size;
8633
8634                 for (i = 0; i < WREGS_COUNT_E1; i++)
8635                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8636                                 regdump_len += wreg_addrs_e1[i].size *
8637                                         (1 + wreg_addrs_e1[i].read_regs_count);
8638
8639         } else { /* E1H */
8640                 for (i = 0; i < REGS_COUNT; i++)
8641                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8642                                 regdump_len += reg_addrs[i].size;
8643
8644                 for (i = 0; i < WREGS_COUNT_E1H; i++)
8645                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8646                                 regdump_len += wreg_addrs_e1h[i].size *
8647                                         (1 + wreg_addrs_e1h[i].read_regs_count);
8648         }
8649         regdump_len *= 4;
8650         regdump_len += sizeof(struct dump_hdr);
8651
8652         return regdump_len;
8653 }
8654
8655 static void bnx2x_get_regs(struct net_device *dev,
8656                            struct ethtool_regs *regs, void *_p)
8657 {
8658         u32 *p = _p, i, j;
8659         struct bnx2x *bp = netdev_priv(dev);
8660         struct dump_hdr dump_hdr = {0};
8661
8662         regs->version = 0;
8663         memset(p, 0, regs->len);
8664
8665         if (!netif_running(bp->dev))
8666                 return;
8667
8668         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8669         dump_hdr.dump_sign = dump_sign_all;
8670         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8671         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8672         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8673         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8674         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8675
8676         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8677         p += dump_hdr.hdr_size + 1;
8678
8679         if (CHIP_IS_E1(bp)) {
8680                 for (i = 0; i < REGS_COUNT; i++)
8681                         if (IS_E1_ONLINE(reg_addrs[i].info))
8682                                 for (j = 0; j < reg_addrs[i].size; j++)
8683                                         *p++ = REG_RD(bp,
8684                                                       reg_addrs[i].addr + j*4);
8685
8686         } else { /* E1H */
8687                 for (i = 0; i < REGS_COUNT; i++)
8688                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8689                                 for (j = 0; j < reg_addrs[i].size; j++)
8690                                         *p++ = REG_RD(bp,
8691                                                       reg_addrs[i].addr + j*4);
8692         }
8693 }
8694
8695 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8696 {
8697         struct bnx2x *bp = netdev_priv(dev);
8698
8699         if (bp->flags & NO_WOL_FLAG) {
8700                 wol->supported = 0;
8701                 wol->wolopts = 0;
8702         } else {
8703                 wol->supported = WAKE_MAGIC;
8704                 if (bp->wol)
8705                         wol->wolopts = WAKE_MAGIC;
8706                 else
8707                         wol->wolopts = 0;
8708         }
8709         memset(&wol->sopass, 0, sizeof(wol->sopass));
8710 }
8711
8712 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8713 {
8714         struct bnx2x *bp = netdev_priv(dev);
8715
8716         if (wol->wolopts & ~WAKE_MAGIC)
8717                 return -EINVAL;
8718
8719         if (wol->wolopts & WAKE_MAGIC) {
8720                 if (bp->flags & NO_WOL_FLAG)
8721                         return -EINVAL;
8722
8723                 bp->wol = 1;
8724         } else
8725                 bp->wol = 0;
8726
8727         return 0;
8728 }
8729
8730 static u32 bnx2x_get_msglevel(struct net_device *dev)
8731 {
8732         struct bnx2x *bp = netdev_priv(dev);
8733
8734         return bp->msglevel;
8735 }
8736
8737 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8738 {
8739         struct bnx2x *bp = netdev_priv(dev);
8740
8741         if (capable(CAP_NET_ADMIN))
8742                 bp->msglevel = level;
8743 }
8744
8745 static int bnx2x_nway_reset(struct net_device *dev)
8746 {
8747         struct bnx2x *bp = netdev_priv(dev);
8748
8749         if (!bp->port.pmf)
8750                 return 0;
8751
8752         if (netif_running(dev)) {
8753                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8754                 bnx2x_link_set(bp);
8755         }
8756
8757         return 0;
8758 }
8759
8760 static u32
8761 bnx2x_get_link(struct net_device *dev)
8762 {
8763         struct bnx2x *bp = netdev_priv(dev);
8764
8765         return bp->link_vars.link_up;
8766 }
8767
8768 static int bnx2x_get_eeprom_len(struct net_device *dev)
8769 {
8770         struct bnx2x *bp = netdev_priv(dev);
8771
8772         return bp->common.flash_size;
8773 }
8774
8775 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8776 {
8777         int port = BP_PORT(bp);
8778         int count, i;
8779         u32 val = 0;
8780
8781         /* adjust timeout for emulation/FPGA */
8782         count = NVRAM_TIMEOUT_COUNT;
8783         if (CHIP_REV_IS_SLOW(bp))
8784                 count *= 100;
8785
8786         /* request access to nvram interface */
8787         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8788                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8789
8790         for (i = 0; i < count*10; i++) {
8791                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8792                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8793                         break;
8794
8795                 udelay(5);
8796         }
8797
8798         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8799                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8800                 return -EBUSY;
8801         }
8802
8803         return 0;
8804 }
8805
8806 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8807 {
8808         int port = BP_PORT(bp);
8809         int count, i;
8810         u32 val = 0;
8811
8812         /* adjust timeout for emulation/FPGA */
8813         count = NVRAM_TIMEOUT_COUNT;
8814         if (CHIP_REV_IS_SLOW(bp))
8815                 count *= 100;
8816
8817         /* relinquish nvram interface */
8818         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8819                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8820
8821         for (i = 0; i < count*10; i++) {
8822                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8823                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8824                         break;
8825
8826                 udelay(5);
8827         }
8828
8829         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8830                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8831                 return -EBUSY;
8832         }
8833
8834         return 0;
8835 }
8836
8837 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8838 {
8839         u32 val;
8840
8841         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8842
8843         /* enable both bits, even on read */
8844         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8845                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8846                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8847 }
8848
8849 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8850 {
8851         u32 val;
8852
8853         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8854
8855         /* disable both bits, even after read */
8856         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8857                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8858                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8859 }
8860
8861 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8862                                   u32 cmd_flags)
8863 {
8864         int count, i, rc;
8865         u32 val;
8866
8867         /* build the command word */
8868         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8869
8870         /* need to clear DONE bit separately */
8871         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8872
8873         /* address of the NVRAM to read from */
8874         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8875                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8876
8877         /* issue a read command */
8878         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8879
8880         /* adjust timeout for emulation/FPGA */
8881         count = NVRAM_TIMEOUT_COUNT;
8882         if (CHIP_REV_IS_SLOW(bp))
8883                 count *= 100;
8884
8885         /* wait for completion */
8886         *ret_val = 0;
8887         rc = -EBUSY;
8888         for (i = 0; i < count; i++) {
8889                 udelay(5);
8890                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8891
8892                 if (val & MCPR_NVM_COMMAND_DONE) {
8893                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8894                         /* we read nvram data in cpu order
8895                          * but ethtool sees it as an array of bytes
8896                          * converting to big-endian will do the work */
8897                         *ret_val = cpu_to_be32(val);
8898                         rc = 0;
8899                         break;
8900                 }
8901         }
8902
8903         return rc;
8904 }
8905
8906 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8907                             int buf_size)
8908 {
8909         int rc;
8910         u32 cmd_flags;
8911         __be32 val;
8912
8913         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8914                 DP(BNX2X_MSG_NVM,
8915                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8916                    offset, buf_size);
8917                 return -EINVAL;
8918         }
8919
8920         if (offset + buf_size > bp->common.flash_size) {
8921                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8922                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8923                    offset, buf_size, bp->common.flash_size);
8924                 return -EINVAL;
8925         }
8926
8927         /* request access to nvram interface */
8928         rc = bnx2x_acquire_nvram_lock(bp);
8929         if (rc)
8930                 return rc;
8931
8932         /* enable access to nvram interface */
8933         bnx2x_enable_nvram_access(bp);
8934
8935         /* read the first word(s) */
8936         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8937         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8938                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8939                 memcpy(ret_buf, &val, 4);
8940
8941                 /* advance to the next dword */
8942                 offset += sizeof(u32);
8943                 ret_buf += sizeof(u32);
8944                 buf_size -= sizeof(u32);
8945                 cmd_flags = 0;
8946         }
8947
8948         if (rc == 0) {
8949                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8950                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8951                 memcpy(ret_buf, &val, 4);
8952         }
8953
8954         /* disable access to nvram interface */
8955         bnx2x_disable_nvram_access(bp);
8956         bnx2x_release_nvram_lock(bp);
8957
8958         return rc;
8959 }
8960
8961 static int bnx2x_get_eeprom(struct net_device *dev,
8962                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8963 {
8964         struct bnx2x *bp = netdev_priv(dev);
8965         int rc;
8966
8967         if (!netif_running(dev))
8968                 return -EAGAIN;
8969
8970         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8971            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8972            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8973            eeprom->len, eeprom->len);
8974
8975         /* parameters already validated in ethtool_get_eeprom */
8976
8977         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8978
8979         return rc;
8980 }
8981
8982 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8983                                    u32 cmd_flags)
8984 {
8985         int count, i, rc;
8986
8987         /* build the command word */
8988         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8989
8990         /* need to clear DONE bit separately */
8991         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8992
8993         /* write the data */
8994         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8995
8996         /* address of the NVRAM to write to */
8997         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8998                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8999
9000         /* issue the write command */
9001         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9002
9003         /* adjust timeout for emulation/FPGA */
9004         count = NVRAM_TIMEOUT_COUNT;
9005         if (CHIP_REV_IS_SLOW(bp))
9006                 count *= 100;
9007
9008         /* wait for completion */
9009         rc = -EBUSY;
9010         for (i = 0; i < count; i++) {
9011                 udelay(5);
9012                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9013                 if (val & MCPR_NVM_COMMAND_DONE) {
9014                         rc = 0;
9015                         break;
9016                 }
9017         }
9018
9019         return rc;
9020 }
9021
9022 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9023
9024 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9025                               int buf_size)
9026 {
9027         int rc;
9028         u32 cmd_flags;
9029         u32 align_offset;
9030         __be32 val;
9031
9032         if (offset + buf_size > bp->common.flash_size) {
9033                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9034                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9035                    offset, buf_size, bp->common.flash_size);
9036                 return -EINVAL;
9037         }
9038
9039         /* request access to nvram interface */
9040         rc = bnx2x_acquire_nvram_lock(bp);
9041         if (rc)
9042                 return rc;
9043
9044         /* enable access to nvram interface */
9045         bnx2x_enable_nvram_access(bp);
9046
9047         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9048         align_offset = (offset & ~0x03);
9049         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9050
9051         if (rc == 0) {
9052                 val &= ~(0xff << BYTE_OFFSET(offset));
9053                 val |= (*data_buf << BYTE_OFFSET(offset));
9054
9055                 /* nvram data is returned as an array of bytes
9056                  * convert it back to cpu order */
9057                 val = be32_to_cpu(val);
9058
9059                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9060                                              cmd_flags);
9061         }
9062
9063         /* disable access to nvram interface */
9064         bnx2x_disable_nvram_access(bp);
9065         bnx2x_release_nvram_lock(bp);
9066
9067         return rc;
9068 }
9069
9070 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9071                              int buf_size)
9072 {
9073         int rc;
9074         u32 cmd_flags;
9075         u32 val;
9076         u32 written_so_far;
9077
9078         if (buf_size == 1)      /* ethtool */
9079                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9080
9081         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9082                 DP(BNX2X_MSG_NVM,
9083                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9084                    offset, buf_size);
9085                 return -EINVAL;
9086         }
9087
9088         if (offset + buf_size > bp->common.flash_size) {
9089                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9090                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9091                    offset, buf_size, bp->common.flash_size);
9092                 return -EINVAL;
9093         }
9094
9095         /* request access to nvram interface */
9096         rc = bnx2x_acquire_nvram_lock(bp);
9097         if (rc)
9098                 return rc;
9099
9100         /* enable access to nvram interface */
9101         bnx2x_enable_nvram_access(bp);
9102
9103         written_so_far = 0;
9104         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9105         while ((written_so_far < buf_size) && (rc == 0)) {
9106                 if (written_so_far == (buf_size - sizeof(u32)))
9107                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9108                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9109                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9110                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9111                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9112
9113                 memcpy(&val, data_buf, 4);
9114
9115                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9116
9117                 /* advance to the next dword */
9118                 offset += sizeof(u32);
9119                 data_buf += sizeof(u32);
9120                 written_so_far += sizeof(u32);
9121                 cmd_flags = 0;
9122         }
9123
9124         /* disable access to nvram interface */
9125         bnx2x_disable_nvram_access(bp);
9126         bnx2x_release_nvram_lock(bp);
9127
9128         return rc;
9129 }
9130
9131 static int bnx2x_set_eeprom(struct net_device *dev,
9132                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9133 {
9134         struct bnx2x *bp = netdev_priv(dev);
9135         int rc;
9136
9137         if (!netif_running(dev))
9138                 return -EAGAIN;
9139
9140         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9141            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9142            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9143            eeprom->len, eeprom->len);
9144
9145         /* parameters already validated in ethtool_set_eeprom */
9146
9147         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9148         if (eeprom->magic == 0x00504859)
9149                 if (bp->port.pmf) {
9150
9151                         bnx2x_acquire_phy_lock(bp);
9152                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
9153                                              bp->link_params.ext_phy_config,
9154                                              (bp->state != BNX2X_STATE_CLOSED),
9155                                              eebuf, eeprom->len);
9156                         if ((bp->state == BNX2X_STATE_OPEN) ||
9157                             (bp->state == BNX2X_STATE_DISABLED)) {
9158                                 rc |= bnx2x_link_reset(&bp->link_params,
9159                                                        &bp->link_vars, 1);
9160                                 rc |= bnx2x_phy_init(&bp->link_params,
9161                                                      &bp->link_vars);
9162                         }
9163                         bnx2x_release_phy_lock(bp);
9164
9165                 } else /* Only the PMF can access the PHY */
9166                         return -EINVAL;
9167         else
9168                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9169
9170         return rc;
9171 }
9172
9173 static int bnx2x_get_coalesce(struct net_device *dev,
9174                               struct ethtool_coalesce *coal)
9175 {
9176         struct bnx2x *bp = netdev_priv(dev);
9177
9178         memset(coal, 0, sizeof(struct ethtool_coalesce));
9179
9180         coal->rx_coalesce_usecs = bp->rx_ticks;
9181         coal->tx_coalesce_usecs = bp->tx_ticks;
9182
9183         return 0;
9184 }
9185
9186 static int bnx2x_set_coalesce(struct net_device *dev,
9187                               struct ethtool_coalesce *coal)
9188 {
9189         struct bnx2x *bp = netdev_priv(dev);
9190
9191         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9192         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9193                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9194
9195         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9196         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9197                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9198
9199         if (netif_running(dev))
9200                 bnx2x_update_coalesce(bp);
9201
9202         return 0;
9203 }
9204
9205 static void bnx2x_get_ringparam(struct net_device *dev,
9206                                 struct ethtool_ringparam *ering)
9207 {
9208         struct bnx2x *bp = netdev_priv(dev);
9209
9210         ering->rx_max_pending = MAX_RX_AVAIL;
9211         ering->rx_mini_max_pending = 0;
9212         ering->rx_jumbo_max_pending = 0;
9213
9214         ering->rx_pending = bp->rx_ring_size;
9215         ering->rx_mini_pending = 0;
9216         ering->rx_jumbo_pending = 0;
9217
9218         ering->tx_max_pending = MAX_TX_AVAIL;
9219         ering->tx_pending = bp->tx_ring_size;
9220 }
9221
9222 static int bnx2x_set_ringparam(struct net_device *dev,
9223                                struct ethtool_ringparam *ering)
9224 {
9225         struct bnx2x *bp = netdev_priv(dev);
9226         int rc = 0;
9227
9228         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9229             (ering->tx_pending > MAX_TX_AVAIL) ||
9230             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9231                 return -EINVAL;
9232
9233         bp->rx_ring_size = ering->rx_pending;
9234         bp->tx_ring_size = ering->tx_pending;
9235
9236         if (netif_running(dev)) {
9237                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9238                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9239         }
9240
9241         return rc;
9242 }
9243
9244 static void bnx2x_get_pauseparam(struct net_device *dev,
9245                                  struct ethtool_pauseparam *epause)
9246 {
9247         struct bnx2x *bp = netdev_priv(dev);
9248
9249         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9250                            BNX2X_FLOW_CTRL_AUTO) &&
9251                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9252
9253         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9254                             BNX2X_FLOW_CTRL_RX);
9255         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9256                             BNX2X_FLOW_CTRL_TX);
9257
9258         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9259            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9260            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9261 }
9262
9263 static int bnx2x_set_pauseparam(struct net_device *dev,
9264                                 struct ethtool_pauseparam *epause)
9265 {
9266         struct bnx2x *bp = netdev_priv(dev);
9267
9268         if (IS_E1HMF(bp))
9269                 return 0;
9270
9271         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9272            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9273            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9274
9275         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9276
9277         if (epause->rx_pause)
9278                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9279
9280         if (epause->tx_pause)
9281                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9282
9283         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9284                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9285
9286         if (epause->autoneg) {
9287                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9288                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9289                         return -EINVAL;
9290                 }
9291
9292                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9293                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9294         }
9295
9296         DP(NETIF_MSG_LINK,
9297            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9298
9299         if (netif_running(dev)) {
9300                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9301                 bnx2x_link_set(bp);
9302         }
9303
9304         return 0;
9305 }
9306
9307 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9308 {
9309         struct bnx2x *bp = netdev_priv(dev);
9310         int changed = 0;
9311         int rc = 0;
9312
9313         /* TPA requires Rx CSUM offloading */
9314         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9315                 if (!(dev->features & NETIF_F_LRO)) {
9316                         dev->features |= NETIF_F_LRO;
9317                         bp->flags |= TPA_ENABLE_FLAG;
9318                         changed = 1;
9319                 }
9320
9321         } else if (dev->features & NETIF_F_LRO) {
9322                 dev->features &= ~NETIF_F_LRO;
9323                 bp->flags &= ~TPA_ENABLE_FLAG;
9324                 changed = 1;
9325         }
9326
9327         if (changed && netif_running(dev)) {
9328                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9329                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9330         }
9331
9332         return rc;
9333 }
9334
9335 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9336 {
9337         struct bnx2x *bp = netdev_priv(dev);
9338
9339         return bp->rx_csum;
9340 }
9341
9342 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9343 {
9344         struct bnx2x *bp = netdev_priv(dev);
9345         int rc = 0;
9346
9347         bp->rx_csum = data;
9348
9349         /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9350            TPA'ed packets will be discarded due to wrong TCP CSUM */
9351         if (!data) {
9352                 u32 flags = ethtool_op_get_flags(dev);
9353
9354                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9355         }
9356
9357         return rc;
9358 }
9359
9360 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9361 {
9362         if (data) {
9363                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9364                 dev->features |= NETIF_F_TSO6;
9365         } else {
9366                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9367                 dev->features &= ~NETIF_F_TSO6;
9368         }
9369
9370         return 0;
9371 }
9372
9373 static const struct {
9374         char string[ETH_GSTRING_LEN];
9375 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9376         { "register_test (offline)" },
9377         { "memory_test (offline)" },
9378         { "loopback_test (offline)" },
9379         { "nvram_test (online)" },
9380         { "interrupt_test (online)" },
9381         { "link_test (online)" },
9382         { "idle check (online)" }
9383 };
9384
9385 static int bnx2x_self_test_count(struct net_device *dev)
9386 {
9387         return BNX2X_NUM_TESTS;
9388 }
9389
9390 static int bnx2x_test_registers(struct bnx2x *bp)
9391 {
9392         int idx, i, rc = -ENODEV;
9393         u32 wr_val = 0;
9394         int port = BP_PORT(bp);
9395         static const struct {
9396                 u32  offset0;
9397                 u32  offset1;
9398                 u32  mask;
9399         } reg_tbl[] = {
9400 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9401                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9402                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9403                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9404                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9405                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9406                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9407                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9408                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9409                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9410 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9411                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9412                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9413                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9414                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9415                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9416                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9417                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9418                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9419                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9420 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9421                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9422                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9423                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9424                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9425                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9426                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9427                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9428                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9429                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9430 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9431                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9432                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9433                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9434                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9435                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9436                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9437
9438                 { 0xffffffff, 0, 0x00000000 }
9439         };
9440
9441         if (!netif_running(bp->dev))
9442                 return rc;
9443
9444         /* Repeat the test twice:
9445            First by writing 0x00000000, second by writing 0xffffffff */
9446         for (idx = 0; idx < 2; idx++) {
9447
9448                 switch (idx) {
9449                 case 0:
9450                         wr_val = 0;
9451                         break;
9452                 case 1:
9453                         wr_val = 0xffffffff;
9454                         break;
9455                 }
9456
9457                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9458                         u32 offset, mask, save_val, val;
9459
9460                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9461                         mask = reg_tbl[i].mask;
9462
9463                         save_val = REG_RD(bp, offset);
9464
9465                         REG_WR(bp, offset, wr_val);
9466                         val = REG_RD(bp, offset);
9467
9468                         /* Restore the original register's value */
9469                         REG_WR(bp, offset, save_val);
9470
9471                         /* verify that value is as expected value */
9472                         if ((val & mask) != (wr_val & mask))
9473                                 goto test_reg_exit;
9474                 }
9475         }
9476
9477         rc = 0;
9478
9479 test_reg_exit:
9480         return rc;
9481 }
9482
9483 static int bnx2x_test_memory(struct bnx2x *bp)
9484 {
9485         int i, j, rc = -ENODEV;
9486         u32 val;
9487         static const struct {
9488                 u32 offset;
9489                 int size;
9490         } mem_tbl[] = {
9491                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9492                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9493                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9494                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9495                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9496                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9497                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9498
9499                 { 0xffffffff, 0 }
9500         };
9501         static const struct {
9502                 char *name;
9503                 u32 offset;
9504                 u32 e1_mask;
9505                 u32 e1h_mask;
9506         } prty_tbl[] = {
9507                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9508                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9509                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9510                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9511                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9512                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9513
9514                 { NULL, 0xffffffff, 0, 0 }
9515         };
9516
9517         if (!netif_running(bp->dev))
9518                 return rc;
9519
9520         /* Go through all the memories */
9521         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9522                 for (j = 0; j < mem_tbl[i].size; j++)
9523                         REG_RD(bp, mem_tbl[i].offset + j*4);
9524
9525         /* Check the parity status */
9526         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9527                 val = REG_RD(bp, prty_tbl[i].offset);
9528                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9529                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9530                         DP(NETIF_MSG_HW,
9531                            "%s is 0x%x\n", prty_tbl[i].name, val);
9532                         goto test_mem_exit;
9533                 }
9534         }
9535
9536         rc = 0;
9537
9538 test_mem_exit:
9539         return rc;
9540 }
9541
9542 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9543 {
9544         int cnt = 1000;
9545
9546         if (link_up)
9547                 while (bnx2x_link_test(bp) && cnt--)
9548                         msleep(10);
9549 }
9550
9551 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9552 {
9553         unsigned int pkt_size, num_pkts, i;
9554         struct sk_buff *skb;
9555         unsigned char *packet;
9556         struct bnx2x_fastpath *fp = &bp->fp[0];
9557         u16 tx_start_idx, tx_idx;
9558         u16 rx_start_idx, rx_idx;
9559         u16 pkt_prod;
9560         struct sw_tx_bd *tx_buf;
9561         struct eth_tx_bd *tx_bd;
9562         dma_addr_t mapping;
9563         union eth_rx_cqe *cqe;
9564         u8 cqe_fp_flags;
9565         struct sw_rx_bd *rx_buf;
9566         u16 len;
9567         int rc = -ENODEV;
9568
9569         /* check the loopback mode */
9570         switch (loopback_mode) {
9571         case BNX2X_PHY_LOOPBACK:
9572                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9573                         return -EINVAL;
9574                 break;
9575         case BNX2X_MAC_LOOPBACK:
9576                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9577                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9578                 break;
9579         default:
9580                 return -EINVAL;
9581         }
9582
9583         /* prepare the loopback packet */
9584         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9585                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9586         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9587         if (!skb) {
9588                 rc = -ENOMEM;
9589                 goto test_loopback_exit;
9590         }
9591         packet = skb_put(skb, pkt_size);
9592         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9593         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9594         for (i = ETH_HLEN; i < pkt_size; i++)
9595                 packet[i] = (unsigned char) (i & 0xff);
9596
9597         /* send the loopback packet */
9598         num_pkts = 0;
9599         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9600         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9601
9602         pkt_prod = fp->tx_pkt_prod++;
9603         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9604         tx_buf->first_bd = fp->tx_bd_prod;
9605         tx_buf->skb = skb;
9606
9607         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9608         mapping = pci_map_single(bp->pdev, skb->data,
9609                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9610         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9611         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9612         tx_bd->nbd = cpu_to_le16(1);
9613         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9614         tx_bd->vlan = cpu_to_le16(pkt_prod);
9615         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9616                                        ETH_TX_BD_FLAGS_END_BD);
9617         tx_bd->general_data = ((UNICAST_ADDRESS <<
9618                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9619
9620         wmb();
9621
9622         le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9623         mb(); /* FW restriction: must not reorder writing nbd and packets */
9624         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9625         DOORBELL(bp, fp->index, 0);
9626
9627         mmiowb();
9628
9629         num_pkts++;
9630         fp->tx_bd_prod++;
9631         bp->dev->trans_start = jiffies;
9632
9633         udelay(100);
9634
9635         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9636         if (tx_idx != tx_start_idx + num_pkts)
9637                 goto test_loopback_exit;
9638
9639         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9640         if (rx_idx != rx_start_idx + num_pkts)
9641                 goto test_loopback_exit;
9642
9643         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9644         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9645         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9646                 goto test_loopback_rx_exit;
9647
9648         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9649         if (len != pkt_size)
9650                 goto test_loopback_rx_exit;
9651
9652         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9653         skb = rx_buf->skb;
9654         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9655         for (i = ETH_HLEN; i < pkt_size; i++)
9656                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9657                         goto test_loopback_rx_exit;
9658
9659         rc = 0;
9660
9661 test_loopback_rx_exit:
9662
9663         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9664         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9665         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9666         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9667
9668         /* Update producers */
9669         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9670                              fp->rx_sge_prod);
9671
9672 test_loopback_exit:
9673         bp->link_params.loopback_mode = LOOPBACK_NONE;
9674
9675         return rc;
9676 }
9677
9678 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9679 {
9680         int rc = 0, res;
9681
9682         if (!netif_running(bp->dev))
9683                 return BNX2X_LOOPBACK_FAILED;
9684
9685         bnx2x_netif_stop(bp, 1);
9686         bnx2x_acquire_phy_lock(bp);
9687
9688         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9689         if (res) {
9690                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
9691                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9692         }
9693
9694         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9695         if (res) {
9696                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
9697                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9698         }
9699
9700         bnx2x_release_phy_lock(bp);
9701         bnx2x_netif_start(bp);
9702
9703         return rc;
9704 }
9705
9706 #define CRC32_RESIDUAL                  0xdebb20e3
9707
9708 static int bnx2x_test_nvram(struct bnx2x *bp)
9709 {
9710         static const struct {
9711                 int offset;
9712                 int size;
9713         } nvram_tbl[] = {
9714                 {     0,  0x14 }, /* bootstrap */
9715                 {  0x14,  0xec }, /* dir */
9716                 { 0x100, 0x350 }, /* manuf_info */
9717                 { 0x450,  0xf0 }, /* feature_info */
9718                 { 0x640,  0x64 }, /* upgrade_key_info */
9719                 { 0x6a4,  0x64 },
9720                 { 0x708,  0x70 }, /* manuf_key_info */
9721                 { 0x778,  0x70 },
9722                 {     0,     0 }
9723         };
9724         __be32 buf[0x350 / 4];
9725         u8 *data = (u8 *)buf;
9726         int i, rc;
9727         u32 magic, csum;
9728
9729         rc = bnx2x_nvram_read(bp, 0, data, 4);
9730         if (rc) {
9731                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9732                 goto test_nvram_exit;
9733         }
9734
9735         magic = be32_to_cpu(buf[0]);
9736         if (magic != 0x669955aa) {
9737                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9738                 rc = -ENODEV;
9739                 goto test_nvram_exit;
9740         }
9741
9742         for (i = 0; nvram_tbl[i].size; i++) {
9743
9744                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9745                                       nvram_tbl[i].size);
9746                 if (rc) {
9747                         DP(NETIF_MSG_PROBE,
9748                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9749                         goto test_nvram_exit;
9750                 }
9751
9752                 csum = ether_crc_le(nvram_tbl[i].size, data);
9753                 if (csum != CRC32_RESIDUAL) {
9754                         DP(NETIF_MSG_PROBE,
9755                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9756                         rc = -ENODEV;
9757                         goto test_nvram_exit;
9758                 }
9759         }
9760
9761 test_nvram_exit:
9762         return rc;
9763 }
9764
9765 static int bnx2x_test_intr(struct bnx2x *bp)
9766 {
9767         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9768         int i, rc;
9769
9770         if (!netif_running(bp->dev))
9771                 return -ENODEV;
9772
9773         config->hdr.length = 0;
9774         if (CHIP_IS_E1(bp))
9775                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9776         else
9777                 config->hdr.offset = BP_FUNC(bp);
9778         config->hdr.client_id = bp->fp->cl_id;
9779         config->hdr.reserved1 = 0;
9780
9781         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9782                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9783                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9784         if (rc == 0) {
9785                 bp->set_mac_pending++;
9786                 for (i = 0; i < 10; i++) {
9787                         if (!bp->set_mac_pending)
9788                                 break;
9789                         msleep_interruptible(10);
9790                 }
9791                 if (i == 10)
9792                         rc = -ENODEV;
9793         }
9794
9795         return rc;
9796 }
9797
9798 static void bnx2x_self_test(struct net_device *dev,
9799                             struct ethtool_test *etest, u64 *buf)
9800 {
9801         struct bnx2x *bp = netdev_priv(dev);
9802
9803         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9804
9805         if (!netif_running(dev))
9806                 return;
9807
9808         /* offline tests are not supported in MF mode */
9809         if (IS_E1HMF(bp))
9810                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9811
9812         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9813                 int port = BP_PORT(bp);
9814                 u32 val;
9815                 u8 link_up;
9816
9817                 /* save current value of input enable for TX port IF */
9818                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9819                 /* disable input for TX port IF */
9820                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9821
9822                 link_up = bp->link_vars.link_up;
9823                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9824                 bnx2x_nic_load(bp, LOAD_DIAG);
9825                 /* wait until link state is restored */
9826                 bnx2x_wait_for_link(bp, link_up);
9827
9828                 if (bnx2x_test_registers(bp) != 0) {
9829                         buf[0] = 1;
9830                         etest->flags |= ETH_TEST_FL_FAILED;
9831                 }
9832                 if (bnx2x_test_memory(bp) != 0) {
9833                         buf[1] = 1;
9834                         etest->flags |= ETH_TEST_FL_FAILED;
9835                 }
9836                 buf[2] = bnx2x_test_loopback(bp, link_up);
9837                 if (buf[2] != 0)
9838                         etest->flags |= ETH_TEST_FL_FAILED;
9839
9840                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9841
9842                 /* restore input for TX port IF */
9843                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9844
9845                 bnx2x_nic_load(bp, LOAD_NORMAL);
9846                 /* wait until link state is restored */
9847                 bnx2x_wait_for_link(bp, link_up);
9848         }
9849         if (bnx2x_test_nvram(bp) != 0) {
9850                 buf[3] = 1;
9851                 etest->flags |= ETH_TEST_FL_FAILED;
9852         }
9853         if (bnx2x_test_intr(bp) != 0) {
9854                 buf[4] = 1;
9855                 etest->flags |= ETH_TEST_FL_FAILED;
9856         }
9857         if (bp->port.pmf)
9858                 if (bnx2x_link_test(bp) != 0) {
9859                         buf[5] = 1;
9860                         etest->flags |= ETH_TEST_FL_FAILED;
9861                 }
9862
9863 #ifdef BNX2X_EXTRA_DEBUG
9864         bnx2x_panic_dump(bp);
9865 #endif
9866 }
9867
9868 static const struct {
9869         long offset;
9870         int size;
9871         u8 string[ETH_GSTRING_LEN];
9872 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9873 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9874         { Q_STATS_OFFSET32(error_bytes_received_hi),
9875                                                 8, "[%d]: rx_error_bytes" },
9876         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9877                                                 8, "[%d]: rx_ucast_packets" },
9878         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9879                                                 8, "[%d]: rx_mcast_packets" },
9880         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9881                                                 8, "[%d]: rx_bcast_packets" },
9882         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9883         { Q_STATS_OFFSET32(rx_err_discard_pkt),
9884                                          4, "[%d]: rx_phy_ip_err_discards"},
9885         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9886                                          4, "[%d]: rx_skb_alloc_discard" },
9887         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9888
9889 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9890         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9891                                                         8, "[%d]: tx_packets" }
9892 };
9893
9894 static const struct {
9895         long offset;
9896         int size;
9897         u32 flags;
9898 #define STATS_FLAGS_PORT                1
9899 #define STATS_FLAGS_FUNC                2
9900 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9901         u8 string[ETH_GSTRING_LEN];
9902 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9903 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9904                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9905         { STATS_OFFSET32(error_bytes_received_hi),
9906                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9907         { STATS_OFFSET32(total_unicast_packets_received_hi),
9908                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9909         { STATS_OFFSET32(total_multicast_packets_received_hi),
9910                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9911         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9912                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9913         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9914                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9915         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9916                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9917         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9918                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9919         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9920                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9921 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9922                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9923         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9924                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9925         { STATS_OFFSET32(no_buff_discard_hi),
9926                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9927         { STATS_OFFSET32(mac_filter_discard),
9928                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9929         { STATS_OFFSET32(xxoverflow_discard),
9930                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9931         { STATS_OFFSET32(brb_drop_hi),
9932                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9933         { STATS_OFFSET32(brb_truncate_hi),
9934                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9935         { STATS_OFFSET32(pause_frames_received_hi),
9936                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9937         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9938                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9939         { STATS_OFFSET32(nig_timer_max),
9940                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9941 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9942                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9943         { STATS_OFFSET32(rx_skb_alloc_failed),
9944                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9945         { STATS_OFFSET32(hw_csum_err),
9946                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9947
9948         { STATS_OFFSET32(total_bytes_transmitted_hi),
9949                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9950         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9951                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9952         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9953                                 8, STATS_FLAGS_BOTH, "tx_packets" },
9954         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9955                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9956         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9957                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9958         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9959                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9960         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9961                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9962 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9963                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9964         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9965                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9966         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9967                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9968         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9969                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9970         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9971                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9972         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9973                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9974         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9975                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9976         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9977                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9978         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9979                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9980         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9981                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9982 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9983                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9984         { STATS_OFFSET32(pause_frames_sent_hi),
9985                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9986 };
9987
9988 #define IS_PORT_STAT(i) \
9989         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9990 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9991 #define IS_E1HMF_MODE_STAT(bp) \
9992                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9993
9994 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9995 {
9996         struct bnx2x *bp = netdev_priv(dev);
9997         int i, j, k;
9998
9999         switch (stringset) {
10000         case ETH_SS_STATS:
10001                 if (is_multi(bp)) {
10002                         k = 0;
10003                         for_each_queue(bp, i) {
10004                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10005                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10006                                                 bnx2x_q_stats_arr[j].string, i);
10007                                 k += BNX2X_NUM_Q_STATS;
10008                         }
10009                         if (IS_E1HMF_MODE_STAT(bp))
10010                                 break;
10011                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10012                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10013                                        bnx2x_stats_arr[j].string);
10014                 } else {
10015                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10016                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10017                                         continue;
10018                                 strcpy(buf + j*ETH_GSTRING_LEN,
10019                                        bnx2x_stats_arr[i].string);
10020                                 j++;
10021                         }
10022                 }
10023                 break;
10024
10025         case ETH_SS_TEST:
10026                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10027                 break;
10028         }
10029 }
10030
10031 static int bnx2x_get_stats_count(struct net_device *dev)
10032 {
10033         struct bnx2x *bp = netdev_priv(dev);
10034         int i, num_stats;
10035
10036         if (is_multi(bp)) {
10037                 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
10038                 if (!IS_E1HMF_MODE_STAT(bp))
10039                         num_stats += BNX2X_NUM_STATS;
10040         } else {
10041                 if (IS_E1HMF_MODE_STAT(bp)) {
10042                         num_stats = 0;
10043                         for (i = 0; i < BNX2X_NUM_STATS; i++)
10044                                 if (IS_FUNC_STAT(i))
10045                                         num_stats++;
10046                 } else
10047                         num_stats = BNX2X_NUM_STATS;
10048         }
10049
10050         return num_stats;
10051 }
10052
10053 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10054                                     struct ethtool_stats *stats, u64 *buf)
10055 {
10056         struct bnx2x *bp = netdev_priv(dev);
10057         u32 *hw_stats, *offset;
10058         int i, j, k;
10059
10060         if (is_multi(bp)) {
10061                 k = 0;
10062                 for_each_queue(bp, i) {
10063                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10064                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10065                                 if (bnx2x_q_stats_arr[j].size == 0) {
10066                                         /* skip this counter */
10067                                         buf[k + j] = 0;
10068                                         continue;
10069                                 }
10070                                 offset = (hw_stats +
10071                                           bnx2x_q_stats_arr[j].offset);
10072                                 if (bnx2x_q_stats_arr[j].size == 4) {
10073                                         /* 4-byte counter */
10074                                         buf[k + j] = (u64) *offset;
10075                                         continue;
10076                                 }
10077                                 /* 8-byte counter */
10078                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10079                         }
10080                         k += BNX2X_NUM_Q_STATS;
10081                 }
10082                 if (IS_E1HMF_MODE_STAT(bp))
10083                         return;
10084                 hw_stats = (u32 *)&bp->eth_stats;
10085                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10086                         if (bnx2x_stats_arr[j].size == 0) {
10087                                 /* skip this counter */
10088                                 buf[k + j] = 0;
10089                                 continue;
10090                         }
10091                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10092                         if (bnx2x_stats_arr[j].size == 4) {
10093                                 /* 4-byte counter */
10094                                 buf[k + j] = (u64) *offset;
10095                                 continue;
10096                         }
10097                         /* 8-byte counter */
10098                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10099                 }
10100         } else {
10101                 hw_stats = (u32 *)&bp->eth_stats;
10102                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10103                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10104                                 continue;
10105                         if (bnx2x_stats_arr[i].size == 0) {
10106                                 /* skip this counter */
10107                                 buf[j] = 0;
10108                                 j++;
10109                                 continue;
10110                         }
10111                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10112                         if (bnx2x_stats_arr[i].size == 4) {
10113                                 /* 4-byte counter */
10114                                 buf[j] = (u64) *offset;
10115                                 j++;
10116                                 continue;
10117                         }
10118                         /* 8-byte counter */
10119                         buf[j] = HILO_U64(*offset, *(offset + 1));
10120                         j++;
10121                 }
10122         }
10123 }
10124
10125 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10126 {
10127         struct bnx2x *bp = netdev_priv(dev);
10128         int port = BP_PORT(bp);
10129         int i;
10130
10131         if (!netif_running(dev))
10132                 return 0;
10133
10134         if (!bp->port.pmf)
10135                 return 0;
10136
10137         if (data == 0)
10138                 data = 2;
10139
10140         for (i = 0; i < (data * 2); i++) {
10141                 if ((i % 2) == 0)
10142                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10143                                       bp->link_params.hw_led_mode,
10144                                       bp->link_params.chip_id);
10145                 else
10146                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10147                                       bp->link_params.hw_led_mode,
10148                                       bp->link_params.chip_id);
10149
10150                 msleep_interruptible(500);
10151                 if (signal_pending(current))
10152                         break;
10153         }
10154
10155         if (bp->link_vars.link_up)
10156                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10157                               bp->link_vars.line_speed,
10158                               bp->link_params.hw_led_mode,
10159                               bp->link_params.chip_id);
10160
10161         return 0;
10162 }
10163
10164 static struct ethtool_ops bnx2x_ethtool_ops = {
10165         .get_settings           = bnx2x_get_settings,
10166         .set_settings           = bnx2x_set_settings,
10167         .get_drvinfo            = bnx2x_get_drvinfo,
10168         .get_regs_len           = bnx2x_get_regs_len,
10169         .get_regs               = bnx2x_get_regs,
10170         .get_wol                = bnx2x_get_wol,
10171         .set_wol                = bnx2x_set_wol,
10172         .get_msglevel           = bnx2x_get_msglevel,
10173         .set_msglevel           = bnx2x_set_msglevel,
10174         .nway_reset             = bnx2x_nway_reset,
10175         .get_link               = bnx2x_get_link,
10176         .get_eeprom_len         = bnx2x_get_eeprom_len,
10177         .get_eeprom             = bnx2x_get_eeprom,
10178         .set_eeprom             = bnx2x_set_eeprom,
10179         .get_coalesce           = bnx2x_get_coalesce,
10180         .set_coalesce           = bnx2x_set_coalesce,
10181         .get_ringparam          = bnx2x_get_ringparam,
10182         .set_ringparam          = bnx2x_set_ringparam,
10183         .get_pauseparam         = bnx2x_get_pauseparam,
10184         .set_pauseparam         = bnx2x_set_pauseparam,
10185         .get_rx_csum            = bnx2x_get_rx_csum,
10186         .set_rx_csum            = bnx2x_set_rx_csum,
10187         .get_tx_csum            = ethtool_op_get_tx_csum,
10188         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10189         .set_flags              = bnx2x_set_flags,
10190         .get_flags              = ethtool_op_get_flags,
10191         .get_sg                 = ethtool_op_get_sg,
10192         .set_sg                 = ethtool_op_set_sg,
10193         .get_tso                = ethtool_op_get_tso,
10194         .set_tso                = bnx2x_set_tso,
10195         .self_test_count        = bnx2x_self_test_count,
10196         .self_test              = bnx2x_self_test,
10197         .get_strings            = bnx2x_get_strings,
10198         .phys_id                = bnx2x_phys_id,
10199         .get_stats_count        = bnx2x_get_stats_count,
10200         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10201 };
10202
10203 /* end of ethtool_ops */
10204
10205 /****************************************************************************
10206 * General service functions
10207 ****************************************************************************/
10208
10209 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10210 {
10211         u16 pmcsr;
10212
10213         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10214
10215         switch (state) {
10216         case PCI_D0:
10217                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10218                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10219                                        PCI_PM_CTRL_PME_STATUS));
10220
10221                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10222                         /* delay required during transition out of D3hot */
10223                         msleep(20);
10224                 break;
10225
10226         case PCI_D3hot:
10227                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10228                 pmcsr |= 3;
10229
10230                 if (bp->wol)
10231                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10232
10233                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10234                                       pmcsr);
10235
10236                 /* No more memory access after this point until
10237                 * device is brought back to D0.
10238                 */
10239                 break;
10240
10241         default:
10242                 return -EINVAL;
10243         }
10244         return 0;
10245 }
10246
10247 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10248 {
10249         u16 rx_cons_sb;
10250
10251         /* Tell compiler that status block fields can change */
10252         barrier();
10253         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10254         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10255                 rx_cons_sb++;
10256         return (fp->rx_comp_cons != rx_cons_sb);
10257 }
10258
10259 /*
10260  * net_device service functions
10261  */
10262
10263 static int bnx2x_poll(struct napi_struct *napi, int budget)
10264 {
10265         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10266                                                  napi);
10267         struct bnx2x *bp = fp->bp;
10268         int work_done = 0;
10269
10270 #ifdef BNX2X_STOP_ON_ERROR
10271         if (unlikely(bp->panic))
10272                 goto poll_panic;
10273 #endif
10274
10275         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10276         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10277         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10278
10279         bnx2x_update_fpsb_idx(fp);
10280
10281         if (bnx2x_has_tx_work(fp))
10282                 bnx2x_tx_int(fp);
10283
10284         if (bnx2x_has_rx_work(fp)) {
10285                 work_done = bnx2x_rx_int(fp, budget);
10286
10287                 /* must not complete if we consumed full budget */
10288                 if (work_done >= budget)
10289                         goto poll_again;
10290         }
10291
10292         /* BNX2X_HAS_WORK() reads the status block, thus we need to
10293          * ensure that status block indices have been actually read
10294          * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10295          * so that we won't write the "newer" value of the status block to IGU
10296          * (if there was a DMA right after BNX2X_HAS_WORK and
10297          * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10298          * may be postponed to right before bnx2x_ack_sb). In this case
10299          * there will never be another interrupt until there is another update
10300          * of the status block, while there is still unhandled work.
10301          */
10302         rmb();
10303
10304         if (!BNX2X_HAS_WORK(fp)) {
10305 #ifdef BNX2X_STOP_ON_ERROR
10306 poll_panic:
10307 #endif
10308                 napi_complete(napi);
10309
10310                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10311                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10312                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10313                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10314         }
10315
10316 poll_again:
10317         return work_done;
10318 }
10319
10320
10321 /* we split the first BD into headers and data BDs
10322  * to ease the pain of our fellow microcode engineers
10323  * we use one mapping for both BDs
10324  * So far this has only been observed to happen
10325  * in Other Operating Systems(TM)
10326  */
10327 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10328                                    struct bnx2x_fastpath *fp,
10329                                    struct eth_tx_bd **tx_bd, u16 hlen,
10330                                    u16 bd_prod, int nbd)
10331 {
10332         struct eth_tx_bd *h_tx_bd = *tx_bd;
10333         struct eth_tx_bd *d_tx_bd;
10334         dma_addr_t mapping;
10335         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10336
10337         /* first fix first BD */
10338         h_tx_bd->nbd = cpu_to_le16(nbd);
10339         h_tx_bd->nbytes = cpu_to_le16(hlen);
10340
10341         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10342            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10343            h_tx_bd->addr_lo, h_tx_bd->nbd);
10344
10345         /* now get a new data BD
10346          * (after the pbd) and fill it */
10347         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10348         d_tx_bd = &fp->tx_desc_ring[bd_prod];
10349
10350         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10351                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10352
10353         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10354         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10355         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10356         d_tx_bd->vlan = 0;
10357         /* this marks the BD as one that has no individual mapping
10358          * the FW ignores this flag in a BD not marked start
10359          */
10360         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10361         DP(NETIF_MSG_TX_QUEUED,
10362            "TSO split data size is %d (%x:%x)\n",
10363            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10364
10365         /* update tx_bd for marking the last BD flag */
10366         *tx_bd = d_tx_bd;
10367
10368         return bd_prod;
10369 }
10370
10371 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10372 {
10373         if (fix > 0)
10374                 csum = (u16) ~csum_fold(csum_sub(csum,
10375                                 csum_partial(t_header - fix, fix, 0)));
10376
10377         else if (fix < 0)
10378                 csum = (u16) ~csum_fold(csum_add(csum,
10379                                 csum_partial(t_header, -fix, 0)));
10380
10381         return swab16(csum);
10382 }
10383
10384 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10385 {
10386         u32 rc;
10387
10388         if (skb->ip_summed != CHECKSUM_PARTIAL)
10389                 rc = XMIT_PLAIN;
10390
10391         else {
10392                 if (skb->protocol == htons(ETH_P_IPV6)) {
10393                         rc = XMIT_CSUM_V6;
10394                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10395                                 rc |= XMIT_CSUM_TCP;
10396
10397                 } else {
10398                         rc = XMIT_CSUM_V4;
10399                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10400                                 rc |= XMIT_CSUM_TCP;
10401                 }
10402         }
10403
10404         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10405                 rc |= XMIT_GSO_V4;
10406
10407         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10408                 rc |= XMIT_GSO_V6;
10409
10410         return rc;
10411 }
10412
10413 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10414 /* check if packet requires linearization (packet is too fragmented)
10415    no need to check fragmentation if page size > 8K (there will be no
10416    violation to FW restrictions) */
10417 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10418                              u32 xmit_type)
10419 {
10420         int to_copy = 0;
10421         int hlen = 0;
10422         int first_bd_sz = 0;
10423
10424         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10425         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10426
10427                 if (xmit_type & XMIT_GSO) {
10428                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10429                         /* Check if LSO packet needs to be copied:
10430                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10431                         int wnd_size = MAX_FETCH_BD - 3;
10432                         /* Number of windows to check */
10433                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10434                         int wnd_idx = 0;
10435                         int frag_idx = 0;
10436                         u32 wnd_sum = 0;
10437
10438                         /* Headers length */
10439                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10440                                 tcp_hdrlen(skb);
10441
10442                         /* Amount of data (w/o headers) on linear part of SKB*/
10443                         first_bd_sz = skb_headlen(skb) - hlen;
10444
10445                         wnd_sum  = first_bd_sz;
10446
10447                         /* Calculate the first sum - it's special */
10448                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10449                                 wnd_sum +=
10450                                         skb_shinfo(skb)->frags[frag_idx].size;
10451
10452                         /* If there was data on linear skb data - check it */
10453                         if (first_bd_sz > 0) {
10454                                 if (unlikely(wnd_sum < lso_mss)) {
10455                                         to_copy = 1;
10456                                         goto exit_lbl;
10457                                 }
10458
10459                                 wnd_sum -= first_bd_sz;
10460                         }
10461
10462                         /* Others are easier: run through the frag list and
10463                            check all windows */
10464                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10465                                 wnd_sum +=
10466                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10467
10468                                 if (unlikely(wnd_sum < lso_mss)) {
10469                                         to_copy = 1;
10470                                         break;
10471                                 }
10472                                 wnd_sum -=
10473                                         skb_shinfo(skb)->frags[wnd_idx].size;
10474                         }
10475                 } else {
10476                         /* in non-LSO too fragmented packet should always
10477                            be linearized */
10478                         to_copy = 1;
10479                 }
10480         }
10481
10482 exit_lbl:
10483         if (unlikely(to_copy))
10484                 DP(NETIF_MSG_TX_QUEUED,
10485                    "Linearization IS REQUIRED for %s packet. "
10486                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10487                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10488                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10489
10490         return to_copy;
10491 }
10492 #endif
10493
10494 /* called with netif_tx_lock
10495  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10496  * netif_wake_queue()
10497  */
10498 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10499 {
10500         struct bnx2x *bp = netdev_priv(dev);
10501         struct bnx2x_fastpath *fp;
10502         struct netdev_queue *txq;
10503         struct sw_tx_bd *tx_buf;
10504         struct eth_tx_bd *tx_bd;
10505         struct eth_tx_parse_bd *pbd = NULL;
10506         u16 pkt_prod, bd_prod;
10507         int nbd, fp_index;
10508         dma_addr_t mapping;
10509         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10510         int vlan_off = (bp->e1hov ? 4 : 0);
10511         int i;
10512         u8 hlen = 0;
10513
10514 #ifdef BNX2X_STOP_ON_ERROR
10515         if (unlikely(bp->panic))
10516                 return NETDEV_TX_BUSY;
10517 #endif
10518
10519         fp_index = skb_get_queue_mapping(skb);
10520         txq = netdev_get_tx_queue(dev, fp_index);
10521
10522         fp = &bp->fp[fp_index];
10523
10524         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10525                 fp->eth_q_stats.driver_xoff++,
10526                 netif_tx_stop_queue(txq);
10527                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10528                 return NETDEV_TX_BUSY;
10529         }
10530
10531         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10532            "  gso type %x  xmit_type %x\n",
10533            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10534            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10535
10536 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10537         /* First, check if we need to linearize the skb (due to FW
10538            restrictions). No need to check fragmentation if page size > 8K
10539            (there will be no violation to FW restrictions) */
10540         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10541                 /* Statistics of linearization */
10542                 bp->lin_cnt++;
10543                 if (skb_linearize(skb) != 0) {
10544                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10545                            "silently dropping this SKB\n");
10546                         dev_kfree_skb_any(skb);
10547                         return NETDEV_TX_OK;
10548                 }
10549         }
10550 #endif
10551
10552         /*
10553         Please read carefully. First we use one BD which we mark as start,
10554         then for TSO or xsum we have a parsing info BD,
10555         and only then we have the rest of the TSO BDs.
10556         (don't forget to mark the last one as last,
10557         and to unmap only AFTER you write to the BD ...)
10558         And above all, all pdb sizes are in words - NOT DWORDS!
10559         */
10560
10561         pkt_prod = fp->tx_pkt_prod++;
10562         bd_prod = TX_BD(fp->tx_bd_prod);
10563
10564         /* get a tx_buf and first BD */
10565         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10566         tx_bd = &fp->tx_desc_ring[bd_prod];
10567
10568         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10569         tx_bd->general_data = (UNICAST_ADDRESS <<
10570                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10571         /* header nbd */
10572         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10573
10574         /* remember the first BD of the packet */
10575         tx_buf->first_bd = fp->tx_bd_prod;
10576         tx_buf->skb = skb;
10577
10578         DP(NETIF_MSG_TX_QUEUED,
10579            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10580            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10581
10582 #ifdef BCM_VLAN
10583         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10584             (bp->flags & HW_VLAN_TX_FLAG)) {
10585                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10586                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10587                 vlan_off += 4;
10588         } else
10589 #endif
10590                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10591
10592         if (xmit_type) {
10593                 /* turn on parsing and get a BD */
10594                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10595                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10596
10597                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10598         }
10599
10600         if (xmit_type & XMIT_CSUM) {
10601                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10602
10603                 /* for now NS flag is not used in Linux */
10604                 pbd->global_data =
10605                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10606                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10607
10608                 pbd->ip_hlen = (skb_transport_header(skb) -
10609                                 skb_network_header(skb)) / 2;
10610
10611                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10612
10613                 pbd->total_hlen = cpu_to_le16(hlen);
10614                 hlen = hlen*2 - vlan_off;
10615
10616                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10617
10618                 if (xmit_type & XMIT_CSUM_V4)
10619                         tx_bd->bd_flags.as_bitfield |=
10620                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10621                 else
10622                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10623
10624                 if (xmit_type & XMIT_CSUM_TCP) {
10625                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10626
10627                 } else {
10628                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10629
10630                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10631                         pbd->cs_offset = fix / 2;
10632
10633                         DP(NETIF_MSG_TX_QUEUED,
10634                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
10635                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10636                            SKB_CS(skb));
10637
10638                         /* HW bug: fixup the CSUM */
10639                         pbd->tcp_pseudo_csum =
10640                                 bnx2x_csum_fix(skb_transport_header(skb),
10641                                                SKB_CS(skb), fix);
10642
10643                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10644                            pbd->tcp_pseudo_csum);
10645                 }
10646         }
10647
10648         mapping = pci_map_single(bp->pdev, skb->data,
10649                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10650
10651         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10652         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10653         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10654         tx_bd->nbd = cpu_to_le16(nbd);
10655         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10656
10657         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10658            "  nbytes %d  flags %x  vlan %x\n",
10659            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10660            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10661            le16_to_cpu(tx_bd->vlan));
10662
10663         if (xmit_type & XMIT_GSO) {
10664
10665                 DP(NETIF_MSG_TX_QUEUED,
10666                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10667                    skb->len, hlen, skb_headlen(skb),
10668                    skb_shinfo(skb)->gso_size);
10669
10670                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10671
10672                 if (unlikely(skb_headlen(skb) > hlen))
10673                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10674                                                  bd_prod, ++nbd);
10675
10676                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10677                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10678                 pbd->tcp_flags = pbd_tcp_flags(skb);
10679
10680                 if (xmit_type & XMIT_GSO_V4) {
10681                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10682                         pbd->tcp_pseudo_csum =
10683                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10684                                                           ip_hdr(skb)->daddr,
10685                                                           0, IPPROTO_TCP, 0));
10686
10687                 } else
10688                         pbd->tcp_pseudo_csum =
10689                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10690                                                         &ipv6_hdr(skb)->daddr,
10691                                                         0, IPPROTO_TCP, 0));
10692
10693                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10694         }
10695
10696         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10697                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10698
10699                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10700                 tx_bd = &fp->tx_desc_ring[bd_prod];
10701
10702                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10703                                        frag->size, PCI_DMA_TODEVICE);
10704
10705                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10706                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10707                 tx_bd->nbytes = cpu_to_le16(frag->size);
10708                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10709                 tx_bd->bd_flags.as_bitfield = 0;
10710
10711                 DP(NETIF_MSG_TX_QUEUED,
10712                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
10713                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10714                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10715         }
10716
10717         /* now at last mark the BD as the last BD */
10718         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10719
10720         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
10721            tx_bd, tx_bd->bd_flags.as_bitfield);
10722
10723         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10724
10725         /* now send a tx doorbell, counting the next BD
10726          * if the packet contains or ends with it
10727          */
10728         if (TX_BD_POFF(bd_prod) < nbd)
10729                 nbd++;
10730
10731         if (pbd)
10732                 DP(NETIF_MSG_TX_QUEUED,
10733                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10734                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10735                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10736                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10737                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10738
10739         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10740
10741         /*
10742          * Make sure that the BD data is updated before updating the producer
10743          * since FW might read the BD right after the producer is updated.
10744          * This is only applicable for weak-ordered memory model archs such
10745          * as IA-64. The following barrier is also mandatory since FW will
10746          * assumes packets must have BDs.
10747          */
10748         wmb();
10749
10750         le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10751         mb(); /* FW restriction: must not reorder writing nbd and packets */
10752         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10753         DOORBELL(bp, fp->index, 0);
10754
10755         mmiowb();
10756
10757         fp->tx_bd_prod += nbd;
10758
10759         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10760                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10761                    if we put Tx into XOFF state. */
10762                 smp_mb();
10763                 netif_tx_stop_queue(txq);
10764                 fp->eth_q_stats.driver_xoff++;
10765                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10766                         netif_tx_wake_queue(txq);
10767         }
10768         fp->tx_pkt++;
10769
10770         return NETDEV_TX_OK;
10771 }
10772
10773 /* called with rtnl_lock */
10774 static int bnx2x_open(struct net_device *dev)
10775 {
10776         struct bnx2x *bp = netdev_priv(dev);
10777
10778         netif_carrier_off(dev);
10779
10780         bnx2x_set_power_state(bp, PCI_D0);
10781
10782         return bnx2x_nic_load(bp, LOAD_OPEN);
10783 }
10784
10785 /* called with rtnl_lock */
10786 static int bnx2x_close(struct net_device *dev)
10787 {
10788         struct bnx2x *bp = netdev_priv(dev);
10789
10790         /* Unload the driver, release IRQs */
10791         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10792         if (atomic_read(&bp->pdev->enable_cnt) == 1)
10793                 if (!CHIP_REV_IS_SLOW(bp))
10794                         bnx2x_set_power_state(bp, PCI_D3hot);
10795
10796         return 0;
10797 }
10798
10799 /* called with netif_tx_lock from dev_mcast.c */
10800 static void bnx2x_set_rx_mode(struct net_device *dev)
10801 {
10802         struct bnx2x *bp = netdev_priv(dev);
10803         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10804         int port = BP_PORT(bp);
10805
10806         if (bp->state != BNX2X_STATE_OPEN) {
10807                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10808                 return;
10809         }
10810
10811         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10812
10813         if (dev->flags & IFF_PROMISC)
10814                 rx_mode = BNX2X_RX_MODE_PROMISC;
10815
10816         else if ((dev->flags & IFF_ALLMULTI) ||
10817                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10818                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10819
10820         else { /* some multicasts */
10821                 if (CHIP_IS_E1(bp)) {
10822                         int i, old, offset;
10823                         struct dev_mc_list *mclist;
10824                         struct mac_configuration_cmd *config =
10825                                                 bnx2x_sp(bp, mcast_config);
10826
10827                         for (i = 0, mclist = dev->mc_list;
10828                              mclist && (i < dev->mc_count);
10829                              i++, mclist = mclist->next) {
10830
10831                                 config->config_table[i].
10832                                         cam_entry.msb_mac_addr =
10833                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
10834                                 config->config_table[i].
10835                                         cam_entry.middle_mac_addr =
10836                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
10837                                 config->config_table[i].
10838                                         cam_entry.lsb_mac_addr =
10839                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
10840                                 config->config_table[i].cam_entry.flags =
10841                                                         cpu_to_le16(port);
10842                                 config->config_table[i].
10843                                         target_table_entry.flags = 0;
10844                                 config->config_table[i].
10845                                         target_table_entry.client_id = 0;
10846                                 config->config_table[i].
10847                                         target_table_entry.vlan_id = 0;
10848
10849                                 DP(NETIF_MSG_IFUP,
10850                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10851                                    config->config_table[i].
10852                                                 cam_entry.msb_mac_addr,
10853                                    config->config_table[i].
10854                                                 cam_entry.middle_mac_addr,
10855                                    config->config_table[i].
10856                                                 cam_entry.lsb_mac_addr);
10857                         }
10858                         old = config->hdr.length;
10859                         if (old > i) {
10860                                 for (; i < old; i++) {
10861                                         if (CAM_IS_INVALID(config->
10862                                                            config_table[i])) {
10863                                                 /* already invalidated */
10864                                                 break;
10865                                         }
10866                                         /* invalidate */
10867                                         CAM_INVALIDATE(config->
10868                                                        config_table[i]);
10869                                 }
10870                         }
10871
10872                         if (CHIP_REV_IS_SLOW(bp))
10873                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10874                         else
10875                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
10876
10877                         config->hdr.length = i;
10878                         config->hdr.offset = offset;
10879                         config->hdr.client_id = bp->fp->cl_id;
10880                         config->hdr.reserved1 = 0;
10881
10882                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10883                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10884                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10885                                       0);
10886                 } else { /* E1H */
10887                         /* Accept one or more multicasts */
10888                         struct dev_mc_list *mclist;
10889                         u32 mc_filter[MC_HASH_SIZE];
10890                         u32 crc, bit, regidx;
10891                         int i;
10892
10893                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10894
10895                         for (i = 0, mclist = dev->mc_list;
10896                              mclist && (i < dev->mc_count);
10897                              i++, mclist = mclist->next) {
10898
10899                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10900                                    mclist->dmi_addr);
10901
10902                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10903                                 bit = (crc >> 24) & 0xff;
10904                                 regidx = bit >> 5;
10905                                 bit &= 0x1f;
10906                                 mc_filter[regidx] |= (1 << bit);
10907                         }
10908
10909                         for (i = 0; i < MC_HASH_SIZE; i++)
10910                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10911                                        mc_filter[i]);
10912                 }
10913         }
10914
10915         bp->rx_mode = rx_mode;
10916         bnx2x_set_storm_rx_mode(bp);
10917 }
10918
10919 /* called with rtnl_lock */
10920 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10921 {
10922         struct sockaddr *addr = p;
10923         struct bnx2x *bp = netdev_priv(dev);
10924
10925         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10926                 return -EINVAL;
10927
10928         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10929         if (netif_running(dev)) {
10930                 if (CHIP_IS_E1(bp))
10931                         bnx2x_set_mac_addr_e1(bp, 1);
10932                 else
10933                         bnx2x_set_mac_addr_e1h(bp, 1);
10934         }
10935
10936         return 0;
10937 }
10938
10939 /* called with rtnl_lock */
10940 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10941 {
10942         struct mii_ioctl_data *data = if_mii(ifr);
10943         struct bnx2x *bp = netdev_priv(dev);
10944         int port = BP_PORT(bp);
10945         int err;
10946
10947         switch (cmd) {
10948         case SIOCGMIIPHY:
10949                 data->phy_id = bp->port.phy_addr;
10950
10951                 /* fallthrough */
10952
10953         case SIOCGMIIREG: {
10954                 u16 mii_regval;
10955
10956                 if (!netif_running(dev))
10957                         return -EAGAIN;
10958
10959                 mutex_lock(&bp->port.phy_mutex);
10960                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10961                                       DEFAULT_PHY_DEV_ADDR,
10962                                       (data->reg_num & 0x1f), &mii_regval);
10963                 data->val_out = mii_regval;
10964                 mutex_unlock(&bp->port.phy_mutex);
10965                 return err;
10966         }
10967
10968         case SIOCSMIIREG:
10969                 if (!capable(CAP_NET_ADMIN))
10970                         return -EPERM;
10971
10972                 if (!netif_running(dev))
10973                         return -EAGAIN;
10974
10975                 mutex_lock(&bp->port.phy_mutex);
10976                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10977                                        DEFAULT_PHY_DEV_ADDR,
10978                                        (data->reg_num & 0x1f), data->val_in);
10979                 mutex_unlock(&bp->port.phy_mutex);
10980                 return err;
10981
10982         default:
10983                 /* do nothing */
10984                 break;
10985         }
10986
10987         return -EOPNOTSUPP;
10988 }
10989
10990 /* called with rtnl_lock */
10991 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10992 {
10993         struct bnx2x *bp = netdev_priv(dev);
10994         int rc = 0;
10995
10996         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10997             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10998                 return -EINVAL;
10999
11000         /* This does not race with packet allocation
11001          * because the actual alloc size is
11002          * only updated as part of load
11003          */
11004         dev->mtu = new_mtu;
11005
11006         if (netif_running(dev)) {
11007                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11008                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11009         }
11010
11011         return rc;
11012 }
11013
11014 static void bnx2x_tx_timeout(struct net_device *dev)
11015 {
11016         struct bnx2x *bp = netdev_priv(dev);
11017
11018 #ifdef BNX2X_STOP_ON_ERROR
11019         if (!bp->panic)
11020                 bnx2x_panic();
11021 #endif
11022         /* This allows the netif to be shutdown gracefully before resetting */
11023         schedule_work(&bp->reset_task);
11024 }
11025
11026 #ifdef BCM_VLAN
11027 /* called with rtnl_lock */
11028 static void bnx2x_vlan_rx_register(struct net_device *dev,
11029                                    struct vlan_group *vlgrp)
11030 {
11031         struct bnx2x *bp = netdev_priv(dev);
11032
11033         bp->vlgrp = vlgrp;
11034
11035         /* Set flags according to the required capabilities */
11036         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11037
11038         if (dev->features & NETIF_F_HW_VLAN_TX)
11039                 bp->flags |= HW_VLAN_TX_FLAG;
11040
11041         if (dev->features & NETIF_F_HW_VLAN_RX)
11042                 bp->flags |= HW_VLAN_RX_FLAG;
11043
11044         if (netif_running(dev))
11045                 bnx2x_set_client_config(bp);
11046 }
11047
11048 #endif
11049
11050 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11051 static void poll_bnx2x(struct net_device *dev)
11052 {
11053         struct bnx2x *bp = netdev_priv(dev);
11054
11055         disable_irq(bp->pdev->irq);
11056         bnx2x_interrupt(bp->pdev->irq, dev);
11057         enable_irq(bp->pdev->irq);
11058 }
11059 #endif
11060
11061 static const struct net_device_ops bnx2x_netdev_ops = {
11062         .ndo_open               = bnx2x_open,
11063         .ndo_stop               = bnx2x_close,
11064         .ndo_start_xmit         = bnx2x_start_xmit,
11065         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11066         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11067         .ndo_validate_addr      = eth_validate_addr,
11068         .ndo_do_ioctl           = bnx2x_ioctl,
11069         .ndo_change_mtu         = bnx2x_change_mtu,
11070         .ndo_tx_timeout         = bnx2x_tx_timeout,
11071 #ifdef BCM_VLAN
11072         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11073 #endif
11074 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11075         .ndo_poll_controller    = poll_bnx2x,
11076 #endif
11077 };
11078
11079 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11080                                     struct net_device *dev)
11081 {
11082         struct bnx2x *bp;
11083         int rc;
11084
11085         SET_NETDEV_DEV(dev, &pdev->dev);
11086         bp = netdev_priv(dev);
11087
11088         bp->dev = dev;
11089         bp->pdev = pdev;
11090         bp->flags = 0;
11091         bp->func = PCI_FUNC(pdev->devfn);
11092
11093         rc = pci_enable_device(pdev);
11094         if (rc) {
11095                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11096                 goto err_out;
11097         }
11098
11099         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11100                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11101                        " aborting\n");
11102                 rc = -ENODEV;
11103                 goto err_out_disable;
11104         }
11105
11106         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11107                 printk(KERN_ERR PFX "Cannot find second PCI device"
11108                        " base address, aborting\n");
11109                 rc = -ENODEV;
11110                 goto err_out_disable;
11111         }
11112
11113         if (atomic_read(&pdev->enable_cnt) == 1) {
11114                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11115                 if (rc) {
11116                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11117                                " aborting\n");
11118                         goto err_out_disable;
11119                 }
11120
11121                 pci_set_master(pdev);
11122                 pci_save_state(pdev);
11123         }
11124
11125         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11126         if (bp->pm_cap == 0) {
11127                 printk(KERN_ERR PFX "Cannot find power management"
11128                        " capability, aborting\n");
11129                 rc = -EIO;
11130                 goto err_out_release;
11131         }
11132
11133         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11134         if (bp->pcie_cap == 0) {
11135                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11136                        " aborting\n");
11137                 rc = -EIO;
11138                 goto err_out_release;
11139         }
11140
11141         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11142                 bp->flags |= USING_DAC_FLAG;
11143                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11144                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11145                                " failed, aborting\n");
11146                         rc = -EIO;
11147                         goto err_out_release;
11148                 }
11149
11150         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11151                 printk(KERN_ERR PFX "System does not support DMA,"
11152                        " aborting\n");
11153                 rc = -EIO;
11154                 goto err_out_release;
11155         }
11156
11157         dev->mem_start = pci_resource_start(pdev, 0);
11158         dev->base_addr = dev->mem_start;
11159         dev->mem_end = pci_resource_end(pdev, 0);
11160
11161         dev->irq = pdev->irq;
11162
11163         bp->regview = pci_ioremap_bar(pdev, 0);
11164         if (!bp->regview) {
11165                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11166                 rc = -ENOMEM;
11167                 goto err_out_release;
11168         }
11169
11170         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11171                                         min_t(u64, BNX2X_DB_SIZE,
11172                                               pci_resource_len(pdev, 2)));
11173         if (!bp->doorbells) {
11174                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11175                 rc = -ENOMEM;
11176                 goto err_out_unmap;
11177         }
11178
11179         bnx2x_set_power_state(bp, PCI_D0);
11180
11181         /* clean indirect addresses */
11182         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11183                                PCICFG_VENDOR_ID_OFFSET);
11184         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11185         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11186         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11187         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11188
11189         dev->watchdog_timeo = TX_TIMEOUT;
11190
11191         dev->netdev_ops = &bnx2x_netdev_ops;
11192         dev->ethtool_ops = &bnx2x_ethtool_ops;
11193         dev->features |= NETIF_F_SG;
11194         dev->features |= NETIF_F_HW_CSUM;
11195         if (bp->flags & USING_DAC_FLAG)
11196                 dev->features |= NETIF_F_HIGHDMA;
11197         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11198         dev->features |= NETIF_F_TSO6;
11199 #ifdef BCM_VLAN
11200         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11201         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11202
11203         dev->vlan_features |= NETIF_F_SG;
11204         dev->vlan_features |= NETIF_F_HW_CSUM;
11205         if (bp->flags & USING_DAC_FLAG)
11206                 dev->vlan_features |= NETIF_F_HIGHDMA;
11207         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11208         dev->vlan_features |= NETIF_F_TSO6;
11209 #endif
11210
11211         return 0;
11212
11213 err_out_unmap:
11214         if (bp->regview) {
11215                 iounmap(bp->regview);
11216                 bp->regview = NULL;
11217         }
11218         if (bp->doorbells) {
11219                 iounmap(bp->doorbells);
11220                 bp->doorbells = NULL;
11221         }
11222
11223 err_out_release:
11224         if (atomic_read(&pdev->enable_cnt) == 1)
11225                 pci_release_regions(pdev);
11226
11227 err_out_disable:
11228         pci_disable_device(pdev);
11229         pci_set_drvdata(pdev, NULL);
11230
11231 err_out:
11232         return rc;
11233 }
11234
11235 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11236 {
11237         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11238
11239         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11240         return val;
11241 }
11242
11243 /* return value of 1=2.5GHz 2=5GHz */
11244 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11245 {
11246         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11247
11248         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11249         return val;
11250 }
11251 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11252 {
11253         struct bnx2x_fw_file_hdr *fw_hdr;
11254         struct bnx2x_fw_file_section *sections;
11255         u16 *ops_offsets;
11256         u32 offset, len, num_ops;
11257         int i;
11258         const struct firmware *firmware = bp->firmware;
11259         const u8 * fw_ver;
11260
11261         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11262                 return -EINVAL;
11263
11264         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11265         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11266
11267         /* Make sure none of the offsets and sizes make us read beyond
11268          * the end of the firmware data */
11269         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11270                 offset = be32_to_cpu(sections[i].offset);
11271                 len = be32_to_cpu(sections[i].len);
11272                 if (offset + len > firmware->size) {
11273                         printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11274                         return -EINVAL;
11275                 }
11276         }
11277
11278         /* Likewise for the init_ops offsets */
11279         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11280         ops_offsets = (u16 *)(firmware->data + offset);
11281         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11282
11283         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11284                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11285                         printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11286                         return -EINVAL;
11287                 }
11288         }
11289
11290         /* Check FW version */
11291         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11292         fw_ver = firmware->data + offset;
11293         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11294             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11295             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11296             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11297                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11298                                     " Should be %d.%d.%d.%d\n",
11299                        fw_ver[0], fw_ver[1], fw_ver[2],
11300                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11301                        BCM_5710_FW_MINOR_VERSION,
11302                        BCM_5710_FW_REVISION_VERSION,
11303                        BCM_5710_FW_ENGINEERING_VERSION);
11304                 return -EINVAL;
11305         }
11306
11307         return 0;
11308 }
11309
11310 static void inline be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11311 {
11312         u32 i;
11313         const __be32 *source = (const __be32*)_source;
11314         u32 *target = (u32*)_target;
11315
11316         for (i = 0; i < n/4; i++)
11317                 target[i] = be32_to_cpu(source[i]);
11318 }
11319
11320 /*
11321    Ops array is stored in the following format:
11322    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11323  */
11324 static void inline bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11325 {
11326         u32 i, j, tmp;
11327         const __be32 *source = (const __be32*)_source;
11328         struct raw_op *target = (struct raw_op*)_target;
11329
11330         for (i = 0, j = 0; i < n/8; i++, j+=2) {
11331                 tmp = be32_to_cpu(source[j]);
11332                 target[i].op = (tmp >> 24) & 0xff;
11333                 target[i].offset =  tmp & 0xffffff;
11334                 target[i].raw_data = be32_to_cpu(source[j+1]);
11335         }
11336 }
11337 static void inline be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11338 {
11339         u32 i;
11340         u16 *target = (u16*)_target;
11341         const __be16 *source = (const __be16*)_source;
11342
11343         for (i = 0; i < n/2; i++)
11344                 target[i] = be16_to_cpu(source[i]);
11345 }
11346
11347 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11348         do {   \
11349                 u32 len = be32_to_cpu(fw_hdr->arr.len);   \
11350                 bp->arr = kmalloc(len, GFP_KERNEL);  \
11351                 if (!bp->arr) { \
11352                         printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11353                         goto lbl; \
11354                 } \
11355                 func(bp->firmware->data + \
11356                         be32_to_cpu(fw_hdr->arr.offset), \
11357                         (u8*)bp->arr, len); \
11358         } while (0)
11359
11360
11361 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11362 {
11363         char fw_file_name[40] = {0};
11364         int rc, offset;
11365         struct bnx2x_fw_file_hdr *fw_hdr;
11366
11367         /* Create a FW file name */
11368         if (CHIP_IS_E1(bp))
11369                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11370         else
11371                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11372
11373         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11374                 BCM_5710_FW_MAJOR_VERSION,
11375                 BCM_5710_FW_MINOR_VERSION,
11376                 BCM_5710_FW_REVISION_VERSION,
11377                 BCM_5710_FW_ENGINEERING_VERSION);
11378
11379         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11380
11381         rc = request_firmware(&bp->firmware, fw_file_name, dev);
11382         if (rc) {
11383                 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11384                 goto request_firmware_exit;
11385         }
11386
11387         rc = bnx2x_check_firmware(bp);
11388         if (rc) {
11389                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11390                 goto request_firmware_exit;
11391         }
11392
11393         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11394
11395         /* Initialize the pointers to the init arrays */
11396         /* Blob */
11397         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11398
11399         /* Opcodes */
11400         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11401
11402         /* Offsets */
11403         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11404
11405         /* STORMs firmware */
11406         bp->tsem_int_table_data = bp->firmware->data +
11407                 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11408         bp->tsem_pram_data      = bp->firmware->data +
11409                 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11410         bp->usem_int_table_data = bp->firmware->data +
11411                 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11412         bp->usem_pram_data      = bp->firmware->data +
11413                 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11414         bp->xsem_int_table_data = bp->firmware->data +
11415                 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11416         bp->xsem_pram_data      = bp->firmware->data +
11417                 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11418         bp->csem_int_table_data = bp->firmware->data +
11419                 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11420         bp->csem_pram_data      = bp->firmware->data +
11421                 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11422
11423         return 0;
11424 init_offsets_alloc_err:
11425         kfree(bp->init_ops);
11426 init_ops_alloc_err:
11427         kfree(bp->init_data);
11428 request_firmware_exit:
11429         release_firmware(bp->firmware);
11430
11431         return rc;
11432 }
11433
11434
11435
11436 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11437                                     const struct pci_device_id *ent)
11438 {
11439         static int version_printed;
11440         struct net_device *dev = NULL;
11441         struct bnx2x *bp;
11442         int rc;
11443
11444         if (version_printed++ == 0)
11445                 printk(KERN_INFO "%s", version);
11446
11447         /* dev zeroed in init_etherdev */
11448         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11449         if (!dev) {
11450                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11451                 return -ENOMEM;
11452         }
11453
11454         bp = netdev_priv(dev);
11455         bp->msglevel = debug;
11456
11457         rc = bnx2x_init_dev(pdev, dev);
11458         if (rc < 0) {
11459                 free_netdev(dev);
11460                 return rc;
11461         }
11462
11463         pci_set_drvdata(pdev, dev);
11464
11465         rc = bnx2x_init_bp(bp);
11466         if (rc)
11467                 goto init_one_exit;
11468
11469         /* Set init arrays */
11470         rc = bnx2x_init_firmware(bp, &pdev->dev);
11471         if (rc) {
11472                 printk(KERN_ERR PFX "Error loading firmware\n");
11473                 goto init_one_exit;
11474         }
11475
11476         rc = register_netdev(dev);
11477         if (rc) {
11478                 dev_err(&pdev->dev, "Cannot register net device\n");
11479                 goto init_one_exit;
11480         }
11481
11482         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11483                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11484                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11485                bnx2x_get_pcie_width(bp),
11486                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11487                dev->base_addr, bp->pdev->irq);
11488         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11489
11490         return 0;
11491
11492 init_one_exit:
11493         if (bp->regview)
11494                 iounmap(bp->regview);
11495
11496         if (bp->doorbells)
11497                 iounmap(bp->doorbells);
11498
11499         free_netdev(dev);
11500
11501         if (atomic_read(&pdev->enable_cnt) == 1)
11502                 pci_release_regions(pdev);
11503
11504         pci_disable_device(pdev);
11505         pci_set_drvdata(pdev, NULL);
11506
11507         return rc;
11508 }
11509
11510 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11511 {
11512         struct net_device *dev = pci_get_drvdata(pdev);
11513         struct bnx2x *bp;
11514
11515         if (!dev) {
11516                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11517                 return;
11518         }
11519         bp = netdev_priv(dev);
11520
11521         unregister_netdev(dev);
11522
11523         kfree(bp->init_ops_offsets);
11524         kfree(bp->init_ops);
11525         kfree(bp->init_data);
11526         release_firmware(bp->firmware);
11527
11528         if (bp->regview)
11529                 iounmap(bp->regview);
11530
11531         if (bp->doorbells)
11532                 iounmap(bp->doorbells);
11533
11534         free_netdev(dev);
11535
11536         if (atomic_read(&pdev->enable_cnt) == 1)
11537                 pci_release_regions(pdev);
11538
11539         pci_disable_device(pdev);
11540         pci_set_drvdata(pdev, NULL);
11541 }
11542
11543 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11544 {
11545         struct net_device *dev = pci_get_drvdata(pdev);
11546         struct bnx2x *bp;
11547
11548         if (!dev) {
11549                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11550                 return -ENODEV;
11551         }
11552         bp = netdev_priv(dev);
11553
11554         rtnl_lock();
11555
11556         pci_save_state(pdev);
11557
11558         if (!netif_running(dev)) {
11559                 rtnl_unlock();
11560                 return 0;
11561         }
11562
11563         netif_device_detach(dev);
11564
11565         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11566
11567         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11568
11569         rtnl_unlock();
11570
11571         return 0;
11572 }
11573
11574 static int bnx2x_resume(struct pci_dev *pdev)
11575 {
11576         struct net_device *dev = pci_get_drvdata(pdev);
11577         struct bnx2x *bp;
11578         int rc;
11579
11580         if (!dev) {
11581                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11582                 return -ENODEV;
11583         }
11584         bp = netdev_priv(dev);
11585
11586         rtnl_lock();
11587
11588         pci_restore_state(pdev);
11589
11590         if (!netif_running(dev)) {
11591                 rtnl_unlock();
11592                 return 0;
11593         }
11594
11595         bnx2x_set_power_state(bp, PCI_D0);
11596         netif_device_attach(dev);
11597
11598         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11599
11600         rtnl_unlock();
11601
11602         return rc;
11603 }
11604
11605 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11606 {
11607         int i;
11608
11609         bp->state = BNX2X_STATE_ERROR;
11610
11611         bp->rx_mode = BNX2X_RX_MODE_NONE;
11612
11613         bnx2x_netif_stop(bp, 0);
11614
11615         del_timer_sync(&bp->timer);
11616         bp->stats_state = STATS_STATE_DISABLED;
11617         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11618
11619         /* Release IRQs */
11620         bnx2x_free_irq(bp);
11621
11622         if (CHIP_IS_E1(bp)) {
11623                 struct mac_configuration_cmd *config =
11624                                                 bnx2x_sp(bp, mcast_config);
11625
11626                 for (i = 0; i < config->hdr.length; i++)
11627                         CAM_INVALIDATE(config->config_table[i]);
11628         }
11629
11630         /* Free SKBs, SGEs, TPA pool and driver internals */
11631         bnx2x_free_skbs(bp);
11632         for_each_rx_queue(bp, i)
11633                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11634         for_each_rx_queue(bp, i)
11635                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11636         bnx2x_free_mem(bp);
11637
11638         bp->state = BNX2X_STATE_CLOSED;
11639
11640         netif_carrier_off(bp->dev);
11641
11642         return 0;
11643 }
11644
11645 static void bnx2x_eeh_recover(struct bnx2x *bp)
11646 {
11647         u32 val;
11648
11649         mutex_init(&bp->port.phy_mutex);
11650
11651         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11652         bp->link_params.shmem_base = bp->common.shmem_base;
11653         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11654
11655         if (!bp->common.shmem_base ||
11656             (bp->common.shmem_base < 0xA0000) ||
11657             (bp->common.shmem_base >= 0xC0000)) {
11658                 BNX2X_DEV_INFO("MCP not active\n");
11659                 bp->flags |= NO_MCP_FLAG;
11660                 return;
11661         }
11662
11663         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11664         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11665                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11666                 BNX2X_ERR("BAD MCP validity signature\n");
11667
11668         if (!BP_NOMCP(bp)) {
11669                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11670                               & DRV_MSG_SEQ_NUMBER_MASK);
11671                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11672         }
11673 }
11674
11675 /**
11676  * bnx2x_io_error_detected - called when PCI error is detected
11677  * @pdev: Pointer to PCI device
11678  * @state: The current pci connection state
11679  *
11680  * This function is called after a PCI bus error affecting
11681  * this device has been detected.
11682  */
11683 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11684                                                 pci_channel_state_t state)
11685 {
11686         struct net_device *dev = pci_get_drvdata(pdev);
11687         struct bnx2x *bp = netdev_priv(dev);
11688
11689         rtnl_lock();
11690
11691         netif_device_detach(dev);
11692
11693         if (netif_running(dev))
11694                 bnx2x_eeh_nic_unload(bp);
11695
11696         pci_disable_device(pdev);
11697
11698         rtnl_unlock();
11699
11700         /* Request a slot reset */
11701         return PCI_ERS_RESULT_NEED_RESET;
11702 }
11703
11704 /**
11705  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11706  * @pdev: Pointer to PCI device
11707  *
11708  * Restart the card from scratch, as if from a cold-boot.
11709  */
11710 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11711 {
11712         struct net_device *dev = pci_get_drvdata(pdev);
11713         struct bnx2x *bp = netdev_priv(dev);
11714
11715         rtnl_lock();
11716
11717         if (pci_enable_device(pdev)) {
11718                 dev_err(&pdev->dev,
11719                         "Cannot re-enable PCI device after reset\n");
11720                 rtnl_unlock();
11721                 return PCI_ERS_RESULT_DISCONNECT;
11722         }
11723
11724         pci_set_master(pdev);
11725         pci_restore_state(pdev);
11726
11727         if (netif_running(dev))
11728                 bnx2x_set_power_state(bp, PCI_D0);
11729
11730         rtnl_unlock();
11731
11732         return PCI_ERS_RESULT_RECOVERED;
11733 }
11734
11735 /**
11736  * bnx2x_io_resume - called when traffic can start flowing again
11737  * @pdev: Pointer to PCI device
11738  *
11739  * This callback is called when the error recovery driver tells us that
11740  * its OK to resume normal operation.
11741  */
11742 static void bnx2x_io_resume(struct pci_dev *pdev)
11743 {
11744         struct net_device *dev = pci_get_drvdata(pdev);
11745         struct bnx2x *bp = netdev_priv(dev);
11746
11747         rtnl_lock();
11748
11749         bnx2x_eeh_recover(bp);
11750
11751         if (netif_running(dev))
11752                 bnx2x_nic_load(bp, LOAD_NORMAL);
11753
11754         netif_device_attach(dev);
11755
11756         rtnl_unlock();
11757 }
11758
11759 static struct pci_error_handlers bnx2x_err_handler = {
11760         .error_detected = bnx2x_io_error_detected,
11761         .slot_reset     = bnx2x_io_slot_reset,
11762         .resume         = bnx2x_io_resume,
11763 };
11764
11765 static struct pci_driver bnx2x_pci_driver = {
11766         .name        = DRV_MODULE_NAME,
11767         .id_table    = bnx2x_pci_tbl,
11768         .probe       = bnx2x_init_one,
11769         .remove      = __devexit_p(bnx2x_remove_one),
11770         .suspend     = bnx2x_suspend,
11771         .resume      = bnx2x_resume,
11772         .err_handler = &bnx2x_err_handler,
11773 };
11774
11775 static int __init bnx2x_init(void)
11776 {
11777         int ret;
11778
11779         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11780         if (bnx2x_wq == NULL) {
11781                 printk(KERN_ERR PFX "Cannot create workqueue\n");
11782                 return -ENOMEM;
11783         }
11784
11785         ret = pci_register_driver(&bnx2x_pci_driver);
11786         if (ret) {
11787                 printk(KERN_ERR PFX "Cannot register driver\n");
11788                 destroy_workqueue(bnx2x_wq);
11789         }
11790         return ret;
11791 }
11792
11793 static void __exit bnx2x_cleanup(void)
11794 {
11795         pci_unregister_driver(&bnx2x_pci_driver);
11796
11797         destroy_workqueue(bnx2x_wq);
11798 }
11799
11800 module_init(bnx2x_init);
11801 module_exit(bnx2x_cleanup);
11802
11803