bnx2x: SMP-safe intr_sem
drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.48.105-1"
#define DRV_MODULE_RELDATE      "2009/04/22"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1               "bnx2x-e1-"
#define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
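
/* Both helpers above tunnel a GRC register access through the PCI
 * config window: PCICFG_GRC_ADDRESS selects the target GRC address and
 * PCICFG_GRC_DATA moves the data.  The window is parked back at
 * PCICFG_VENDOR_ID_OFFSET afterwards so that a later config cycle
 * cannot hit a stale GRC address.
 */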

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
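
/* The chip exposes 16 DMAE command cells (see dmae_reg_go_c[] above).
 * The command image is copied into the selected cell one dword at a
 * time, and a write of 1 to the matching GO register kicks the engine.
 * The init path always uses the single cell picked by INIT_DMAE_C(bp).
 */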

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
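
/* Completion protocol shared by bnx2x_write_dmae() and
 * bnx2x_read_dmae(): *wb_comp is zeroed before the command is posted,
 * the engine writes DMAE_COMP_VAL to the completion address when done,
 * and the caller polls for it under dmae_mutex (about 200 iterations
 * of 5us, stretched to 100ms per iteration on emulation/FPGA).
 */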

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}
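
/* "wb" stands for wide-bus: 64-bit registers that cannot be written
 * with a single 32-bit MMIO access and so go through DMAE as a
 * {hi, lo} dword pair.  Illustrative call (SOME_WB_REG is a
 * placeholder, not a real register name):
 *
 *      bnx2x_wb_wr(bp, SOME_WB_REG, U64_HI(val64), U64_LO(val64));
 */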

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
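
/* bnx2x_mc_assert() scans each storm's assert list until it reads
 * COMMON_ASM_INVALID_ASSERT_OPCODE, which marks the first unused
 * entry; the return value is the total number of asserts found
 * across all four storms.
 */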

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}
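
/* The MCP scratchpad holds a firmware text trace; the dword at offset
 * 0xf104 is a mark pointing into it.  The dump prints from the mark to
 * the end of the buffer (0xF900) and then wraps from 0xF108 back up to
 * the mark, emitting 8 dwords (one NUL-terminated chunk) per printk.
 */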

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}
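
/* HC_CONFIG bits programmed above, per interrupt mode:
 *   MSI-X: MSI_MSIX_INT_EN + ATTN_BIT_EN; single-ISR and INTx cleared
 *   MSI:   SINGLE_ISR_EN + MSI_MSIX_INT_EN + ATTN_BIT_EN; INTx cleared
 *   INTx:  all four enable bits are written first, then
 *          MSI_MSIX_INT_EN is cleared again with a second write
 */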

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}
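
/* Teardown ordering matters here: intr_sem is bumped first so any ISR
 * that still fires sees a non-zero count and bails out, and the
 * smp_wmb() publishes that update before the HW mask is touched.
 * synchronize_irq() then waits out handlers already in flight, and the
 * slowpath work is cancelled/flushed last so nothing can re-arm after
 * the sync.
 */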

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}
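
/* The ack is a single 32-bit write to the HC command register: the new
 * status block index plus a packed sb_id/storm/op/update field.  An
 * illustrative call, modelled on how the polling path acks the USTORM
 * index (parameters are indicative only):
 *
 *      bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
 *                   le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
 */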

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
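
/* BD accounting above: nbd taken from the first BD counts every BD of
 * the packet.  Only the first BD and the frag BDs carry DMA mappings,
 * so the parse BD (present whenever an offload flag is set) and the
 * optional TSO split-header BD are skipped without unmapping.
 */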

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                __netif_tx_lock(txq, smp_processor_id());

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
}
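
/* The smp_mb() above pairs with a barrier in the queue-stop path of
 * bnx2x_start_xmit() (not part of this excerpt), which stops the queue
 * and then re-checks bnx2x_tx_avail(), so the two sides cannot both
 * miss the race.
 */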


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}
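
/* Only the first RX_COPY_THRESH bytes are synced back to the device
 * above: that is the prefix the CPU may have inspected (and possibly
 * copied out for a short packet) before deciding to recycle the
 * buffer; the rest of the mapping is still device-owned.
 */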

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the last two indices in each page:
           these are the indices that correspond to the "next" element,
           hence they will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}
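
/* SGE mask convention: a set bit means the SGE is available, a cleared
 * bit means the FW has consumed it.  bnx2x_update_sge_prod() advances
 * rx_sge_prod only across 64-bit mask elements that have gone fully to
 * zero, re-arming each one to RX_SGE_MASK_ELEM_ONE_MASK as it passes;
 * the per-page "next element" bits are kept cleared so that a fully
 * consumed element really reads as zero.
 */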

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we're going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}
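
/* Setting gso_size on the aggregated skb lets the stack re-segment it
 * later, e.g. when the frame is forwarded instead of being delivered
 * locally; without it an over-MTU LRO aggregate could not be sent out
 * of another interface.
 */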

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take the VLAN tag into account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1427
1428 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1429                                         struct bnx2x_fastpath *fp,
1430                                         u16 bd_prod, u16 rx_comp_prod,
1431                                         u16 rx_sge_prod)
1432 {
1433         struct ustorm_eth_rx_producers rx_prods = {0};
1434         int i;
1435
1436         /* Update producers */
1437         rx_prods.bd_prod = bd_prod;
1438         rx_prods.cqe_prod = rx_comp_prod;
1439         rx_prods.sge_prod = rx_sge_prod;
1440
1441         /*
1442          * Make sure that the BD and SGE data is updated before updating the
1443          * producers since FW might read the BD/SGE right after the producer
1444          * is updated.
1445          * This is only applicable for weak-ordered memory model archs such
1446          * as IA-64. The following barrier is also mandatory since the FW
1447          * assumes that BDs always have buffers.
1448          */
1449         wmb();
1450
1451         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1452                 REG_WR(bp, BAR_USTRORM_INTMEM +
1453                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1454                        ((u32 *)&rx_prods)[i]);
1455
1456         mmiowb(); /* keep prod updates ordered */
1457
1458         DP(NETIF_MSG_RX_STATUS,
1459            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1460            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1461 }
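
/*
 * The function above follows the generic "publish data, then publish
 * index" protocol for rings shared with the device.  A minimal sketch
 * of the same idiom (illustrative pseudo-driver code, not bnx2x API):
 *
 *	ring[prod] = desc;		1. fill the descriptor
 *	wmb();				2. make it visible before the index
 *	writel(prod, doorbell);		3. hand the index to the chip
 *	mmiowb();			4. keep MMIO ordered between CPUs
 */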
1462
1463 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1464 {
1465         struct bnx2x *bp = fp->bp;
1466         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1467         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1468         int rx_pkt = 0;
1469
1470 #ifdef BNX2X_STOP_ON_ERROR
1471         if (unlikely(bp->panic))
1472                 return 0;
1473 #endif
1474
1475         /* The CQ "next element" is the same size as a regular element,
1476            which is why the bare increment is OK here */
1477         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1478         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1479                 hw_comp_cons++;
1480
1481         bd_cons = fp->rx_bd_cons;
1482         bd_prod = fp->rx_bd_prod;
1483         bd_prod_fw = bd_prod;
1484         sw_comp_cons = fp->rx_comp_cons;
1485         sw_comp_prod = fp->rx_comp_prod;
1486
1487         /* Memory barrier necessary as speculative reads of the rx
1488          * buffer can be ahead of the index in the status block
1489          */
1490         rmb();
1491
1492         DP(NETIF_MSG_RX_STATUS,
1493            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1494            fp->index, hw_comp_cons, sw_comp_cons);
1495
1496         while (sw_comp_cons != hw_comp_cons) {
1497                 struct sw_rx_bd *rx_buf = NULL;
1498                 struct sk_buff *skb;
1499                 union eth_rx_cqe *cqe;
1500                 u8 cqe_fp_flags;
1501                 u16 len, pad;
1502
1503                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1504                 bd_prod = RX_BD(bd_prod);
1505                 bd_cons = RX_BD(bd_cons);
1506
1507                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1508                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1509
1510                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1511                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1512                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1513                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1514                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1515                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1516
1517                 /* is this a slowpath msg? */
1518                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1519                         bnx2x_sp_event(fp, cqe);
1520                         goto next_cqe;
1521
1522                 /* this is an rx packet */
1523                 } else {
1524                         rx_buf = &fp->rx_buf_ring[bd_cons];
1525                         skb = rx_buf->skb;
1526                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1527                         pad = cqe->fast_path_cqe.placement_offset;
1528
1529                         /* If the CQE is marked as both TPA_START and
1530                            TPA_END, it is a non-TPA CQE */
1531                         if ((!fp->disable_tpa) &&
1532                             (TPA_TYPE(cqe_fp_flags) !=
1533                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1534                                 u16 queue = cqe->fast_path_cqe.queue_index;
1535
1536                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1537                                         DP(NETIF_MSG_RX_STATUS,
1538                                            "calling tpa_start on queue %d\n",
1539                                            queue);
1540
1541                                         bnx2x_tpa_start(fp, queue, skb,
1542                                                         bd_cons, bd_prod);
1543                                         goto next_rx;
1544                                 }
1545
1546                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1547                                         DP(NETIF_MSG_RX_STATUS,
1548                                            "calling tpa_stop on queue %d\n",
1549                                            queue);
1550
1551                                         if (!BNX2X_RX_SUM_FIX(cqe))
1552                                                 BNX2X_ERR("STOP on non-TCP "
1553                                                           "data\n");
1554
1555                                         /* This is the size of the linear
1556                                            data on this skb */
1557                                         len = le16_to_cpu(cqe->fast_path_cqe.
1558                                                                 len_on_bd);
1559                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1560                                                     len, cqe, comp_ring_cons);
1561 #ifdef BNX2X_STOP_ON_ERROR
1562                                         if (bp->panic)
1563                                                 return 0;
1564 #endif
1565
1566                                         bnx2x_update_sge_prod(fp,
1567                                                         &cqe->fast_path_cqe);
1568                                         goto next_cqe;
1569                                 }
1570                         }
1571
1572                         pci_dma_sync_single_for_device(bp->pdev,
1573                                         pci_unmap_addr(rx_buf, mapping),
1574                                                        pad + RX_COPY_THRESH,
1575                                                        PCI_DMA_FROMDEVICE);
1576                         prefetch(skb);
1577                         prefetch(((char *)(skb)) + 128);
1578
1579                         /* is this an error packet? */
1580                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1581                                 DP(NETIF_MSG_RX_ERR,
1582                                    "ERROR  flags %x  rx packet %u\n",
1583                                    cqe_fp_flags, sw_comp_cons);
1584                                 fp->eth_q_stats.rx_err_discard_pkt++;
1585                                 goto reuse_rx;
1586                         }
1587
1588                         /* Since we don't have a jumbo ring, copy small
1589                          * packets when the MTU exceeds 1500 so the big
1590                          * rx buffer can be reused right away */
1591                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1592                             (len <= RX_COPY_THRESH)) {
1593                                 struct sk_buff *new_skb;
1594
1595                                 new_skb = netdev_alloc_skb(bp->dev,
1596                                                            len + pad);
1597                                 if (new_skb == NULL) {
1598                                         DP(NETIF_MSG_RX_ERR,
1599                                            "ERROR  packet dropped "
1600                                            "because of alloc failure\n");
1601                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1602                                         goto reuse_rx;
1603                                 }
1604
1605                                 /* aligned copy */
1606                                 skb_copy_from_linear_data_offset(skb, pad,
1607                                                     new_skb->data + pad, len);
1608                                 skb_reserve(new_skb, pad);
1609                                 skb_put(new_skb, len);
1610
1611                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1612
1613                                 skb = new_skb;
1614
1615                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1616                                 pci_unmap_single(bp->pdev,
1617                                         pci_unmap_addr(rx_buf, mapping),
1618                                                  bp->rx_buf_size,
1619                                                  PCI_DMA_FROMDEVICE);
1620                                 skb_reserve(skb, pad);
1621                                 skb_put(skb, len);
1622
1623                         } else {
1624                                 DP(NETIF_MSG_RX_ERR,
1625                                    "ERROR  packet dropped because "
1626                                    "of alloc failure\n");
1627                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1628 reuse_rx:
1629                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1630                                 goto next_rx;
1631                         }
1632
1633                         skb->protocol = eth_type_trans(skb, bp->dev);
1634
1635                         skb->ip_summed = CHECKSUM_NONE;
1636                         if (bp->rx_csum) {
1637                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1638                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1639                                 else
1640                                         fp->eth_q_stats.hw_csum_err++;
1641                         }
1642                 }
1643
1644                 skb_record_rx_queue(skb, fp->index);
1645 #ifdef BCM_VLAN
1646                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1647                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1648                      PARSING_FLAGS_VLAN))
1649                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1650                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1651                 else
1652 #endif
1653                         netif_receive_skb(skb);
1654
1655
1656 next_rx:
1657                 rx_buf->skb = NULL;
1658
1659                 bd_cons = NEXT_RX_IDX(bd_cons);
1660                 bd_prod = NEXT_RX_IDX(bd_prod);
1661                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1662                 rx_pkt++;
1663 next_cqe:
1664                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1665                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1666
1667                 if (rx_pkt == budget)
1668                         break;
1669         } /* while */
1670
1671         fp->rx_bd_cons = bd_cons;
1672         fp->rx_bd_prod = bd_prod_fw;
1673         fp->rx_comp_cons = sw_comp_cons;
1674         fp->rx_comp_prod = sw_comp_prod;
1675
1676         /* Update producers */
1677         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1678                              fp->rx_sge_prod);
1679
1680         fp->rx_pkt += rx_pkt;
1681         fp->rx_calls++;
1682
1683         return rx_pkt;
1684 }
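
/*
 * bnx2x_rx_int() honours the NAPI contract: it returns the number of
 * packets processed and never exceeds the given budget.  A sketch of
 * how a poll handler consumes that value (illustrative only; the real
 * poller is bnx2x_poll() further down in this file):
 *
 *	work_done = bnx2x_rx_int(fp, budget);
 *	if (work_done < budget) {
 *		napi_complete(napi);
 *		... re-enable the status block interrupt ...
 *	}
 *	return work_done;
 */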
1685
1686 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1687 {
1688         struct bnx2x_fastpath *fp = fp_cookie;
1689         struct bnx2x *bp = fp->bp;
1690         int index = fp->index;
1691
1692         /* Return here if interrupt is disabled */
1693         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1694                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1695                 return IRQ_HANDLED;
1696         }
1697
1698         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1699            index, fp->sb_id);
1700         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1701
1702 #ifdef BNX2X_STOP_ON_ERROR
1703         if (unlikely(bp->panic))
1704                 return IRQ_HANDLED;
1705 #endif
1706
1707         prefetch(fp->rx_cons_sb);
1708         prefetch(fp->tx_cons_sb);
1709         prefetch(&fp->status_blk->c_status_block.status_block_index);
1710         prefetch(&fp->status_blk->u_status_block.status_block_index);
1711
1712         napi_schedule(&bnx2x_fp(bp, index, napi));
1713
1714         return IRQ_HANDLED;
1715 }
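
/*
 * intr_sem is the driver's software interrupt gate: it is raised before
 * the hardware is quiesced and lowered again once the device is ready,
 * so a late or shared interrupt taken on another CPU reads a non-zero
 * value above and backs off.  Keeping it an atomic_t is what makes that
 * read SMP-safe without taking a lock in the hot interrupt path.
 */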
1716
1717 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1718 {
1719         struct bnx2x *bp = netdev_priv(dev_instance);
1720         u16 status = bnx2x_ack_int(bp);
1721         u16 mask;
1722
1723         /* Return here if interrupt is shared and it's not for us */
1724         if (unlikely(status == 0)) {
1725                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1726                 return IRQ_NONE;
1727         }
1728         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1729
1730         /* Return here if interrupt is disabled */
1731         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1732                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1733                 return IRQ_HANDLED;
1734         }
1735
1736 #ifdef BNX2X_STOP_ON_ERROR
1737         if (unlikely(bp->panic))
1738                 return IRQ_HANDLED;
1739 #endif
1740
1741         mask = 0x2 << bp->fp[0].sb_id;
1742         if (status & mask) {
1743                 struct bnx2x_fastpath *fp = &bp->fp[0];
1744
1745                 prefetch(fp->rx_cons_sb);
1746                 prefetch(fp->tx_cons_sb);
1747                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1748                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1749
1750                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1751
1752                 status &= ~mask;
1753         }
1754
1755
1756         if (unlikely(status & 0x1)) {
1757                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1758
1759                 status &= ~0x1;
1760                 if (!status)
1761                         return IRQ_HANDLED;
1762         }
1763
1764         if (status)
1765                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1766                    status);
1767
1768         return IRQ_HANDLED;
1769 }
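
/*
 * Worked example for the INTA status decoding above: each fastpath
 * status block owns one status bit at position (sb_id + 1), so for
 * sb_id 0 the mask is 0x2 << 0, i.e. bit 1.  Bit 0 belongs to the
 * default (slowpath) status block, which is why it is routed to the
 * sp_task workqueue instead of NAPI.
 */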
1770
1771 /* end of fast path */
1772
1773 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1774
1775 /* Link */
1776
1777 /*
1778  * General service functions
1779  */
1780
1781 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1782 {
1783         u32 lock_status;
1784         u32 resource_bit = (1 << resource);
1785         int func = BP_FUNC(bp);
1786         u32 hw_lock_control_reg;
1787         int cnt;
1788
1789         /* Validating that the resource is within range */
1790         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1791                 DP(NETIF_MSG_HW,
1792                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1794                 return -EINVAL;
1795         }
1796
1797         if (func <= 5) {
1798                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1799         } else {
1800                 hw_lock_control_reg =
1801                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1802         }
1803
1804         /* Validating that the resource is not already taken */
1805         lock_status = REG_RD(bp, hw_lock_control_reg);
1806         if (lock_status & resource_bit) {
1807                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1808                    lock_status, resource_bit);
1809                 return -EEXIST;
1810         }
1811
1812         /* Try for 5 seconds, polling every 5 ms (1000 attempts) */
1813         for (cnt = 0; cnt < 1000; cnt++) {
1814                 /* Try to acquire the lock */
1815                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1816                 lock_status = REG_RD(bp, hw_lock_control_reg);
1817                 if (lock_status & resource_bit)
1818                         return 0;
1819
1820                 msleep(5);
1821         }
1822         DP(NETIF_MSG_HW, "Timeout\n");
1823         return -EAGAIN;
1824 }
1825
1826 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1827 {
1828         u32 lock_status;
1829         u32 resource_bit = (1 << resource);
1830         int func = BP_FUNC(bp);
1831         u32 hw_lock_control_reg;
1832
1833         /* Validating that the resource is within range */
1834         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1835                 DP(NETIF_MSG_HW,
1836                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1837                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1838                 return -EINVAL;
1839         }
1840
1841         if (func <= 5) {
1842                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1843         } else {
1844                 hw_lock_control_reg =
1845                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1846         }
1847
1848         /* Validating that the resource is currently taken */
1849         lock_status = REG_RD(bp, hw_lock_control_reg);
1850         if (!(lock_status & resource_bit)) {
1851                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1852                    lock_status, resource_bit);
1853                 return -EFAULT;
1854         }
1855
1856         REG_WR(bp, hw_lock_control_reg, resource_bit);
1857         return 0;
1858 }
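
/*
 * Usage sketch for the HW lock pair above (illustrative; this is how
 * the GPIO/SPIO helpers below use it):
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... access the resource shared with the MCP/other functions ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * Since the lock lives in hardware, it excludes the management firmware
 * and the other port's driver instance as well, not just other CPUs.
 */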
1859
1860 /* HW Lock for shared dual port PHYs */
1861 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1862 {
1863         mutex_lock(&bp->port.phy_mutex);
1864
1865         if (bp->port.need_hw_lock)
1866                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1867 }
1868
1869 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1870 {
1871         if (bp->port.need_hw_lock)
1872                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1873
1874         mutex_unlock(&bp->port.phy_mutex);
1875 }
1876
1877 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1878 {
1879         /* The GPIO should be swapped if swap register is set and active */
1880         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1881                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1882         int gpio_shift = gpio_num +
1883                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1884         u32 gpio_mask = (1 << gpio_shift);
1885         u32 gpio_reg;
1886         int value;
1887
1888         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1889                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1890                 return -EINVAL;
1891         }
1892
1893         /* read GPIO value */
1894         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1895
1896         /* get the requested pin value */
1897         if ((gpio_reg & gpio_mask) == gpio_mask)
1898                 value = 1;
1899         else
1900                 value = 0;
1901
1902         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1903
1904         return value;
1905 }
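
/*
 * Worked example for the pin math above, assuming
 * MISC_REGISTERS_GPIO_PORT_SHIFT is 4 (one group of four pins per port;
 * the exact value lives in the register header): GPIO 1 on (swapped)
 * port 1 gives gpio_shift = 1 + 4 = 5 and gpio_mask = 1 << 5, i.e. the
 * pin's bit inside port 1's group of MISC_REG_GPIO.
 */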
1906
1907 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1908 {
1909         /* The GPIO should be swapped if swap register is set and active */
1910         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1911                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1912         int gpio_shift = gpio_num +
1913                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1914         u32 gpio_mask = (1 << gpio_shift);
1915         u32 gpio_reg;
1916
1917         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1918                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1919                 return -EINVAL;
1920         }
1921
1922         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1923         /* read GPIO and mask except the float bits */
1924         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1925
1926         switch (mode) {
1927         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1928                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1929                    gpio_num, gpio_shift);
1930                 /* clear FLOAT and set CLR */
1931                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1932                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1933                 break;
1934
1935         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1936                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1937                    gpio_num, gpio_shift);
1938                 /* clear FLOAT and set SET */
1939                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1940                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1941                 break;
1942
1943         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1944                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1945                    gpio_num, gpio_shift);
1946                 /* set FLOAT */
1947                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1948                 break;
1949
1950         default:
1951                 break;
1952         }
1953
1954         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1955         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1956
1957         return 0;
1958 }
1959
1960 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1961 {
1962         /* The GPIO should be swapped if swap register is set and active */
1963         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1964                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1965         int gpio_shift = gpio_num +
1966                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1967         u32 gpio_mask = (1 << gpio_shift);
1968         u32 gpio_reg;
1969
1970         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1971                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1972                 return -EINVAL;
1973         }
1974
1975         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1976         /* read GPIO int */
1977         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1978
1979         switch (mode) {
1980         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1981                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1982                                    "output low\n", gpio_num, gpio_shift);
1983                 /* clear SET and set CLR */
1984                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1985                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1986                 break;
1987
1988         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1989                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1990                                    "output high\n", gpio_num, gpio_shift);
1991                 /* clear CLR and set SET */
1992                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1993                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1994                 break;
1995
1996         default:
1997                 break;
1998         }
1999
2000         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2001         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2002
2003         return 0;
2004 }
2005
2006 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2007 {
2008         u32 spio_mask = (1 << spio_num);
2009         u32 spio_reg;
2010
2011         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2012             (spio_num > MISC_REGISTERS_SPIO_7)) {
2013                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2014                 return -EINVAL;
2015         }
2016
2017         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2018         /* read SPIO and mask except the float bits */
2019         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2020
2021         switch (mode) {
2022         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2023                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2024                 /* clear FLOAT and set CLR */
2025                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2026                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2027                 break;
2028
2029         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2030                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2031                 /* clear FLOAT and set SET */
2032                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2033                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2034                 break;
2035
2036         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2037                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2038                 /* set FLOAT */
2039                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2040                 break;
2041
2042         default:
2043                 break;
2044         }
2045
2046         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2047         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2048
2049         return 0;
2050 }
2051
2052 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2053 {
2054         switch (bp->link_vars.ieee_fc &
2055                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2056         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2057                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2058                                           ADVERTISED_Pause);
2059                 break;
2060
2061         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2062                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2063                                          ADVERTISED_Pause);
2064                 break;
2065
2066         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2067                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2068                 break;
2069
2070         default:
2071                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2072                                           ADVERTISED_Pause);
2073                 break;
2074         }
2075 }
2076
2077 static void bnx2x_link_report(struct bnx2x *bp)
2078 {
2079         if (bp->link_vars.link_up) {
2080                 if (bp->state == BNX2X_STATE_OPEN)
2081                         netif_carrier_on(bp->dev);
2082                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2083
2084                 printk("%d Mbps ", bp->link_vars.line_speed);
2085
2086                 if (bp->link_vars.duplex == DUPLEX_FULL)
2087                         printk("full duplex");
2088                 else
2089                         printk("half duplex");
2090
2091                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2092                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2093                                 printk(", receive ");
2094                                 if (bp->link_vars.flow_ctrl &
2095                                     BNX2X_FLOW_CTRL_TX)
2096                                         printk("& transmit ");
2097                         } else {
2098                                 printk(", transmit ");
2099                         }
2100                         printk("flow control ON");
2101                 }
2102                 printk("\n");
2103
2104         } else { /* link_down */
2105                 netif_carrier_off(bp->dev);
2106                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2107         }
2108 }
2109
2110 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2111 {
2112         if (!BP_NOMCP(bp)) {
2113                 u8 rc;
2114
2115                 /* Initialize link parameters structure variables */
2116                 /* It is recommended to turn off RX FC for jumbo frames
2117                    for better performance */
2118                 if (IS_E1HMF(bp))
2119                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2120                 else if (bp->dev->mtu > 5000)
2121                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2122                 else
2123                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2124
2125                 bnx2x_acquire_phy_lock(bp);
2126
2127                 if (load_mode == LOAD_DIAG)
2128                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2129
2130                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2131
2132                 bnx2x_release_phy_lock(bp);
2133
2134                 bnx2x_calc_fc_adv(bp);
2135
2136                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2137                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2138                         bnx2x_link_report(bp);
2139                 }
2140
2141                 return rc;
2142         }
2143         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2144         return -EINVAL;
2145 }
2146
2147 static void bnx2x_link_set(struct bnx2x *bp)
2148 {
2149         if (!BP_NOMCP(bp)) {
2150                 bnx2x_acquire_phy_lock(bp);
2151                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2152                 bnx2x_release_phy_lock(bp);
2153
2154                 bnx2x_calc_fc_adv(bp);
2155         } else
2156                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2157 }
2158
2159 static void bnx2x__link_reset(struct bnx2x *bp)
2160 {
2161         if (!BP_NOMCP(bp)) {
2162                 bnx2x_acquire_phy_lock(bp);
2163                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2164                 bnx2x_release_phy_lock(bp);
2165         } else
2166                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2167 }
2168
2169 static u8 bnx2x_link_test(struct bnx2x *bp)
2170 {
2171         u8 rc;
2172
2173         bnx2x_acquire_phy_lock(bp);
2174         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2175         bnx2x_release_phy_lock(bp);
2176
2177         return rc;
2178 }
2179
2180 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2181 {
2182         u32 r_param = bp->link_vars.line_speed / 8;
2183         u32 fair_periodic_timeout_usec;
2184         u32 t_fair;
2185
2186         memset(&(bp->cmng.rs_vars), 0,
2187                sizeof(struct rate_shaping_vars_per_port));
2188         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2189
2190         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2191         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2192
2193         /* this is the threshold below which no timer arming will occur;
2194            the 1.25 coefficient makes the threshold a little bigger than
2195            the real time, to compensate for timer inaccuracy */
2196         bp->cmng.rs_vars.rs_threshold =
2197                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2198
2199         /* resolution of fairness timer */
2200         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2201         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2202         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2203
2204         /* this is the threshold below which we won't arm the timer anymore */
2205         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2206
2207         /* we multiply by 1e3/8 to get bytes/msec; the upper bound keeps
2208            the accumulated credit from exceeding t_fair*FAIR_MEM (the
2209            algorithm's resolution) */
2210         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2211         /* since each tick is 4 usec */
2212         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2213 }
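
/*
 * Worked example at 10G (line_speed == 10000 Mbps): r_param is
 * 10000 / 8 == 1250 bytes/usec, and per the comment above t_fair comes
 * out at 1000 usec, which implies T_FAIR_COEF == 10^7 usec*Mbps.  The
 * credit upper bound is then 1250 * 1000 * FAIR_MEM bytes.
 */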
2214
2215 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2216 {
2217         struct rate_shaping_vars_per_vn m_rs_vn;
2218         struct fairness_vars_per_vn m_fair_vn;
2219         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2220         u16 vn_min_rate, vn_max_rate;
2221         int i;
2222
2223         /* If function is hidden - set min and max to zeroes */
2224         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2225                 vn_min_rate = 0;
2226                 vn_max_rate = 0;
2227
2228         } else {
2229                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2230                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2231                 /* If fairness is enabled (not all min rates are zero) and
2232                    the current min rate is zero, set it to DEF_MIN_RATE;
2233                    the algorithm requires a non-zero minimum. */
2234                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2235                         vn_min_rate = DEF_MIN_RATE;
2236                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2237                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2238         }
2239
2240         DP(NETIF_MSG_IFUP,
2241            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2242            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2243
2244         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2245         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2246
2247         /* global vn counter - maximal Mbps for this vn */
2248         m_rs_vn.vn_counter.rate = vn_max_rate;
2249
2250         /* quota - number of bytes transmitted in this period */
2251         m_rs_vn.vn_counter.quota =
2252                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2253
2254         if (bp->vn_weight_sum) {
2255                 /* credit for each period of the fairness algorithm:
2256                    the number of bytes in T_FAIR (the vn's share of the
2257                    port rate). vn_weight_sum should not be larger than
2258                    10000, thus T_FAIR_COEF / (8 * vn_weight_sum) will
2259                    always be greater than zero */
2260                 m_fair_vn.vn_credit_delta =
2261                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2262                                                  (8 * bp->vn_weight_sum))),
2263                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2264                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2265                    m_fair_vn.vn_credit_delta);
2266         }
2267
2268         /* Store it to internal memory */
2269         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2270                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2271                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2272                        ((u32 *)(&m_rs_vn))[i]);
2273
2274         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2275                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2276                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2277                        ((u32 *)(&m_fair_vn))[i]);
2278 }
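
/*
 * Unit check for the quota computed above:
 *	quota = rate[Mbps] * RS_PERIODIC_TIMEOUT_USEC / 8
 * and Mbps * usec / 8 == (10^6 bit/s * 10^-6 s) / 8 == bytes, so the
 * quota is the byte budget for one rate-shaping period.  The MIN/MAX_BW
 * fields are multiplied by 100 above, which (assuming they encode
 * percentages of the 10G line) yields the rate directly in Mbps.
 */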
2279
2280
2281 /* This function is called upon link interrupt */
2282 static void bnx2x_link_attn(struct bnx2x *bp)
2283 {
2284         /* Make sure that we are synced with the current statistics */
2285         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2286
2287         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2288
2289         if (bp->link_vars.link_up) {
2290
2291                 /* dropless flow control */
2292                 if (CHIP_IS_E1H(bp)) {
2293                         int port = BP_PORT(bp);
2294                         u32 pause_enabled = 0;
2295
2296                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2297                                 pause_enabled = 1;
2298
2299                         REG_WR(bp, BAR_USTRORM_INTMEM +
2300                                USTORM_PAUSE_ENABLED_OFFSET(port),
2301                                pause_enabled);
2302                 }
2303
2304                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2305                         struct host_port_stats *pstats;
2306
2307                         pstats = bnx2x_sp(bp, port_stats);
2308                         /* reset old bmac stats */
2309                         memset(&(pstats->mac_stx[0]), 0,
2310                                sizeof(struct mac_stx));
2311                 }
2312                 if ((bp->state == BNX2X_STATE_OPEN) ||
2313                     (bp->state == BNX2X_STATE_DISABLED))
2314                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2315         }
2316
2317         /* indicate link status */
2318         bnx2x_link_report(bp);
2319
2320         if (IS_E1HMF(bp)) {
2321                 int port = BP_PORT(bp);
2322                 int func;
2323                 int vn;
2324
2325                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2326                         if (vn == BP_E1HVN(bp))
2327                                 continue;
2328
2329                         func = ((vn << 1) | port);
2330
2331                         /* Set the attention towards other drivers
2332                            on the same port */
2333                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2334                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2335                 }
2336
2337                 if (bp->link_vars.link_up) {
2338                         int i;
2339
2340                         /* Init rate shaping and fairness contexts */
2341                         bnx2x_init_port_minmax(bp);
2342
2343                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2344                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2345
2346                         /* Store it to internal memory */
2347                         for (i = 0;
2348                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2349                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2350                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2351                                        ((u32 *)(&bp->cmng))[i]);
2352                 }
2353         }
2354 }
2355
2356 static void bnx2x__link_status_update(struct bnx2x *bp)
2357 {
2358         if (bp->state != BNX2X_STATE_OPEN)
2359                 return;
2360
2361         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2362
2363         if (bp->link_vars.link_up)
2364                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2365         else
2366                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2367
2368         /* indicate link status */
2369         bnx2x_link_report(bp);
2370 }
2371
2372 static void bnx2x_pmf_update(struct bnx2x *bp)
2373 {
2374         int port = BP_PORT(bp);
2375         u32 val;
2376
2377         bp->port.pmf = 1;
2378         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2379
2380         /* enable nig attention */
2381         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2382         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2383         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2384
2385         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2386 }
2387
2388 /* end of Link */
2389
2390 /* slow path */
2391
2392 /*
2393  * General service functions
2394  */
2395
2396 /* the slow path queue is unusual in that completions arrive on the fastpath ring */
2397 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2398                          u32 data_hi, u32 data_lo, int common)
2399 {
2400         int func = BP_FUNC(bp);
2401
2402         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2403            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2404            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2405            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2406            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2407
2408 #ifdef BNX2X_STOP_ON_ERROR
2409         if (unlikely(bp->panic))
2410                 return -EIO;
2411 #endif
2412
2413         spin_lock_bh(&bp->spq_lock);
2414
2415         if (!bp->spq_left) {
2416                 BNX2X_ERR("BUG! SPQ ring full!\n");
2417                 spin_unlock_bh(&bp->spq_lock);
2418                 bnx2x_panic();
2419                 return -EBUSY;
2420         }
2421
2422         /* CID needs the port number to be encoded in it */
2423         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2424                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2425                                      HW_CID(bp, cid)));
2426         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2427         if (common)
2428                 bp->spq_prod_bd->hdr.type |=
2429                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2430
2431         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2432         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2433
2434         bp->spq_left--;
2435
2436         if (bp->spq_prod_bd == bp->spq_last_bd) {
2437                 bp->spq_prod_bd = bp->spq;
2438                 bp->spq_prod_idx = 0;
2439                 DP(NETIF_MSG_TIMER, "end of spq\n");
2440
2441         } else {
2442                 bp->spq_prod_bd++;
2443                 bp->spq_prod_idx++;
2444         }
2445
2446         /* Make sure that BD data is updated before writing the producer */
2447         wmb();
2448
2449         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2450                bp->spq_prod_idx);
2451
2452         mmiowb();
2453
2454         spin_unlock_bh(&bp->spq_lock);
2455         return 0;
2456 }
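
/*
 * Usage sketch for bnx2x_sp_post() (illustrative; the command, cid and
 * data depend on the ramrod being issued):
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 *			   U64_HI(config_addr), U64_LO(config_addr), 0);
 *
 * Note that the completion does not come back on the SPQ itself: it
 * arrives as a slowpath CQE on a fastpath ring and is dispatched via
 * the CQE_TYPE() test in bnx2x_rx_int() to bnx2x_sp_event().
 */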
2457
2458 /* acquire split MCP access lock register */
2459 static int bnx2x_acquire_alr(struct bnx2x *bp)
2460 {
2461         u32 i, j, val;
2462         int rc = 0;
2463
2464         might_sleep();
2465         i = 100;
2466         for (j = 0; j < i*10; j++) {
2467                 val = (1UL << 31);
2468                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2469                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2470                 if (val & (1UL << 31))
2471                         break;
2472
2473                 msleep(5);
2474         }
2475         if (!(val & (1UL << 31))) {
2476                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2477                 rc = -EBUSY;
2478         }
2479
2480         return rc;
2481 }
2482
2483 /* release split MCP access lock register */
2484 static void bnx2x_release_alr(struct bnx2x *bp)
2485 {
2486         u32 val = 0;
2487
2488         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2489 }
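
/*
 * The ALR handshake: ownership is requested by writing bit 31 of the
 * lock register, and the caller knows it won when the read-back still
 * shows the bit set; writing 0 releases it.  The acquire loop retries
 * up to 1000 times with a 5 ms sleep, i.e. roughly the same 5 second
 * timeout as bnx2x_acquire_hw_lock().
 */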
2490
2491 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2492 {
2493         struct host_def_status_block *def_sb = bp->def_status_blk;
2494         u16 rc = 0;
2495
2496         barrier(); /* status block is written to by the chip */
2497         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2498                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2499                 rc |= 1;
2500         }
2501         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2502                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2503                 rc |= 2;
2504         }
2505         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2506                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2507                 rc |= 4;
2508         }
2509         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2510                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2511                 rc |= 8;
2512         }
2513         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2514                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2515                 rc |= 16;
2516         }
2517         return rc;
2518 }
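
/*
 * The return value is a bitmask of which default-status-block indices
 * moved since the last call: bit 0 - attention bits, bit 1 - CSTORM,
 * bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.  The slowpath task
 * services only the parts that actually changed.
 */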
2519
2520 /*
2521  * slow path service functions
2522  */
2523
2524 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2525 {
2526         int port = BP_PORT(bp);
2527         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2528                        COMMAND_REG_ATTN_BITS_SET);
2529         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2530                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2531         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2532                                        NIG_REG_MASK_INTERRUPT_PORT0;
2533         u32 aeu_mask;
2534         u32 nig_mask = 0;
2535
2536         if (bp->attn_state & asserted)
2537                 BNX2X_ERR("IGU ERROR\n");
2538
2539         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2540         aeu_mask = REG_RD(bp, aeu_addr);
2541
2542         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2543            aeu_mask, asserted);
2544         aeu_mask &= ~(asserted & 0xff);
2545         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2546
2547         REG_WR(bp, aeu_addr, aeu_mask);
2548         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2549
2550         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2551         bp->attn_state |= asserted;
2552         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2553
2554         if (asserted & ATTN_HARD_WIRED_MASK) {
2555                 if (asserted & ATTN_NIG_FOR_FUNC) {
2556
2557                         bnx2x_acquire_phy_lock(bp);
2558
2559                         /* save nig interrupt mask */
2560                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2561                         REG_WR(bp, nig_int_mask_addr, 0);
2562
2563                         bnx2x_link_attn(bp);
2564
2565                         /* handle unicore attn? */
2566                 }
2567                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2568                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2569
2570                 if (asserted & GPIO_2_FUNC)
2571                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2572
2573                 if (asserted & GPIO_3_FUNC)
2574                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2575
2576                 if (asserted & GPIO_4_FUNC)
2577                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2578
2579                 if (port == 0) {
2580                         if (asserted & ATTN_GENERAL_ATTN_1) {
2581                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2582                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2583                         }
2584                         if (asserted & ATTN_GENERAL_ATTN_2) {
2585                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2586                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2587                         }
2588                         if (asserted & ATTN_GENERAL_ATTN_3) {
2589                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2590                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2591                         }
2592                 } else {
2593                         if (asserted & ATTN_GENERAL_ATTN_4) {
2594                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2595                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2596                         }
2597                         if (asserted & ATTN_GENERAL_ATTN_5) {
2598                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2599                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2600                         }
2601                         if (asserted & ATTN_GENERAL_ATTN_6) {
2602                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2603                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2604                         }
2605                 }
2606
2607         } /* if hardwired */
2608
2609         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2610            asserted, hc_addr);
2611         REG_WR(bp, hc_addr, asserted);
2612
2613         /* now set back the mask */
2614         if (asserted & ATTN_NIG_FOR_FUNC) {
2615                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2616                 bnx2x_release_phy_lock(bp);
2617         }
2618 }
2619
2620 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2621 {
2622         int port = BP_PORT(bp);
2623
2624         /* mark the failure */
2625         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2626         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2627         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2628                  bp->link_params.ext_phy_config);
2629
2630         /* log the failure */
2631         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2632                " the driver to shut down the card to prevent permanent"
2633                " damage.  Please contact Dell Support for assistance\n",
2634                bp->dev->name);
2635 }
2636 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2637 {
2638         int port = BP_PORT(bp);
2639         int reg_offset;
2640         u32 val;
2641
2642         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2643                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2644
2645         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2646
2647                 val = REG_RD(bp, reg_offset);
2648                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2649                 REG_WR(bp, reg_offset, val);
2650
2651                 BNX2X_ERR("SPIO5 hw attention\n");
2652
2653                 /* Fan failure attention */
2654                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2655                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2656                         /* Low power mode is controlled by GPIO 2 */
2657                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2658                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2659                         /* The PHY reset is controlled by GPIO 1 */
2660                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2661                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2662                         break;
2663
2664                 default:
2665                         break;
2666                 }
2667                 bnx2x_fan_failure(bp);
2668         }
2669
2670         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2671                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2672                 bnx2x_acquire_phy_lock(bp);
2673                 bnx2x_handle_module_detect_int(&bp->link_params);
2674                 bnx2x_release_phy_lock(bp);
2675         }
2676
2677         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2678
2679                 val = REG_RD(bp, reg_offset);
2680                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2681                 REG_WR(bp, reg_offset, val);
2682
2683                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2684                           (attn & HW_INTERRUT_ASSERT_SET_0));
2685                 bnx2x_panic();
2686         }
2687 }
2688
2689 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2690 {
2691         u32 val;
2692
2693         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2694
2695                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2696                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2697                 /* DORQ discard attention */
2698                 if (val & 0x2)
2699                         BNX2X_ERR("FATAL error from DORQ\n");
2700         }
2701
2702         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2703
2704                 int port = BP_PORT(bp);
2705                 int reg_offset;
2706
2707                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2708                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2709
2710                 val = REG_RD(bp, reg_offset);
2711                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2712                 REG_WR(bp, reg_offset, val);
2713
2714                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2715                           (attn & HW_INTERRUT_ASSERT_SET_1));
2716                 bnx2x_panic();
2717         }
2718 }
2719
2720 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2721 {
2722         u32 val;
2723
2724         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2725
2726                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2727                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2728                 /* CFC error attention */
2729                 if (val & 0x2)
2730                         BNX2X_ERR("FATAL error from CFC\n");
2731         }
2732
2733         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2734
2735                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2736                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2737                 /* RQ_USDMDP_FIFO_OVERFLOW */
2738                 if (val & 0x18000)
2739                         BNX2X_ERR("FATAL error from PXP\n");
2740         }
2741
2742         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2743
2744                 int port = BP_PORT(bp);
2745                 int reg_offset;
2746
2747                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2748                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2749
2750                 val = REG_RD(bp, reg_offset);
2751                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2752                 REG_WR(bp, reg_offset, val);
2753
2754                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2755                           (attn & HW_INTERRUT_ASSERT_SET_2));
2756                 bnx2x_panic();
2757         }
2758 }
2759
2760 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2761 {
2762         u32 val;
2763
2764         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2765
2766                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2767                         int func = BP_FUNC(bp);
2768
2769                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2770                         bnx2x__link_status_update(bp);
2771                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2772                                                         DRV_STATUS_PMF)
2773                                 bnx2x_pmf_update(bp);
2774
2775                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2776
2777                         BNX2X_ERR("MC assert!\n");
2778                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2779                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2780                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2781                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2782                         bnx2x_panic();
2783
2784                 } else if (attn & BNX2X_MCP_ASSERT) {
2785
2786                         BNX2X_ERR("MCP assert!\n");
2787                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2788                         bnx2x_fw_dump(bp);
2789
2790                 } else
2791                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2792         }
2793
2794         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2795                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2796                 if (attn & BNX2X_GRC_TIMEOUT) {
2797                         val = CHIP_IS_E1H(bp) ?
2798                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2799                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2800                 }
2801                 if (attn & BNX2X_GRC_RSV) {
2802                         val = CHIP_IS_E1H(bp) ?
2803                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2804                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2805                 }
2806                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2807         }
2808 }
2809
2810 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2811 {
2812         struct attn_route attn;
2813         struct attn_route group_mask;
2814         int port = BP_PORT(bp);
2815         int index;
2816         u32 reg_addr;
2817         u32 val;
2818         u32 aeu_mask;
2819
2820         /* need to take HW lock because MCP or other port might also
2821            try to handle this event */
2822         bnx2x_acquire_alr(bp);
2823
2824         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2825         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2826         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2827         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2828         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2829            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2830
2831         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2832                 if (deasserted & (1 << index)) {
2833                         group_mask = bp->attn_group[index];
2834
2835                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2836                            index, group_mask.sig[0], group_mask.sig[1],
2837                            group_mask.sig[2], group_mask.sig[3]);
2838
2839                         bnx2x_attn_int_deasserted3(bp,
2840                                         attn.sig[3] & group_mask.sig[3]);
2841                         bnx2x_attn_int_deasserted1(bp,
2842                                         attn.sig[1] & group_mask.sig[1]);
2843                         bnx2x_attn_int_deasserted2(bp,
2844                                         attn.sig[2] & group_mask.sig[2]);
2845                         bnx2x_attn_int_deasserted0(bp,
2846                                         attn.sig[0] & group_mask.sig[0]);
2847
2848                         if ((attn.sig[0] & group_mask.sig[0] &
2849                                                 HW_PRTY_ASSERT_SET_0) ||
2850                             (attn.sig[1] & group_mask.sig[1] &
2851                                                 HW_PRTY_ASSERT_SET_1) ||
2852                             (attn.sig[2] & group_mask.sig[2] &
2853                                                 HW_PRTY_ASSERT_SET_2))
2854                                 BNX2X_ERR("FATAL HW block parity attention\n");
2855                 }
2856         }
2857
2858         bnx2x_release_alr(bp);
2859
2860         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2861
2862         val = ~deasserted;
2863         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2864            val, reg_addr);
2865         REG_WR(bp, reg_addr, val);
2866
2867         if (~bp->attn_state & deasserted)
2868                 BNX2X_ERR("IGU ERROR\n");
2869
2870         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2871                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2872
2873         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2874         aeu_mask = REG_RD(bp, reg_addr);
2875
2876         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2877            aeu_mask, deasserted);
2878         aeu_mask |= (deasserted & 0xff);
2879         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2880
2881         REG_WR(bp, reg_addr, aeu_mask);
2882         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2883
2884         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2885         bp->attn_state &= ~deasserted;
2886         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2887 }
2888
2889 static void bnx2x_attn_int(struct bnx2x *bp)
2890 {
2891         /* read local copy of bits */
2892         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2893                                                                 attn_bits);
2894         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2895                                                                 attn_bits_ack);
2896         u32 attn_state = bp->attn_state;
2897
2898         /* look for changed bits */
2899         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2900         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
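             /* Worked example (hypothetical values): with attn_bits = 0x5,
              * attn_ack = 0x1 and attn_state = 0x1, this gives
              * asserted = 0x5 & ~0x1 & ~0x1 = 0x4 (a bit newly raised and
              * not yet acknowledged) and deasserted = ~0x5 & 0x1 & 0x1 = 0
              * (no previously asserted bit has dropped).
              */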
2901
2902         DP(NETIF_MSG_HW,
2903            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2904            attn_bits, attn_ack, asserted, deasserted);
2905
2906         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2907                 BNX2X_ERR("BAD attention state\n");
2908
2909         /* handle bits that were raised */
2910         if (asserted)
2911                 bnx2x_attn_int_asserted(bp, asserted);
2912
2913         if (deasserted)
2914                 bnx2x_attn_int_deasserted(bp, deasserted);
2915 }
2916
2917 static void bnx2x_sp_task(struct work_struct *work)
2918 {
2919         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2920         u16 status;
2921
2923         /* Return here if interrupt is disabled */
2924         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2925                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2926                 return;
2927         }
2928
2929         status = bnx2x_update_dsb_idx(bp);
2930 /*      if (status == 0)                                     */
2931 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2932
2933         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2934
2935         /* HW attentions */
2936         if (status & 0x1)
2937                 bnx2x_attn_int(bp);
2938
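             /* A note on the ack sequence below: the first four acks pass
              * IGU_INT_NOP, which presumably only updates the storm
              * consumer indices without changing the interrupt state; the
              * final TSTORM ack passes IGU_INT_ENABLE, re-arming the line
              * that bnx2x_msix_sp_int() disabled before queueing this work.
              */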
2939         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2940                      IGU_INT_NOP, 1);
2941         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2942                      IGU_INT_NOP, 1);
2943         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2944                      IGU_INT_NOP, 1);
2945         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2946                      IGU_INT_NOP, 1);
2947         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2948                      IGU_INT_ENABLE, 1);
2950 }
2951
2952 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2953 {
2954         struct net_device *dev = dev_instance;
2955         struct bnx2x *bp = netdev_priv(dev);
2956
2957         /* Return here if interrupt is disabled */
2958         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2959                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2960                 return IRQ_HANDLED;
2961         }
2962
2963         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2964
2965 #ifdef BNX2X_STOP_ON_ERROR
2966         if (unlikely(bp->panic))
2967                 return IRQ_HANDLED;
2968 #endif
2969
2970         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2971
2972         return IRQ_HANDLED;
2973 }
2974
2975 /* end of slow path */
2976
2977 /* Statistics */
2978
2979 /****************************************************************************
2980 * Macros
2981 ****************************************************************************/
2982
2983 /* sum[hi:lo] += add[hi:lo] */
2984 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2985         do { \
2986                 s_lo += a_lo; \
2987                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2988         } while (0)
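     /* Worked example (illustrative values only): adding a_hi:a_lo = 0x0:0x2
      * to s_hi:s_lo = 0x1:0xffffffff wraps the low word to 0x1; the
      * (s_lo < a_lo) test detects the wrap and carries 1 into s_hi, giving
      * the correct 64-bit sum 0x2:0x1.
      */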
2989
2990 /* difference = minuend - subtrahend */
2991 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2992         do { \
2993                 if (m_lo < s_lo) { \
2994                         /* underflow */ \
2995                         d_hi = m_hi - s_hi; \
2996                         if (d_hi > 0) { \
2997                                 /* we can 'loan' 1 */ \
2998                                 d_hi--; \
2999                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3000                         } else { \
3001                                 /* m_hi <= s_hi */ \
3002                                 d_hi = 0; \
3003                                 d_lo = 0; \
3004                         } \
3005                 } else { \
3006                         /* m_lo >= s_lo */ \
3007                         if (m_hi < s_hi) { \
3008                                 d_hi = 0; \
3009                                 d_lo = 0; \
3010                         } else { \
3011                                 /* m_hi >= s_hi */ \
3012                                 d_hi = m_hi - s_hi; \
3013                                 d_lo = m_lo - s_lo; \
3014                         } \
3015                 } \
3016         } while (0)
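     /* Worked example (illustrative values only): m_hi:m_lo = 0x2:0x0 minus
      * s_hi:s_lo = 0x1:0x1 takes the underflow path (m_lo < s_lo): d_hi is
      * first 0x1, the borrow drops it to 0x0 and d_lo becomes
      * 0x0 + (0xffffffff - 0x1) + 1 = 0xffffffff, i.e. the correct result
      * 0x0:0xffffffff. When the subtrahend exceeds the minuend the macro
      * clamps the difference to 0:0 instead of wrapping.
      */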
3017
3018 #define UPDATE_STAT64(s, t) \
3019         do { \
3020                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3021                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3022                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3023                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3024                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3025                        pstats->mac_stx[1].t##_lo, diff.lo); \
3026         } while (0)
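     /* As the macro above shows, mac_stx[0] keeps the latest raw MAC
      * snapshot (the baseline for the next delta) while mac_stx[1]
      * accumulates the running 64-bit totals that the update routines
      * below report from.
      */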
3027
3028 #define UPDATE_STAT64_NIG(s, t) \
3029         do { \
3030                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3031                         diff.lo, new->s##_lo, old->s##_lo); \
3032                 ADD_64(estats->t##_hi, diff.hi, \
3033                        estats->t##_lo, diff.lo); \
3034         } while (0)
3035
3036 /* sum[hi:lo] += add */
3037 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3038         do { \
3039                 s_lo += a; \
3040                 s_hi += (s_lo < a) ? 1 : 0; \
3041         } while (0)
3042
3043 #define UPDATE_EXTEND_STAT(s) \
3044         do { \
3045                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3046                               pstats->mac_stx[1].s##_lo, \
3047                               new->s); \
3048         } while (0)
3049
3050 #define UPDATE_EXTEND_TSTAT(s, t) \
3051         do { \
3052                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3053                 old_tclient->s = tclient->s; \
3054                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3055         } while (0)
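     /* A note on the u32 subtraction above: the delta stays correct modulo
      * 2^32 even if the firmware counter wrapped between polls, provided
      * fewer than 2^32 events occurred in a single interval.
      */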
3056
3057 #define UPDATE_EXTEND_USTAT(s, t) \
3058         do { \
3059                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3060                 old_uclient->s = uclient->s; \
3061                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3062         } while (0)
3063
3064 #define UPDATE_EXTEND_XSTAT(s, t) \
3065         do { \
3066                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3067                 old_xclient->s = xclient->s; \
3068                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3069         } while (0)
3070
3071 /* minuend -= subtrahend */
3072 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3073         do { \
3074                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3075         } while (0)
3076
3077 /* minuend[hi:lo] -= subtrahend */
3078 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3079         do { \
3080                 SUB_64(m_hi, 0, m_lo, s); \
3081         } while (0)
3082
3083 #define SUB_EXTEND_USTAT(s, t) \
3084         do { \
3085                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3086                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3087         } while (0)
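     /* Unlike UPDATE_EXTEND_USTAT, SUB_EXTEND_USTAT deliberately does not
      * refresh old_uclient->s: the callers below first subtract the
      * no-buffer packets from the received-packet totals and then feed the
      * same fields to UPDATE_EXTEND_USTAT, which records the new baseline.
      */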
3088
3089 /*
3090  * General service functions
3091  */
3092
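     /* One caveat, visible from the #if below: on hosts where
      * BITS_PER_LONG != 64 this helper returns only the low 32 bits, so
      * counters exported through long-sized net_device_stats fields wrap
      * at 4G on such platforms.
      */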
3093 static inline long bnx2x_hilo(u32 *hiref)
3094 {
3095         u32 lo = *(hiref + 1);
3096 #if (BITS_PER_LONG == 64)
3097         u32 hi = *hiref;
3098
3099         return HILO_U64(hi, lo);
3100 #else
3101         return lo;
3102 #endif
3103 }
3104
3105 /*
3106  * Init service functions
3107  */
3108
3109 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3110 {
3111         if (!bp->stats_pending) {
3112                 struct eth_query_ramrod_data ramrod_data = {0};
3113                 int i, rc;
3114
3115                 ramrod_data.drv_counter = bp->stats_counter++;
3116                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3117                 for_each_queue(bp, i)
3118                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3119
3120                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3121                                    ((u32 *)&ramrod_data)[1],
3122                                    ((u32 *)&ramrod_data)[0], 0);
3123                 if (rc == 0) {
3124                         /* stats ramrod has its own slot on the spq */
3125                         bp->spq_left++;
3126                         bp->stats_pending = 1;
3127                 }
3128         }
3129 }
3130
3131 static void bnx2x_stats_init(struct bnx2x *bp)
3132 {
3133         int port = BP_PORT(bp);
3134         int i;
3135
3136         bp->stats_pending = 0;
3137         bp->executer_idx = 0;
3138         bp->stats_counter = 0;
3139
3140         /* port stats */
3141         if (!BP_NOMCP(bp))
3142                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3143         else
3144                 bp->port.port_stx = 0;
3145         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3146
3147         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3148         bp->port.old_nig_stats.brb_discard =
3149                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3150         bp->port.old_nig_stats.brb_truncate =
3151                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3152         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3153                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3154         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3155                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3156
3157         /* function stats */
3158         for_each_queue(bp, i) {
3159                 struct bnx2x_fastpath *fp = &bp->fp[i];
3160
3161                 memset(&fp->old_tclient, 0,
3162                        sizeof(struct tstorm_per_client_stats));
3163                 memset(&fp->old_uclient, 0,
3164                        sizeof(struct ustorm_per_client_stats));
3165                 memset(&fp->old_xclient, 0,
3166                        sizeof(struct xstorm_per_client_stats));
3167                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3168         }
3169
3170         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3171         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3172
3173         bp->stats_state = STATS_STATE_DISABLED;
3174         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3175                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3176 }
3177
3178 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3179 {
3180         struct dmae_command *dmae = &bp->stats_dmae;
3181         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3182
3183         *stats_comp = DMAE_COMP_VAL;
3184         if (CHIP_REV_IS_SLOW(bp))
3185                 return;
3186
3187         /* loader */
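             /* A sketch of the self-loading pattern used here: the command
              * built below copies one dmae_command from the host-side array
              * into device command memory at slot (loader_idx + 1), and on
              * completion writes 1 to dmae_reg_go_c[loader_idx + 1], which
              * presumably strobes the GO register of the command just
              * loaded, chaining the sequence from a single post.
              */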
3188         if (bp->executer_idx) {
3189                 int loader_idx = PMF_DMAE_C(bp);
3190
3191                 memset(dmae, 0, sizeof(struct dmae_command));
3192
3193                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3194                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3195                                 DMAE_CMD_DST_RESET |
3196 #ifdef __BIG_ENDIAN
3197                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3198 #else
3199                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3200 #endif
3201                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3202                                                DMAE_CMD_PORT_0) |
3203                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3204                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3205                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3206                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3207                                      sizeof(struct dmae_command) *
3208                                      (loader_idx + 1)) >> 2;
3209                 dmae->dst_addr_hi = 0;
3210                 dmae->len = sizeof(struct dmae_command) >> 2;
3211                 if (CHIP_IS_E1(bp))
3212                         dmae->len--;
3213                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3214                 dmae->comp_addr_hi = 0;
3215                 dmae->comp_val = 1;
3216
3217                 *stats_comp = 0;
3218                 bnx2x_post_dmae(bp, dmae, loader_idx);
3219
3220         } else if (bp->func_stx) {
3221                 *stats_comp = 0;
3222                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3223         }
3224 }
3225
3226 static int bnx2x_stats_comp(struct bnx2x *bp)
3227 {
3228         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3229         int cnt = 10;
3230
3231         might_sleep();
3232         while (*stats_comp != DMAE_COMP_VAL) {
3233                 if (!cnt) {
3234                         BNX2X_ERR("timeout waiting for stats to finish\n");
3235                         break;
3236                 }
3237                 cnt--;
3238                 msleep(1);
3239         }
3240         return 1;
3241 }
3242
3243 /*
3244  * Statistics service functions
3245  */
3246
3247 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3248 {
3249         struct dmae_command *dmae;
3250         u32 opcode;
3251         int loader_idx = PMF_DMAE_C(bp);
3252         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3253
3254         /* sanity */
3255         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3256                 BNX2X_ERR("BUG!\n");
3257                 return;
3258         }
3259
3260         bp->executer_idx = 0;
3261
3262         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3263                   DMAE_CMD_C_ENABLE |
3264                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3265 #ifdef __BIG_ENDIAN
3266                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3267 #else
3268                   DMAE_CMD_ENDIANITY_DW_SWAP |
3269 #endif
3270                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3271                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3272
3273         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3274         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3275         dmae->src_addr_lo = bp->port.port_stx >> 2;
3276         dmae->src_addr_hi = 0;
3277         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3278         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3279         dmae->len = DMAE_LEN32_RD_MAX;
3280         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3281         dmae->comp_addr_hi = 0;
3282         dmae->comp_val = 1;
3283
3284         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3286         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3287         dmae->src_addr_hi = 0;
3288         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3289                                    DMAE_LEN32_RD_MAX * 4);
3290         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3291                                    DMAE_LEN32_RD_MAX * 4);
3292         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3293         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3294         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3295         dmae->comp_val = DMAE_COMP_VAL;
3296
3297         *stats_comp = 0;
3298         bnx2x_hw_stats_post(bp);
3299         bnx2x_stats_comp(bp);
3300 }
3301
3302 static void bnx2x_port_stats_init(struct bnx2x *bp)
3303 {
3304         struct dmae_command *dmae;
3305         int port = BP_PORT(bp);
3306         int vn = BP_E1HVN(bp);
3307         u32 opcode;
3308         int loader_idx = PMF_DMAE_C(bp);
3309         u32 mac_addr;
3310         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3311
3312         /* sanity */
3313         if (!bp->link_vars.link_up || !bp->port.pmf) {
3314                 BNX2X_ERR("BUG!\n");
3315                 return;
3316         }
3317
3318         bp->executer_idx = 0;
3319
3320         /* MCP */
3321         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3322                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3323                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3324 #ifdef __BIG_ENDIAN
3325                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3326 #else
3327                   DMAE_CMD_ENDIANITY_DW_SWAP |
3328 #endif
3329                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3330                   (vn << DMAE_CMD_E1HVN_SHIFT));
3331
3332         if (bp->port.port_stx) {
3333
3334                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335                 dmae->opcode = opcode;
3336                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3337                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3338                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3339                 dmae->dst_addr_hi = 0;
3340                 dmae->len = sizeof(struct host_port_stats) >> 2;
3341                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3342                 dmae->comp_addr_hi = 0;
3343                 dmae->comp_val = 1;
3344         }
3345
3346         if (bp->func_stx) {
3347
3348                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3349                 dmae->opcode = opcode;
3350                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3351                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3352                 dmae->dst_addr_lo = bp->func_stx >> 2;
3353                 dmae->dst_addr_hi = 0;
3354                 dmae->len = sizeof(struct host_func_stats) >> 2;
3355                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3356                 dmae->comp_addr_hi = 0;
3357                 dmae->comp_val = 1;
3358         }
3359
3360         /* MAC */
3361         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3362                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3363                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3364 #ifdef __BIG_ENDIAN
3365                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3366 #else
3367                   DMAE_CMD_ENDIANITY_DW_SWAP |
3368 #endif
3369                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3370                   (vn << DMAE_CMD_E1HVN_SHIFT));
3371
3372         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3373
3374                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3375                                    NIG_REG_INGRESS_BMAC0_MEM);
3376
3377                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3378                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3379                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3380                 dmae->opcode = opcode;
3381                 dmae->src_addr_lo = (mac_addr +
3382                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3383                 dmae->src_addr_hi = 0;
3384                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3385                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3386                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3387                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3388                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3389                 dmae->comp_addr_hi = 0;
3390                 dmae->comp_val = 1;
3391
3392                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3393                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3394                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3395                 dmae->opcode = opcode;
3396                 dmae->src_addr_lo = (mac_addr +
3397                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3398                 dmae->src_addr_hi = 0;
3399                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3400                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3401                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3402                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3403                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3404                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3405                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3406                 dmae->comp_addr_hi = 0;
3407                 dmae->comp_val = 1;
3408
3409         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3410
3411                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3412
3413                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3414                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3415                 dmae->opcode = opcode;
3416                 dmae->src_addr_lo = (mac_addr +
3417                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3418                 dmae->src_addr_hi = 0;
3419                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3420                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3421                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3422                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3423                 dmae->comp_addr_hi = 0;
3424                 dmae->comp_val = 1;
3425
3426                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3427                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3428                 dmae->opcode = opcode;
3429                 dmae->src_addr_lo = (mac_addr +
3430                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3431                 dmae->src_addr_hi = 0;
3432                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3433                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3434                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3435                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3436                 dmae->len = 1;
3437                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3438                 dmae->comp_addr_hi = 0;
3439                 dmae->comp_val = 1;
3440
3441                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3442                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3443                 dmae->opcode = opcode;
3444                 dmae->src_addr_lo = (mac_addr +
3445                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3446                 dmae->src_addr_hi = 0;
3447                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3448                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3449                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3450                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3451                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3452                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3453                 dmae->comp_addr_hi = 0;
3454                 dmae->comp_val = 1;
3455         }
3456
3457         /* NIG */
3458         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3459         dmae->opcode = opcode;
3460         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3461                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3462         dmae->src_addr_hi = 0;
3463         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3464         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3465         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3466         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3467         dmae->comp_addr_hi = 0;
3468         dmae->comp_val = 1;
3469
3470         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3471         dmae->opcode = opcode;
3472         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3473                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3474         dmae->src_addr_hi = 0;
3475         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3476                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3477         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3478                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3479         dmae->len = (2*sizeof(u32)) >> 2;
3480         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3481         dmae->comp_addr_hi = 0;
3482         dmae->comp_val = 1;
3483
3484         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3485         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3486                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3487                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3488 #ifdef __BIG_ENDIAN
3489                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3490 #else
3491                         DMAE_CMD_ENDIANITY_DW_SWAP |
3492 #endif
3493                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3494                         (vn << DMAE_CMD_E1HVN_SHIFT));
3495         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3496                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3497         dmae->src_addr_hi = 0;
3498         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3499                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3500         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3501                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3502         dmae->len = (2*sizeof(u32)) >> 2;
3503         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3504         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3505         dmae->comp_val = DMAE_COMP_VAL;
3506
3507         *stats_comp = 0;
3508 }
3509
3510 static void bnx2x_func_stats_init(struct bnx2x *bp)
3511 {
3512         struct dmae_command *dmae = &bp->stats_dmae;
3513         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3514
3515         /* sanity */
3516         if (!bp->func_stx) {
3517                 BNX2X_ERR("BUG!\n");
3518                 return;
3519         }
3520
3521         bp->executer_idx = 0;
3522         memset(dmae, 0, sizeof(struct dmae_command));
3523
3524         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3525                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3526                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3527 #ifdef __BIG_ENDIAN
3528                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3529 #else
3530                         DMAE_CMD_ENDIANITY_DW_SWAP |
3531 #endif
3532                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3533                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3534         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3535         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3536         dmae->dst_addr_lo = bp->func_stx >> 2;
3537         dmae->dst_addr_hi = 0;
3538         dmae->len = sizeof(struct host_func_stats) >> 2;
3539         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3540         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3541         dmae->comp_val = DMAE_COMP_VAL;
3542
3543         *stats_comp = 0;
3544 }
3545
3546 static void bnx2x_stats_start(struct bnx2x *bp)
3547 {
3548         if (bp->port.pmf)
3549                 bnx2x_port_stats_init(bp);
3550
3551         else if (bp->func_stx)
3552                 bnx2x_func_stats_init(bp);
3553
3554         bnx2x_hw_stats_post(bp);
3555         bnx2x_storm_stats_post(bp);
3556 }
3557
3558 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3559 {
3560         bnx2x_stats_comp(bp);
3561         bnx2x_stats_pmf_update(bp);
3562         bnx2x_stats_start(bp);
3563 }
3564
3565 static void bnx2x_stats_restart(struct bnx2x *bp)
3566 {
3567         bnx2x_stats_comp(bp);
3568         bnx2x_stats_start(bp);
3569 }
3570
3571 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3572 {
3573         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3574         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3575         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3576         struct {
3577                 u32 lo;
3578                 u32 hi;
3579         } diff;
3580
3581         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3582         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3583         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3584         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3585         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3586         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3587         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3588         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3589         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3590         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3591         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3592         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3593         UPDATE_STAT64(tx_stat_gt127,
3594                                 tx_stat_etherstatspkts65octetsto127octets);
3595         UPDATE_STAT64(tx_stat_gt255,
3596                                 tx_stat_etherstatspkts128octetsto255octets);
3597         UPDATE_STAT64(tx_stat_gt511,
3598                                 tx_stat_etherstatspkts256octetsto511octets);
3599         UPDATE_STAT64(tx_stat_gt1023,
3600                                 tx_stat_etherstatspkts512octetsto1023octets);
3601         UPDATE_STAT64(tx_stat_gt1518,
3602                                 tx_stat_etherstatspkts1024octetsto1522octets);
3603         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3604         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3605         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3606         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3607         UPDATE_STAT64(tx_stat_gterr,
3608                                 tx_stat_dot3statsinternalmactransmiterrors);
3609         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3610
3611         estats->pause_frames_received_hi =
3612                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3613         estats->pause_frames_received_lo =
3614                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3615
3616         estats->pause_frames_sent_hi =
3617                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3618         estats->pause_frames_sent_lo =
3619                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3620 }
3621
3622 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3623 {
3624         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3625         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3626         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3627
3628         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3629         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3630         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3631         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3632         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3633         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3634         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3635         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3636         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3637         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3638         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3639         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3640         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3641         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3642         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3643         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3644         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3645         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3646         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3647         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3648         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3649         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3650         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3651         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3652         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3653         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3654         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3655         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3656         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3657         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3658         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3659
3660         estats->pause_frames_received_hi =
3661                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3662         estats->pause_frames_received_lo =
3663                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3664         ADD_64(estats->pause_frames_received_hi,
3665                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3666                estats->pause_frames_received_lo,
3667                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3668
3669         estats->pause_frames_sent_hi =
3670                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3671         estats->pause_frames_sent_lo =
3672                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3673         ADD_64(estats->pause_frames_sent_hi,
3674                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3675                estats->pause_frames_sent_lo,
3676                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3677 }
3678
3679 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3680 {
3681         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3682         struct nig_stats *old = &(bp->port.old_nig_stats);
3683         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3684         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3685         struct {
3686                 u32 lo;
3687                 u32 hi;
3688         } diff;
3689         u32 nig_timer_max;
3690
3691         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3692                 bnx2x_bmac_stats_update(bp);
3693
3694         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3695                 bnx2x_emac_stats_update(bp);
3696
3697         else { /* normally unreached */
3698                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3699                 return -1;
3700         }
3701
3702         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3703                       new->brb_discard - old->brb_discard);
3704         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3705                       new->brb_truncate - old->brb_truncate);
3706
3707         UPDATE_STAT64_NIG(egress_mac_pkt0,
3708                                         etherstatspkts1024octetsto1522octets);
3709         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3710
3711         memcpy(old, new, sizeof(struct nig_stats));
3712
3713         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3714                sizeof(struct mac_stx));
3715         estats->brb_drop_hi = pstats->brb_drop_hi;
3716         estats->brb_drop_lo = pstats->brb_drop_lo;
3717
3718         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3719
3720         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3721         if (nig_timer_max != estats->nig_timer_max) {
3722                 estats->nig_timer_max = nig_timer_max;
3723                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3724         }
3725
3726         return 0;
3727 }
3728
3729 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3730 {
3731         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3732         struct tstorm_per_port_stats *tport =
3733                                         &stats->tstorm_common.port_statistics;
3734         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3735         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3736         int i;
3737
3738         memset(&(fstats->total_bytes_received_hi), 0,
3739                sizeof(struct host_func_stats) - 2*sizeof(u32));
3740         estats->error_bytes_received_hi = 0;
3741         estats->error_bytes_received_lo = 0;
3742         estats->etherstatsoverrsizepkts_hi = 0;
3743         estats->etherstatsoverrsizepkts_lo = 0;
3744         estats->no_buff_discard_hi = 0;
3745         estats->no_buff_discard_lo = 0;
3746
3747         for_each_queue(bp, i) {
3748                 struct bnx2x_fastpath *fp = &bp->fp[i];
3749                 int cl_id = fp->cl_id;
3750                 struct tstorm_per_client_stats *tclient =
3751                                 &stats->tstorm_common.client_statistics[cl_id];
3752                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3753                 struct ustorm_per_client_stats *uclient =
3754                                 &stats->ustorm_common.client_statistics[cl_id];
3755                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3756                 struct xstorm_per_client_stats *xclient =
3757                                 &stats->xstorm_common.client_statistics[cl_id];
3758                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3759                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3760                 u32 diff;
3761
3762                 /* are storm stats valid? */
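                     /* How this check works: bnx2x_storm_stats_post() sends
                      * drv_counter = stats_counter and then increments
                      * stats_counter, so a storm that has processed the
                      * latest query presumably echoes a counter exactly one
                      * behind the driver's; any other value means the
                      * snapshot read below would be stale.
                      */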
3763                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3764                                                         bp->stats_counter) {
3765                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3766                            "  xstorm counter (%d) != stats_counter (%d)\n",
3767                            i, xclient->stats_counter, bp->stats_counter);
3768                         return -1;
3769                 }
3770                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3771                                                         bp->stats_counter) {
3772                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3773                            "  tstorm counter (%d) != stats_counter (%d)\n",
3774                            i, tclient->stats_counter, bp->stats_counter);
3775                         return -2;
3776                 }
3777                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3778                                                         bp->stats_counter) {
3779                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3780                            "  ustorm counter (%d) != stats_counter (%d)\n",
3781                            i, uclient->stats_counter, bp->stats_counter);
3782                         return -4;
3783                 }
3784
3785                 qstats->total_bytes_received_hi =
3786                 qstats->valid_bytes_received_hi =
3787                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3788                 qstats->total_bytes_received_lo =
3789                 qstats->valid_bytes_received_lo =
3790                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3791
3792                 qstats->error_bytes_received_hi =
3793                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3794                 qstats->error_bytes_received_lo =
3795                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3796
3797                 ADD_64(qstats->total_bytes_received_hi,
3798                        qstats->error_bytes_received_hi,
3799                        qstats->total_bytes_received_lo,
3800                        qstats->error_bytes_received_lo);
3801
3802                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3803                                         total_unicast_packets_received);
3804                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3805                                         total_multicast_packets_received);
3806                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3807                                         total_broadcast_packets_received);
3808                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3809                                         etherstatsoverrsizepkts);
3810                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3811
3812                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3813                                         total_unicast_packets_received);
3814                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3815                                         total_multicast_packets_received);
3816                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3817                                         total_broadcast_packets_received);
3818                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3819                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3820                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3821
3822                 qstats->total_bytes_transmitted_hi =
3823                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3824                 qstats->total_bytes_transmitted_lo =
3825                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3826
3827                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3828                                         total_unicast_packets_transmitted);
3829                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3830                                         total_multicast_packets_transmitted);
3831                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3832                                         total_broadcast_packets_transmitted);
3833
3834                 old_tclient->checksum_discard = tclient->checksum_discard;
3835                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3836
3837                 ADD_64(fstats->total_bytes_received_hi,
3838                        qstats->total_bytes_received_hi,
3839                        fstats->total_bytes_received_lo,
3840                        qstats->total_bytes_received_lo);
3841                 ADD_64(fstats->total_bytes_transmitted_hi,
3842                        qstats->total_bytes_transmitted_hi,
3843                        fstats->total_bytes_transmitted_lo,
3844                        qstats->total_bytes_transmitted_lo);
3845                 ADD_64(fstats->total_unicast_packets_received_hi,
3846                        qstats->total_unicast_packets_received_hi,
3847                        fstats->total_unicast_packets_received_lo,
3848                        qstats->total_unicast_packets_received_lo);
3849                 ADD_64(fstats->total_multicast_packets_received_hi,
3850                        qstats->total_multicast_packets_received_hi,
3851                        fstats->total_multicast_packets_received_lo,
3852                        qstats->total_multicast_packets_received_lo);
3853                 ADD_64(fstats->total_broadcast_packets_received_hi,
3854                        qstats->total_broadcast_packets_received_hi,
3855                        fstats->total_broadcast_packets_received_lo,
3856                        qstats->total_broadcast_packets_received_lo);
3857                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3858                        qstats->total_unicast_packets_transmitted_hi,
3859                        fstats->total_unicast_packets_transmitted_lo,
3860                        qstats->total_unicast_packets_transmitted_lo);
3861                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3862                        qstats->total_multicast_packets_transmitted_hi,
3863                        fstats->total_multicast_packets_transmitted_lo,
3864                        qstats->total_multicast_packets_transmitted_lo);
3865                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3866                        qstats->total_broadcast_packets_transmitted_hi,
3867                        fstats->total_broadcast_packets_transmitted_lo,
3868                        qstats->total_broadcast_packets_transmitted_lo);
3869                 ADD_64(fstats->valid_bytes_received_hi,
3870                        qstats->valid_bytes_received_hi,
3871                        fstats->valid_bytes_received_lo,
3872                        qstats->valid_bytes_received_lo);
3873
3874                 ADD_64(estats->error_bytes_received_hi,
3875                        qstats->error_bytes_received_hi,
3876                        estats->error_bytes_received_lo,
3877                        qstats->error_bytes_received_lo);
3878                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3879                        qstats->etherstatsoverrsizepkts_hi,
3880                        estats->etherstatsoverrsizepkts_lo,
3881                        qstats->etherstatsoverrsizepkts_lo);
3882                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3883                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3884         }
3885
3886         ADD_64(fstats->total_bytes_received_hi,
3887                estats->rx_stat_ifhcinbadoctets_hi,
3888                fstats->total_bytes_received_lo,
3889                estats->rx_stat_ifhcinbadoctets_lo);
3890
3891         memcpy(estats, &(fstats->total_bytes_received_hi),
3892                sizeof(struct host_func_stats) - 2*sizeof(u32));
3893
3894         ADD_64(estats->etherstatsoverrsizepkts_hi,
3895                estats->rx_stat_dot3statsframestoolong_hi,
3896                estats->etherstatsoverrsizepkts_lo,
3897                estats->rx_stat_dot3statsframestoolong_lo);
3898         ADD_64(estats->error_bytes_received_hi,
3899                estats->rx_stat_ifhcinbadoctets_hi,
3900                estats->error_bytes_received_lo,
3901                estats->rx_stat_ifhcinbadoctets_lo);
3902
3903         if (bp->port.pmf) {
3904                 estats->mac_filter_discard =
3905                                 le32_to_cpu(tport->mac_filter_discard);
3906                 estats->xxoverflow_discard =
3907                                 le32_to_cpu(tport->xxoverflow_discard);
3908                 estats->brb_truncate_discard =
3909                                 le32_to_cpu(tport->brb_truncate_discard);
3910                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3911         }
3912
3913         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3914
3915         bp->stats_pending = 0;
3916
3917         return 0;
3918 }
3919
3920 static void bnx2x_net_stats_update(struct bnx2x *bp)
3921 {
3922         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3923         struct net_device_stats *nstats = &bp->dev->stats;
3924         int i;
3925
3926         nstats->rx_packets =
3927                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3928                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3929                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3930
3931         nstats->tx_packets =
3932                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3933                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3934                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3935
3936         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3937
3938         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3939
3940         nstats->rx_dropped = estats->mac_discard;
3941         for_each_queue(bp, i)
3942                 nstats->rx_dropped +=
3943                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3944
3945         nstats->tx_dropped = 0;
3946
3947         nstats->multicast =
3948                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3949
3950         nstats->collisions =
3951                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3952
3953         nstats->rx_length_errors =
3954                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3955                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3956         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3957                                  bnx2x_hilo(&estats->brb_truncate_hi);
3958         nstats->rx_crc_errors =
3959                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3960         nstats->rx_frame_errors =
3961                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3962         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3963         nstats->rx_missed_errors = estats->xxoverflow_discard;
3964
3965         nstats->rx_errors = nstats->rx_length_errors +
3966                             nstats->rx_over_errors +
3967                             nstats->rx_crc_errors +
3968                             nstats->rx_frame_errors +
3969                             nstats->rx_fifo_errors +
3970                             nstats->rx_missed_errors;
3971
3972         nstats->tx_aborted_errors =
3973                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3974                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3975         nstats->tx_carrier_errors =
3976                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3977         nstats->tx_fifo_errors = 0;
3978         nstats->tx_heartbeat_errors = 0;
3979         nstats->tx_window_errors = 0;
3980
3981         nstats->tx_errors = nstats->tx_aborted_errors +
3982                             nstats->tx_carrier_errors +
3983             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3984 }
3985
3986 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3987 {
3988         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3989         int i;
3990
3991         estats->driver_xoff = 0;
3992         estats->rx_err_discard_pkt = 0;
3993         estats->rx_skb_alloc_failed = 0;
3994         estats->hw_csum_err = 0;
3995         for_each_queue(bp, i) {
3996                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3997
3998                 estats->driver_xoff += qstats->driver_xoff;
3999                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4000                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4001                 estats->hw_csum_err += qstats->hw_csum_err;
4002         }
4003 }
4004
4005 static void bnx2x_stats_update(struct bnx2x *bp)
4006 {
4007         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4008
4009         if (*stats_comp != DMAE_COMP_VAL)
4010                 return;
4011
4012         if (bp->port.pmf)
4013                 bnx2x_hw_stats_update(bp);
4014
4015         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4016                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4017                 bnx2x_panic();
4018                 return;
4019         }
4020
4021         bnx2x_net_stats_update(bp);
4022         bnx2x_drv_stats_update(bp);
4023
4024         if (bp->msglevel & NETIF_MSG_TIMER) {
4025                 struct tstorm_per_client_stats *old_tclient =
4026                                                         &bp->fp->old_tclient;
4027                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4028                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4029                 struct net_device_stats *nstats = &bp->dev->stats;
4030                 int i;
4031
4032                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4033                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4034                                   "  tx pkt (%lx)\n",
4035                        bnx2x_tx_avail(bp->fp),
4036                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4037                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4038                                   "  rx pkt (%lx)\n",
4039                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4040                              bp->fp->rx_comp_cons),
4041                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4042                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4043                                   "brb truncate %u\n",
4044                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4045                        qstats->driver_xoff,
4046                        estats->brb_drop_lo, estats->brb_truncate_lo);
4047                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4048                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4049                         "mac_discard %u  mac_filter_discard %u  "
4050                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4051                         "ttl0_discard %u\n",
4052                        le32_to_cpu(old_tclient->checksum_discard),
4053                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4054                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4055                        estats->mac_discard, estats->mac_filter_discard,
4056                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4057                        le32_to_cpu(old_tclient->ttl0_discard));
4058
4059                 for_each_queue(bp, i) {
4060                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4061                                bnx2x_fp(bp, i, tx_pkt),
4062                                bnx2x_fp(bp, i, rx_pkt),
4063                                bnx2x_fp(bp, i, rx_calls));
4064                 }
4065         }
4066
4067         bnx2x_hw_stats_post(bp);
4068         bnx2x_storm_stats_post(bp);
4069 }
4070
4071 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4072 {
4073         struct dmae_command *dmae;
4074         u32 opcode;
4075         int loader_idx = PMF_DMAE_C(bp);
4076         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4077
4078         bp->executer_idx = 0;
4079
4080         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4081                   DMAE_CMD_C_ENABLE |
4082                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4083 #ifdef __BIG_ENDIAN
4084                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4085 #else
4086                   DMAE_CMD_ENDIANITY_DW_SWAP |
4087 #endif
4088                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4089                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4090
4091         if (bp->port.port_stx) {
4092
4093                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094                 if (bp->func_stx)
4095                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4096                 else
4097                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4098                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4099                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4100                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4101                 dmae->dst_addr_hi = 0;
4102                 dmae->len = sizeof(struct host_port_stats) >> 2;
4103                 if (bp->func_stx) {
4104                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4105                         dmae->comp_addr_hi = 0;
4106                         dmae->comp_val = 1;
4107                 } else {
4108                         dmae->comp_addr_lo =
4109                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4110                         dmae->comp_addr_hi =
4111                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4112                         dmae->comp_val = DMAE_COMP_VAL;
4113
4114                         *stats_comp = 0;
4115                 }
4116         }
4117
4118         if (bp->func_stx) {
4119
4120                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4121                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4122                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4123                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4124                 dmae->dst_addr_lo = bp->func_stx >> 2;
4125                 dmae->dst_addr_hi = 0;
4126                 dmae->len = sizeof(struct host_func_stats) >> 2;
4127                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4128                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4129                 dmae->comp_val = DMAE_COMP_VAL;
4130
4131                 *stats_comp = 0;
4132         }
4133 }
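/*
 * Completion-routing sketch for the two commands above (illustrative,
 * mirroring the logic already in this function): when both the port and
 * the function statistics are flushed, only the last DMAE command may
 * complete to the host stats_comp word; the earlier one completes to GRC
 * so it can trigger the loader for the next command in the chain:
 *
 *	dmae->opcode = opcode |
 *		       (last ? DMAE_CMD_C_DST_PCI : DMAE_CMD_C_DST_GRC);
 *	if (last) {
 *		dmae->comp_val = DMAE_COMP_VAL;	(polled by bnx2x_stats_comp)
 *		*stats_comp = 0;		(arm the completion word)
 *	}
 */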
4134
4135 static void bnx2x_stats_stop(struct bnx2x *bp)
4136 {
4137         int update = 0;
4138
4139         bnx2x_stats_comp(bp);
4140
4141         if (bp->port.pmf)
4142                 update = (bnx2x_hw_stats_update(bp) == 0);
4143
4144         update |= (bnx2x_storm_stats_update(bp) == 0);
4145
4146         if (update) {
4147                 bnx2x_net_stats_update(bp);
4148
4149                 if (bp->port.pmf)
4150                         bnx2x_port_stats_stop(bp);
4151
4152                 bnx2x_hw_stats_post(bp);
4153                 bnx2x_stats_comp(bp);
4154         }
4155 }
4156
4157 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4158 {
4159 }
4160
4161 static const struct {
4162         void (*action)(struct bnx2x *bp);
4163         enum bnx2x_stats_state next_state;
4164 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4165 /* state        event   */
4166 {
4167 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4168 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4169 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4170 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4171 },
4172 {
4173 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4174 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4175 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4176 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4177 }
4178 };
4179
4180 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4181 {
4182         enum bnx2x_stats_state state = bp->stats_state;
4183
4184         bnx2x_stats_stm[state][event].action(bp);
4185         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4186
4187         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4188                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4189                    state, event, bp->stats_state);
4190 }
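/*
 * Usage sketch for the table above: an event simply indexes the current
 * state's row, so e.g.
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);	DISABLED -> ENABLED
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);	update, stay ENABLED
 *	bnx2x_stats_handle(bp, STATS_EVENT_STOP);	stop, back to DISABLED
 *
 * while a STOP or UPDATE in DISABLED state is bnx2x_stats_do_nothing().
 */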
4191
4192 static void bnx2x_timer(unsigned long data)
4193 {
4194         struct bnx2x *bp = (struct bnx2x *) data;
4195
4196         if (!netif_running(bp->dev))
4197                 return;
4198
4199         if (atomic_read(&bp->intr_sem) != 0)
4200                 goto timer_restart;
4201
4202         if (poll) {
4203                 struct bnx2x_fastpath *fp = &bp->fp[0];
4204                 int rc;
4205
4206                 bnx2x_tx_int(fp);
4207                 rc = bnx2x_rx_int(fp, 1000);
4208         }
4209
4210         if (!BP_NOMCP(bp)) {
4211                 int func = BP_FUNC(bp);
4212                 u32 drv_pulse;
4213                 u32 mcp_pulse;
4214
4215                 ++bp->fw_drv_pulse_wr_seq;
4216                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4217                 /* TBD - add SYSTEM_TIME */
4218                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4219                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4220
4221                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4222                              MCP_PULSE_SEQ_MASK);
4223                 /* The delta between driver pulse and mcp response
4224                  * should be 1 (before mcp response) or 0 (after mcp response)
4225                  */
4226                 if ((drv_pulse != mcp_pulse) &&
4227                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4228                         /* someone lost a heartbeat... */
4229                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4230                                   drv_pulse, mcp_pulse);
4231                 }
4232         }
4233
4234         if ((bp->state == BNX2X_STATE_OPEN) ||
4235             (bp->state == BNX2X_STATE_DISABLED))
4236                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4237
4238 timer_restart:
4239         mod_timer(&bp->timer, jiffies + bp->current_interval);
4240 }
4241
4242 /* end of Statistics */
4243
4244 /* nic init */
4245
4246 /*
4247  * nic init service functions
4248  */
4249
4250 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4251 {
4252         int port = BP_PORT(bp);
4253
4254         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4255                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4256                         sizeof(struct ustorm_status_block)/4);
4257         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4258                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4259                         sizeof(struct cstorm_status_block)/4);
4260 }
4261
4262 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4263                           dma_addr_t mapping, int sb_id)
4264 {
4265         int port = BP_PORT(bp);
4266         int func = BP_FUNC(bp);
4267         int index;
4268         u64 section;
4269
4270         /* USTORM */
4271         section = ((u64)mapping) + offsetof(struct host_status_block,
4272                                             u_status_block);
4273         sb->u_status_block.status_block_id = sb_id;
4274
4275         REG_WR(bp, BAR_USTRORM_INTMEM +
4276                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4277         REG_WR(bp, BAR_USTRORM_INTMEM +
4278                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4279                U64_HI(section));
4280         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4281                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4282
4283         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4284                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4285                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4286
4287         /* CSTORM */
4288         section = ((u64)mapping) + offsetof(struct host_status_block,
4289                                             c_status_block);
4290         sb->c_status_block.status_block_id = sb_id;
4291
4292         REG_WR(bp, BAR_CSTRORM_INTMEM +
4293                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4294         REG_WR(bp, BAR_CSTRORM_INTMEM +
4295                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4296                U64_HI(section));
4297         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4298                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4299
4300         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4301                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4302                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4303
4304         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4305 }
4306
4307 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4308 {
4309         int func = BP_FUNC(bp);
4310
4311         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4312                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4313                         sizeof(struct tstorm_def_status_block)/4);
4314         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4315                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4316                         sizeof(struct ustorm_def_status_block)/4);
4317         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4318                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4319                         sizeof(struct cstorm_def_status_block)/4);
4320         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4321                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4322                         sizeof(struct xstorm_def_status_block)/4);
4323 }
4324
4325 static void bnx2x_init_def_sb(struct bnx2x *bp,
4326                               struct host_def_status_block *def_sb,
4327                               dma_addr_t mapping, int sb_id)
4328 {
4329         int port = BP_PORT(bp);
4330         int func = BP_FUNC(bp);
4331         int index, val, reg_offset;
4332         u64 section;
4333
4334         /* ATTN */
4335         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4336                                             atten_status_block);
4337         def_sb->atten_status_block.status_block_id = sb_id;
4338
4339         bp->attn_state = 0;
4340
4341         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4342                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4343
4344         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4345                 bp->attn_group[index].sig[0] = REG_RD(bp,
4346                                                      reg_offset + 0x10*index);
4347                 bp->attn_group[index].sig[1] = REG_RD(bp,
4348                                                reg_offset + 0x4 + 0x10*index);
4349                 bp->attn_group[index].sig[2] = REG_RD(bp,
4350                                                reg_offset + 0x8 + 0x10*index);
4351                 bp->attn_group[index].sig[3] = REG_RD(bp,
4352                                                reg_offset + 0xc + 0x10*index);
4353         }
4354
4355         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4356                              HC_REG_ATTN_MSG0_ADDR_L);
4357
4358         REG_WR(bp, reg_offset, U64_LO(section));
4359         REG_WR(bp, reg_offset + 4, U64_HI(section));
4360
4361         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4362
4363         val = REG_RD(bp, reg_offset);
4364         val |= sb_id;
4365         REG_WR(bp, reg_offset, val);
4366
4367         /* USTORM */
4368         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4369                                             u_def_status_block);
4370         def_sb->u_def_status_block.status_block_id = sb_id;
4371
4372         REG_WR(bp, BAR_USTRORM_INTMEM +
4373                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4374         REG_WR(bp, BAR_USTRORM_INTMEM +
4375                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4376                U64_HI(section));
4377         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4378                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4379
4380         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4381                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4382                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4383
4384         /* CSTORM */
4385         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4386                                             c_def_status_block);
4387         def_sb->c_def_status_block.status_block_id = sb_id;
4388
4389         REG_WR(bp, BAR_CSTRORM_INTMEM +
4390                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4391         REG_WR(bp, BAR_CSTRORM_INTMEM +
4392                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4393                U64_HI(section));
4394         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4395                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4396
4397         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4398                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4399                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4400
4401         /* TSTORM */
4402         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4403                                             t_def_status_block);
4404         def_sb->t_def_status_block.status_block_id = sb_id;
4405
4406         REG_WR(bp, BAR_TSTRORM_INTMEM +
4407                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4408         REG_WR(bp, BAR_TSTRORM_INTMEM +
4409                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4410                U64_HI(section));
4411         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4412                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4413
4414         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4415                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4416                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4417
4418         /* XSTORM */
4419         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4420                                             x_def_status_block);
4421         def_sb->x_def_status_block.status_block_id = sb_id;
4422
4423         REG_WR(bp, BAR_XSTRORM_INTMEM +
4424                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4425         REG_WR(bp, BAR_XSTRORM_INTMEM +
4426                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4427                U64_HI(section));
4428         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4429                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4430
4431         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4432                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4433                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4434
4435         bp->stats_pending = 0;
4436         bp->set_mac_pending = 0;
4437
4438         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4439 }
4440
4441 static void bnx2x_update_coalesce(struct bnx2x *bp)
4442 {
4443         int port = BP_PORT(bp);
4444         int i;
4445
4446         for_each_queue(bp, i) {
4447                 int sb_id = bp->fp[i].sb_id;
4448
4449                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4450                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4451                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4452                                                     U_SB_ETH_RX_CQ_INDEX),
4453                         bp->rx_ticks/12);
4454                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4455                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4456                                                      U_SB_ETH_RX_CQ_INDEX),
4457                          (bp->rx_ticks/12) ? 0 : 1);
4458
4459                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4460                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4461                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4462                                                     C_SB_ETH_TX_CQ_INDEX),
4463                         bp->tx_ticks/12);
4464                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4465                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4466                                                      C_SB_ETH_TX_CQ_INDEX),
4467                          (bp->tx_ticks/12) ? 0 : 1);
4468         }
4469 }
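/*
 * The /12 scaling above suggests the HC timeout fields count in 12us
 * units (an assumption here; the HC programming model is not spelled out
 * in this file).  A 25us ethtool -C setting would then sketch out as:
 *
 *	timeout = 25 / 12 = 2		(~24us effective resolution)
 *	disable = (timeout == 0)	(sub-12us settings turn the
 *					 index's coalescing off entirely)
 */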
4470
4471 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4472                                        struct bnx2x_fastpath *fp, int last)
4473 {
4474         int i;
4475
4476         for (i = 0; i < last; i++) {
4477                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4478                 struct sk_buff *skb = rx_buf->skb;
4479
4480                 if (skb == NULL) {
4481                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4482                         continue;
4483                 }
4484
4485                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4486                         pci_unmap_single(bp->pdev,
4487                                          pci_unmap_addr(rx_buf, mapping),
4488                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4489
4490                 dev_kfree_skb(skb);
4491                 rx_buf->skb = NULL;
4492         }
4493 }
4494
4495 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4496 {
4497         int func = BP_FUNC(bp);
4498         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4499                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4500         u16 ring_prod, cqe_ring_prod;
4501         int i, j;
4502
4503         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4504         DP(NETIF_MSG_IFUP,
4505            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4506
4507         if (bp->flags & TPA_ENABLE_FLAG) {
4508
4509                 for_each_rx_queue(bp, j) {
4510                         struct bnx2x_fastpath *fp = &bp->fp[j];
4511
4512                         for (i = 0; i < max_agg_queues; i++) {
4513                                 fp->tpa_pool[i].skb =
4514                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4515                                 if (!fp->tpa_pool[i].skb) {
4516                                         BNX2X_ERR("Failed to allocate TPA "
4517                                                   "skb pool for queue[%d] - "
4518                                                   "disabling TPA on this "
4519                                                   "queue!\n", j);
4520                                         bnx2x_free_tpa_pool(bp, fp, i);
4521                                         fp->disable_tpa = 1;
4522                                         break;
4523                                 }
4524                                 pci_unmap_addr_set((struct sw_rx_bd *)
4525                                                         &fp->tpa_pool[i],
4526                                                    mapping, 0);
4527                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4528                         }
4529                 }
4530         }
4531
4532         for_each_rx_queue(bp, j) {
4533                 struct bnx2x_fastpath *fp = &bp->fp[j];
4534
4535                 fp->rx_bd_cons = 0;
4536                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4537                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4538
4539                 /* "next page" elements initialization */
4540                 /* SGE ring */
4541                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4542                         struct eth_rx_sge *sge;
4543
4544                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4545                         sge->addr_hi =
4546                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4547                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4548                         sge->addr_lo =
4549                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4550                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4551                 }
4552
4553                 bnx2x_init_sge_ring_bit_mask(fp);
4554
4555                 /* RX BD ring */
4556                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4557                         struct eth_rx_bd *rx_bd;
4558
4559                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4560                         rx_bd->addr_hi =
4561                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4562                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4563                         rx_bd->addr_lo =
4564                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4565                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4566                 }
4567
4568                 /* CQ ring */
4569                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4570                         struct eth_rx_cqe_next_page *nextpg;
4571
4572                         nextpg = (struct eth_rx_cqe_next_page *)
4573                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4574                         nextpg->addr_hi =
4575                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4576                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4577                         nextpg->addr_lo =
4578                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4579                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4580                 }
4581
4582                 /* Allocate SGEs and initialize the ring elements */
4583                 for (i = 0, ring_prod = 0;
4584                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4585
4586                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4587                                 BNX2X_ERR("was only able to allocate "
4588                                           "%d rx sges\n", i);
4589                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4590                                 /* Cleanup already allocated elements */
4591                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4592                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4593                                 fp->disable_tpa = 1;
4594                                 ring_prod = 0;
4595                                 break;
4596                         }
4597                         ring_prod = NEXT_SGE_IDX(ring_prod);
4598                 }
4599                 fp->rx_sge_prod = ring_prod;
4600
4601                 /* Allocate BDs and initialize BD ring */
4602                 fp->rx_comp_cons = 0;
4603                 cqe_ring_prod = ring_prod = 0;
4604                 for (i = 0; i < bp->rx_ring_size; i++) {
4605                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4606                                 BNX2X_ERR("was only able to allocate "
4607                                           "%d rx skbs on queue[%d]\n", i, j);
4608                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4609                                 break;
4610                         }
4611                         ring_prod = NEXT_RX_IDX(ring_prod);
4612                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4613                         WARN_ON(ring_prod <= i);
4614                 }
4615
4616                 fp->rx_bd_prod = ring_prod;
4617                 /* must not have more available CQEs than BDs */
4618                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4619                                        cqe_ring_prod);
4620                 fp->rx_pkt = fp->rx_calls = 0;
4621
4622                 /* Warning!
4623                  * This will generate an interrupt (to the TSTORM);
4624                  * it must only be done after the chip is initialized.
4625                  */
4626                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4627                                      fp->rx_sge_prod);
4628                 if (j != 0)
4629                         continue;
4630
4631                 REG_WR(bp, BAR_USTRORM_INTMEM +
4632                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4633                        U64_LO(fp->rx_comp_mapping));
4634                 REG_WR(bp, BAR_USTRORM_INTMEM +
4635                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4636                        U64_HI(fp->rx_comp_mapping));
4637         }
4638 }
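/*
 * Ring-chaining sketch for the "next page" loops above: each ring page
 * reserves its tail for a pointer to the following page, which is why
 * the NEXT_*_IDX() producer macros have to skip those slots.  Reading
 * straight from the code: the SGE and BD rings place the chain pointer
 * at index (count * page - 2), so it occupies the last two descriptors,
 * while the CQ ring spends one full CQE at (count * page - 1).  The
 * (i % NUM_*_RINGS) arithmetic makes the final page point back at the
 * first, closing the ring:
 *
 *	page 1 tail -> page 2, ..., page N tail -> page 1
 */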
4639
4640 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4641 {
4642         int i, j;
4643
4644         for_each_tx_queue(bp, j) {
4645                 struct bnx2x_fastpath *fp = &bp->fp[j];
4646
4647                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4648                         struct eth_tx_bd *tx_bd =
4649                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4650
4651                         tx_bd->addr_hi =
4652                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4653                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4654                         tx_bd->addr_lo =
4655                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4656                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4657                 }
4658
4659                 fp->tx_pkt_prod = 0;
4660                 fp->tx_pkt_cons = 0;
4661                 fp->tx_bd_prod = 0;
4662                 fp->tx_bd_cons = 0;
4663                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4664                 fp->tx_pkt = 0;
4665         }
4666 }
4667
4668 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4669 {
4670         int func = BP_FUNC(bp);
4671
4672         spin_lock_init(&bp->spq_lock);
4673
4674         bp->spq_left = MAX_SPQ_PENDING;
4675         bp->spq_prod_idx = 0;
4676         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4677         bp->spq_prod_bd = bp->spq;
4678         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4679
4680         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4681                U64_LO(bp->spq_mapping));
4682         REG_WR(bp,
4683                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4684                U64_HI(bp->spq_mapping));
4685
4686         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4687                bp->spq_prod_idx);
4688 }
4689
4690 static void bnx2x_init_context(struct bnx2x *bp)
4691 {
4692         int i;
4693
4694         for_each_queue(bp, i) {
4695                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4696                 struct bnx2x_fastpath *fp = &bp->fp[i];
4697                 u8 cl_id = fp->cl_id;
4698                 u8 sb_id = fp->sb_id;
4699
4700                 context->ustorm_st_context.common.sb_index_numbers =
4701                                                 BNX2X_RX_SB_INDEX_NUM;
4702                 context->ustorm_st_context.common.clientId = cl_id;
4703                 context->ustorm_st_context.common.status_block_id = sb_id;
4704                 context->ustorm_st_context.common.flags =
4705                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4706                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4707                 context->ustorm_st_context.common.statistics_counter_id =
4708                                                 cl_id;
4709                 context->ustorm_st_context.common.mc_alignment_log_size =
4710                                                 BNX2X_RX_ALIGN_SHIFT;
4711                 context->ustorm_st_context.common.bd_buff_size =
4712                                                 bp->rx_buf_size;
4713                 context->ustorm_st_context.common.bd_page_base_hi =
4714                                                 U64_HI(fp->rx_desc_mapping);
4715                 context->ustorm_st_context.common.bd_page_base_lo =
4716                                                 U64_LO(fp->rx_desc_mapping);
4717                 if (!fp->disable_tpa) {
4718                         context->ustorm_st_context.common.flags |=
4719                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4720                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4721                         context->ustorm_st_context.common.sge_buff_size =
4722                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4723                                          (u32)0xffff);
4724                         context->ustorm_st_context.common.sge_page_base_hi =
4725                                                 U64_HI(fp->rx_sge_mapping);
4726                         context->ustorm_st_context.common.sge_page_base_lo =
4727                                                 U64_LO(fp->rx_sge_mapping);
4728                 }
4729
4730                 context->ustorm_ag_context.cdu_usage =
4731                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4732                                                CDU_REGION_NUMBER_UCM_AG,
4733                                                ETH_CONNECTION_TYPE);
4734
4735                 context->xstorm_st_context.tx_bd_page_base_hi =
4736                                                 U64_HI(fp->tx_desc_mapping);
4737                 context->xstorm_st_context.tx_bd_page_base_lo =
4738                                                 U64_LO(fp->tx_desc_mapping);
4739                 context->xstorm_st_context.db_data_addr_hi =
4740                                                 U64_HI(fp->tx_prods_mapping);
4741                 context->xstorm_st_context.db_data_addr_lo =
4742                                                 U64_LO(fp->tx_prods_mapping);
4743                 context->xstorm_st_context.statistics_data = (cl_id |
4744                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4745                 context->cstorm_st_context.sb_index_number =
4746                                                 C_SB_ETH_TX_CQ_INDEX;
4747                 context->cstorm_st_context.status_block_id = sb_id;
4748
4749                 context->xstorm_ag_context.cdu_reserved =
4750                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4751                                                CDU_REGION_NUMBER_XCM_AG,
4752                                                ETH_CONNECTION_TYPE);
4753         }
4754 }
4755
4756 static void bnx2x_init_ind_table(struct bnx2x *bp)
4757 {
4758         int func = BP_FUNC(bp);
4759         int i;
4760
4761         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4762                 return;
4763
4764         DP(NETIF_MSG_IFUP,
4765            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4766         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4767                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4768                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4769                         bp->fp->cl_id + (i % bp->num_rx_queues));
4770 }
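/*
 * Distribution example for the loop above: with num_rx_queues == 4 and a
 * leading cl_id of 0, the table becomes
 *
 *	entry[i] = 0 + (i % 4)  ->  0, 1, 2, 3, 0, 1, 2, 3, ...
 *
 * so the TSTORM spreads RSS hash buckets round-robin across the rx
 * queues' client ids.
 */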
4771
4772 static void bnx2x_set_client_config(struct bnx2x *bp)
4773 {
4774         struct tstorm_eth_client_config tstorm_client = {0};
4775         int port = BP_PORT(bp);
4776         int i;
4777
4778         tstorm_client.mtu = bp->dev->mtu;
4779         tstorm_client.config_flags =
4780                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4781                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4782 #ifdef BCM_VLAN
4783         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4784                 tstorm_client.config_flags |=
4785                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4786                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4787         }
4788 #endif
4789
4790         if (bp->flags & TPA_ENABLE_FLAG) {
4791                 tstorm_client.max_sges_for_packet =
4792                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4793                 tstorm_client.max_sges_for_packet =
4794                         ((tstorm_client.max_sges_for_packet +
4795                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4796                         PAGES_PER_SGE_SHIFT;
4797
4798                 tstorm_client.config_flags |=
4799                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4800         }
4801
4802         for_each_queue(bp, i) {
4803                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4804
4805                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4806                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4807                        ((u32 *)&tstorm_client)[0]);
4808                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4809                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4810                        ((u32 *)&tstorm_client)[1]);
4811         }
4812
4813         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4814            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4815 }
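/*
 * Worked example for max_sges_for_packet (values assumed purely for
 * illustration: 4K SGE pages, SGE_PAGE_SHIFT == 12, PAGES_PER_SGE == 2):
 * for mtu = 9000,
 *
 *	SGE_PAGE_ALIGN(9000) >> 12	= 12288 >> 12	= 3 pages
 *	round up to a PAGES_PER_SGE multiple		= 4 pages
 *	4 >> PAGES_PER_SGE_SHIFT (== 1)			= 2 SGEs per packet
 */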
4816
4817 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4818 {
4819         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4820         int mode = bp->rx_mode;
4821         int mask = (1 << BP_L_ID(bp));
4822         int func = BP_FUNC(bp);
4823         int i;
4824
4825         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4826
4827         switch (mode) {
4828         case BNX2X_RX_MODE_NONE: /* no Rx */
4829                 tstorm_mac_filter.ucast_drop_all = mask;
4830                 tstorm_mac_filter.mcast_drop_all = mask;
4831                 tstorm_mac_filter.bcast_drop_all = mask;
4832                 break;
4833
4834         case BNX2X_RX_MODE_NORMAL:
4835                 tstorm_mac_filter.bcast_accept_all = mask;
4836                 break;
4837
4838         case BNX2X_RX_MODE_ALLMULTI:
4839                 tstorm_mac_filter.mcast_accept_all = mask;
4840                 tstorm_mac_filter.bcast_accept_all = mask;
4841                 break;
4842
4843         case BNX2X_RX_MODE_PROMISC:
4844                 tstorm_mac_filter.ucast_accept_all = mask;
4845                 tstorm_mac_filter.mcast_accept_all = mask;
4846                 tstorm_mac_filter.bcast_accept_all = mask;
4847                 break;
4848
4849         default:
4850                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4851                 break;
4852         }
4853
4854         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4855                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4856                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4857                        ((u32 *)&tstorm_mac_filter)[i]);
4858
4859 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4860                    ((u32 *)&tstorm_mac_filter)[i]); */
4861         }
4862
4863         if (mode != BNX2X_RX_MODE_NONE)
4864                 bnx2x_set_client_config(bp);
4865 }
4866
4867 static void bnx2x_init_internal_common(struct bnx2x *bp)
4868 {
4869         int i;
4870
4871         if (bp->flags & TPA_ENABLE_FLAG) {
4872                 struct tstorm_eth_tpa_exist tpa = {0};
4873
4874                 tpa.tpa_exist = 1;
4875
4876                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4877                        ((u32 *)&tpa)[0]);
4878                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4879                        ((u32 *)&tpa)[1]);
4880         }
4881
4882         /* Zero this manually as its initialization is
4883            currently missing in the initTool */
4884         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4885                 REG_WR(bp, BAR_USTRORM_INTMEM +
4886                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4887 }
4888
4889 static void bnx2x_init_internal_port(struct bnx2x *bp)
4890 {
4891         int port = BP_PORT(bp);
4892
4893         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4894         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4895         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4896         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4897 }
4898
4899 /* Calculates the sum of vn_min_rates.
4900    It's needed for further normalizing of the min_rates.
4901    Returns:
4902      sum of vn_min_rates.
4903        or
4904      0 - if all the min_rates are 0.
4905      In the latter case the fairness algorithm should be deactivated.
4906      If not all min_rates are zero then those that are zero will be set to 1.
4907  */
4908 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4909 {
4910         int all_zero = 1;
4911         int port = BP_PORT(bp);
4912         int vn;
4913
4914         bp->vn_weight_sum = 0;
4915         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4916                 int func = 2*vn + port;
4917                 u32 vn_cfg =
4918                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4919                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4920                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4921
4922                 /* Skip hidden vns */
4923                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4924                         continue;
4925
4926                 /* If min rate is zero - set it to 1 */
4927                 if (!vn_min_rate)
4928                         vn_min_rate = DEF_MIN_RATE;
4929                 else
4930                         all_zero = 0;
4931
4932                 bp->vn_weight_sum += vn_min_rate;
4933         }
4934
4935         /* ... only if all min rates are zeros - disable fairness */
4936         if (all_zero)
4937                 bp->vn_weight_sum = 0;
4938 }
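/*
 * Worked example: vn min rates of {0, 2500, 0, 5000} (after the *100
 * scaling above).  The two zero entries are bumped to DEF_MIN_RATE,
 * all_zero drops to 0, and
 *
 *	vn_weight_sum = DEF_MIN_RATE + 2500 + DEF_MIN_RATE + 5000
 *
 * Only when every (non-hidden) vn reports 0 is vn_weight_sum forced back
 * to 0, which later disables the fairness algorithm.
 */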
4939
4940 static void bnx2x_init_internal_func(struct bnx2x *bp)
4941 {
4942         struct tstorm_eth_function_common_config tstorm_config = {0};
4943         struct stats_indication_flags stats_flags = {0};
4944         int port = BP_PORT(bp);
4945         int func = BP_FUNC(bp);
4946         int i, j;
4947         u32 offset;
4948         u16 max_agg_size;
4949
4950         if (is_multi(bp)) {
4951                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4952                 tstorm_config.rss_result_mask = MULTI_MASK;
4953         }
4954         if (IS_E1HMF(bp))
4955                 tstorm_config.config_flags |=
4956                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4957
4958         tstorm_config.leading_client_id = BP_L_ID(bp);
4959
4960         REG_WR(bp, BAR_TSTRORM_INTMEM +
4961                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4962                (*(u32 *)&tstorm_config));
4963
4964         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4965         bnx2x_set_storm_rx_mode(bp);
4966
4967         for_each_queue(bp, i) {
4968                 u8 cl_id = bp->fp[i].cl_id;
4969
4970                 /* reset xstorm per client statistics */
4971                 offset = BAR_XSTRORM_INTMEM +
4972                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4973                 for (j = 0;
4974                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4975                         REG_WR(bp, offset + j*4, 0);
4976
4977                 /* reset tstorm per client statistics */
4978                 offset = BAR_TSTRORM_INTMEM +
4979                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4980                 for (j = 0;
4981                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4982                         REG_WR(bp, offset + j*4, 0);
4983
4984                 /* reset ustorm per client statistics */
4985                 offset = BAR_USTRORM_INTMEM +
4986                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4987                 for (j = 0;
4988                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4989                         REG_WR(bp, offset + j*4, 0);
4990         }
4991
4992         /* Init statistics related context */
4993         stats_flags.collect_eth = 1;
4994
4995         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4996                ((u32 *)&stats_flags)[0]);
4997         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4998                ((u32 *)&stats_flags)[1]);
4999
5000         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5001                ((u32 *)&stats_flags)[0]);
5002         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5003                ((u32 *)&stats_flags)[1]);
5004
5005         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5006                ((u32 *)&stats_flags)[0]);
5007         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5008                ((u32 *)&stats_flags)[1]);
5009
5010         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5011                ((u32 *)&stats_flags)[0]);
5012         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5013                ((u32 *)&stats_flags)[1]);
5014
5015         REG_WR(bp, BAR_XSTRORM_INTMEM +
5016                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5017                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5018         REG_WR(bp, BAR_XSTRORM_INTMEM +
5019                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5020                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5021
5022         REG_WR(bp, BAR_TSTRORM_INTMEM +
5023                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5024                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5025         REG_WR(bp, BAR_TSTRORM_INTMEM +
5026                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5027                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5028
5029         REG_WR(bp, BAR_USTRORM_INTMEM +
5030                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5031                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5032         REG_WR(bp, BAR_USTRORM_INTMEM +
5033                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5034                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5035
5036         if (CHIP_IS_E1H(bp)) {
5037                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5038                         IS_E1HMF(bp));
5039                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5040                         IS_E1HMF(bp));
5041                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5042                         IS_E1HMF(bp));
5043                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5044                         IS_E1HMF(bp));
5045
5046                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5047                          bp->e1hov);
5048         }
5049
5050         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5051         max_agg_size =
5052                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5053                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5054                     (u32)0xffff);
5055         for_each_rx_queue(bp, i) {
5056                 struct bnx2x_fastpath *fp = &bp->fp[i];
5057
5058                 REG_WR(bp, BAR_USTRORM_INTMEM +
5059                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5060                        U64_LO(fp->rx_comp_mapping));
5061                 REG_WR(bp, BAR_USTRORM_INTMEM +
5062                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5063                        U64_HI(fp->rx_comp_mapping));
5064
5065                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5066                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5067                          max_agg_size);
5068         }
5069
5070         /* dropless flow control */
5071         if (CHIP_IS_E1H(bp)) {
5072                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5073
5074                 rx_pause.bd_thr_low = 250;
5075                 rx_pause.cqe_thr_low = 250;
5076                 rx_pause.cos = 1;
5077                 rx_pause.sge_thr_low = 0;
5078                 rx_pause.bd_thr_high = 350;
5079                 rx_pause.cqe_thr_high = 350;
5080                 rx_pause.sge_thr_high = 0;
5081
5082                 for_each_rx_queue(bp, i) {
5083                         struct bnx2x_fastpath *fp = &bp->fp[i];
5084
5085                         if (!fp->disable_tpa) {
5086                                 rx_pause.sge_thr_low = 150;
5087                                 rx_pause.sge_thr_high = 250;
5088                         }
5089
5091                         offset = BAR_USTRORM_INTMEM +
5092                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5093                                                                    fp->cl_id);
5094                         for (j = 0;
5095                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5096                              j++)
5097                                 REG_WR(bp, offset + j*4,
5098                                        ((u32 *)&rx_pause)[j]);
5099                 }
5100         }
5101
5102         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5103
5104         /* Init rate shaping and fairness contexts */
5105         if (IS_E1HMF(bp)) {
5106                 int vn;
5107
5108                 /* During init there is no active link;
5109                    until link is up, set the link rate to 10Gbps */
5110                 bp->link_vars.line_speed = SPEED_10000;
5111                 bnx2x_init_port_minmax(bp);
5112
5113                 bnx2x_calc_vn_weight_sum(bp);
5114
5115                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5116                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5117
5118                 /* Enable rate shaping and fairness */
5119                 bp->cmng.flags.cmng_enables =
5120                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5121                 if (bp->vn_weight_sum)
5122                         bp->cmng.flags.cmng_enables |=
5123                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5124                 else
5125                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5126                            "  fairness will be disabled\n");
5127         } else {
5128                 /* rate shaping and fairness are disabled */
5129                 DP(NETIF_MSG_IFUP,
5130                    "single function mode  minmax will be disabled\n");
5131         }
5132
5133
5134         /* Store it to internal memory */
5135         if (bp->port.pmf)
5136                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5137                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5138                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5139                                ((u32 *)(&bp->cmng))[i]);
5140 }
5141
5142 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5143 {
5144         switch (load_code) {
5145         case FW_MSG_CODE_DRV_LOAD_COMMON:
5146                 bnx2x_init_internal_common(bp);
5147                 /* no break */
5148
5149         case FW_MSG_CODE_DRV_LOAD_PORT:
5150                 bnx2x_init_internal_port(bp);
5151                 /* no break */
5152
5153         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5154                 bnx2x_init_internal_func(bp);
5155                 break;
5156
5157         default:
5158                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5159                 break;
5160         }
5161 }
5162
5163 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5164 {
5165         int i;
5166
5167         for_each_queue(bp, i) {
5168                 struct bnx2x_fastpath *fp = &bp->fp[i];
5169
5170                 fp->bp = bp;
5171                 fp->state = BNX2X_FP_STATE_CLOSED;
5172                 fp->index = i;
5173                 fp->cl_id = BP_L_ID(bp) + i;
5174                 fp->sb_id = fp->cl_id;
5175                 DP(NETIF_MSG_IFUP,
5176                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5177                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5178                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5179                               fp->sb_id);
5180                 bnx2x_update_fpsb_idx(fp);
5181         }
5182
5183         /* ensure status block indices were read */
5184         rmb();
5185
5186
5187         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5188                           DEF_SB_ID);
5189         bnx2x_update_dsb_idx(bp);
5190         bnx2x_update_coalesce(bp);
5191         bnx2x_init_rx_rings(bp);
5192         bnx2x_init_tx_ring(bp);
5193         bnx2x_init_sp_ring(bp);
5194         bnx2x_init_context(bp);
5195         bnx2x_init_internal(bp, load_code);
5196         bnx2x_init_ind_table(bp);
5197         bnx2x_stats_init(bp);
5198
5199         /* At this point, we are ready for interrupts */
5200         atomic_set(&bp->intr_sem, 0);
5201
5202         /* flush all before enabling interrupts */
5203         mb();
5204         mmiowb();
5205
5206         bnx2x_int_enable(bp);
5207
5208         /* Check for SPIO5 */
5209         bnx2x_attn_int_deasserted0(bp,
5210                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5211                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5212 }
5213
5214 /* end of nic init */
5215
5216 /*
5217  * gzip service functions
5218  */
5219
5220 static int bnx2x_gunzip_init(struct bnx2x *bp)
5221 {
5222         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5223                                               &bp->gunzip_mapping);
5224         if (bp->gunzip_buf  == NULL)
5225                 goto gunzip_nomem1;
5226
5227         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5228         if (bp->strm  == NULL)
5229                 goto gunzip_nomem2;
5230
5231         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5232                                       GFP_KERNEL);
5233         if (bp->strm->workspace == NULL)
5234                 goto gunzip_nomem3;
5235
5236         return 0;
5237
5238 gunzip_nomem3:
5239         kfree(bp->strm);
5240         bp->strm = NULL;
5241
5242 gunzip_nomem2:
5243         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5244                             bp->gunzip_mapping);
5245         bp->gunzip_buf = NULL;
5246
5247 gunzip_nomem1:
5248         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5249                " decompression\n", bp->dev->name);
5250         return -ENOMEM;
5251 }
5252
5253 static void bnx2x_gunzip_end(struct bnx2x *bp)
5254 {
5255         kfree(bp->strm->workspace);
5256
5257         kfree(bp->strm);
5258         bp->strm = NULL;
5259
5260         if (bp->gunzip_buf) {
5261                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5262                                     bp->gunzip_mapping);
5263                 bp->gunzip_buf = NULL;
5264         }
5265 }
5266
5267 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5268 {
5269         int n, rc;
5270
5271         /* check gzip header */
5272         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5273                 BNX2X_ERR("Bad gzip header\n");
5274                 return -EINVAL;
5275         }
5276
5277         n = 10;
5278
5279 #define FNAME                           0x8
5280
5281         if (zbuf[3] & FNAME)
5282                 while ((zbuf[n++] != 0) && (n < len));
5283
5284         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5285         bp->strm->avail_in = len - n;
5286         bp->strm->next_out = bp->gunzip_buf;
5287         bp->strm->avail_out = FW_BUF_SIZE;
5288
5289         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5290         if (rc != Z_OK)
5291                 return rc;
5292
5293         rc = zlib_inflate(bp->strm, Z_FINISH);
5294         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5295                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5296                        bp->dev->name, bp->strm->msg);
5297
5298         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5299         if (bp->gunzip_outlen & 0x3)
5300                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5301                                     " gunzip_outlen (%d) not aligned\n",
5302                        bp->dev->name, bp->gunzip_outlen);
5303         bp->gunzip_outlen >>= 2;
5304
5305         zlib_inflateEnd(bp->strm);
5306
5307         if (rc == Z_STREAM_END)
5308                 return 0;
5309
5310         return rc;
5311 }
5312
5313 /* nic load/unload */
5314
5315 /*
5316  * General service functions
5317  */
5318
5319 /* send a NIG loopback debug packet */
5320 static void bnx2x_lb_pckt(struct bnx2x *bp)
5321 {
5322         u32 wb_write[3];
5323
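        /* each DMAE write below pushes 64 bits of packet data plus a
         * control word: 0x20 marks start-of-packet, 0x10 end-of-packet */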
5324         /* Ethernet source and destination addresses */
5325         wb_write[0] = 0x55555555;
5326         wb_write[1] = 0x55555555;
5327         wb_write[2] = 0x20;             /* SOP */
5328         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5329
5330         /* NON-IP protocol */
5331         wb_write[0] = 0x09000000;
5332         wb_write[1] = 0x55555555;
5333         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5334         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5335 }
5336
5337 /* some of the internal memories
5338  * are not directly readable from the driver,
5339  * so to test them we send debug packets
5340  */
5341 static int bnx2x_int_mem_test(struct bnx2x *bp)
5342 {
5343         int factor;
5344         int count, i;
5345         u32 val = 0;
5346
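        /* the timeouts below are stretched when running on the much slower
         * FPGA and emulation platforms */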
5347         if (CHIP_REV_IS_FPGA(bp))
5348                 factor = 120;
5349         else if (CHIP_REV_IS_EMUL(bp))
5350                 factor = 200;
5351         else
5352                 factor = 1;
5353
5354         DP(NETIF_MSG_HW, "start part1\n");
5355
5356         /* Disable inputs of parser neighbor blocks */
5357         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5358         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5359         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5360         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5361
5362         /*  Write 0 to parser credits for CFC search request */
5363         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5364
5365         /* send Ethernet packet */
5366         bnx2x_lb_pckt(bp);
5367
5368         /* TODO: should the NIG statistics be reset here? */
5369         /* Wait until NIG register shows 1 packet of size 0x10 */
5370         count = 1000 * factor;
5371         while (count) {
5372
5373                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5374                 val = *bnx2x_sp(bp, wb_data[0]);
5375                 if (val == 0x10)
5376                         break;
5377
5378                 msleep(10);
5379                 count--;
5380         }
5381         if (val != 0x10) {
5382                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5383                 return -1;
5384         }
5385
5386         /* Wait until PRS register shows 1 packet */
5387         count = 1000 * factor;
5388         while (count) {
5389                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5390                 if (val == 1)
5391                         break;
5392
5393                 msleep(10);
5394                 count--;
5395         }
5396         if (val != 0x1) {
5397                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5398                 return -2;
5399         }
5400
5401         /* Reset and init BRB, PRS */
5402         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5403         msleep(50);
5404         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5405         msleep(50);
5406         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5407         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5408
5409         DP(NETIF_MSG_HW, "part2\n");
5410
5411         /* Disable inputs of parser neighbor blocks */
5412         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5413         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5414         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5415         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5416
5417         /* Write 0 to parser credits for CFC search request */
5418         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5419
5420         /* send 10 Ethernet packets */
5421         for (i = 0; i < 10; i++)
5422                 bnx2x_lb_pckt(bp);
5423
5424         /* Wait until NIG register shows 10 + 1
5425            packets of size 11*0x10 = 0xb0 */
5426         count = 1000 * factor;
5427         while (count) {
5428
5429                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5430                 val = *bnx2x_sp(bp, wb_data[0]);
5431                 if (val == 0xb0)
5432                         break;
5433
5434                 msleep(10);
5435                 count--;
5436         }
5437         if (val != 0xb0) {
5438                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5439                 return -3;
5440         }
5441
5442         /* Wait until PRS register shows 2 packets */
5443         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5444         if (val != 2)
5445                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5446
5447         /* Write 1 to parser credits for CFC search request */
5448         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5449
5450         /* Wait until PRS register shows 3 packets */
5451         msleep(10 * factor);
5452         /* the PRS packet counter should now include the released packet */
5453         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5454         if (val != 3)
5455                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5456
5457         /* clear NIG EOP FIFO */
5458         for (i = 0; i < 11; i++)
5459                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5460         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5461         if (val != 1) {
5462                 BNX2X_ERR("clear of NIG failed\n");
5463                 return -4;
5464         }
5465
5466         /* Reset and init BRB, PRS, NIG */
5467         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5468         msleep(50);
5469         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5470         msleep(50);
5471         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5472         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5473 #ifndef BCM_ISCSI
5474         /* set NIC mode */
5475         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5476 #endif
5477
5478         /* Enable inputs of parser neighbor blocks */
5479         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5480         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5481         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5482         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5483
5484         DP(NETIF_MSG_HW, "done\n");
5485
5486         return 0; /* OK */
5487 }
5488
5489 static void enable_blocks_attention(struct bnx2x *bp)
5490 {
5491         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5492         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5493         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5494         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5495         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5496         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5497         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5498         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5499         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5500 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5501 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5502         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5503         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5504         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5505 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5506 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5507         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5508         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5509         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5510         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5511 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5512 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5513         if (CHIP_REV_IS_FPGA(bp))
5514                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5515         else
5516                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5517         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5518         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5519         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5520 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5521 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5522         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5523         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5524 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5525         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5526 }
5527
5528
5529 static void bnx2x_reset_common(struct bnx2x *bp)
5530 {
5531         /* reset_common */
5532         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5533                0xd3ffff7f);
5534         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5535 }
5536
5537
5538 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5539 {
5540         u32 val;
5541         u8 port;
5542         u8 is_required = 0;
5543
5544         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5545               SHARED_HW_CFG_FAN_FAILURE_MASK;
5546
5547         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5548                 is_required = 1;
5549
5550         /*
5551          * The fan failure mechanism is usually related to the PHY type since
5552          * the power consumption of the board is affected by the PHY. Currently,
5553          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5554          */
5555         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5556                 for (port = PORT_0; port < PORT_MAX; port++) {
5557                         u32 phy_type =
5558                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
5559                                          external_phy_config) &
5560                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5561                         is_required |=
5562                                 ((phy_type ==
5563                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5564                                  (phy_type ==
5565                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5566                 }
5567
5568         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5569
5570         if (is_required == 0)
5571                 return;
5572
5573         /* Fan failure is indicated by SPIO 5 */
5574         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5575                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
5576
5577         /* set to active low mode */
5578         val = REG_RD(bp, MISC_REG_SPIO_INT);
5579         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5580                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5581         REG_WR(bp, MISC_REG_SPIO_INT, val);
5582
5583         /* enable interrupt to signal the IGU */
5584         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5585         val |= (1 << MISC_REGISTERS_SPIO_5);
5586         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5587 }
5588
5589 static int bnx2x_init_common(struct bnx2x *bp)
5590 {
5591         u32 val, i;
5592
5593         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5594
5595         bnx2x_reset_common(bp);
5596         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5597         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5598
5599         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5600         if (CHIP_IS_E1H(bp))
5601                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5602
5603         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5604         msleep(30);
5605         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5606
5607         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5608         if (CHIP_IS_E1(bp)) {
5609                 /* enable HW interrupt from PXP on USDM overflow
5610                    bit 16 on INT_MASK_0 */
5611                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5612         }
5613
5614         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5615         bnx2x_init_pxp(bp);
5616
5617 #ifdef __BIG_ENDIAN
5618         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5619         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5620         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5621         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5622         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5623         /* make sure this value is 0 */
5624         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5625
5626 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5627         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5628         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5629         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5630         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5631 #endif
5632
5633         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5634 #ifdef BCM_ISCSI
5635         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5636         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5637         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5638 #endif
5639
5640         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5641                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5642
5643         /* let the HW do its magic ... */
5644         msleep(100);
5645         /* finish PXP init */
5646         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5647         if (val != 1) {
5648                 BNX2X_ERR("PXP2 CFG failed\n");
5649                 return -EBUSY;
5650         }
5651         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5652         if (val != 1) {
5653                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5654                 return -EBUSY;
5655         }
5656
5657         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5658         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5659
5660         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5661
5662         /* clean the DMAE memory */
5663         bp->dmae_ready = 1;
5664         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5665
5666         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5667         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5668         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5669         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5670
5671         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5672         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5673         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5674         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5675
5676         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5677         /* soft reset pulse */
5678         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5679         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5680
5681 #ifdef BCM_ISCSI
5682         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5683 #endif
5684
5685         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5686         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5687         if (!CHIP_REV_IS_SLOW(bp)) {
5688                 /* enable hw interrupt from doorbell Q */
5689                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5690         }
5691
5692         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5693         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5694         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5695         /* set NIC mode */
5696         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5697         if (CHIP_IS_E1H(bp))
5698                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5699
5700         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5701         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5702         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5703         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5704
5705         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5706         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5707         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5708         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5709
5710         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5711         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5712         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5713         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5714
5715         /* sync semi rtc */
5716         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5717                0x80000000);
5718         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5719                0x80000000);
5720
5721         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5722         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5723         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5724
5725         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5726         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5727                 REG_WR(bp, i, 0xc0cac01a);
5728                 /* TODO: replace with something meaningful */
5729         }
5730         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5731         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5732
5733         if (sizeof(union cdu_context) != 1024)
5734                 /* we currently assume that a context is 1024 bytes */
5735                 printk(KERN_ALERT PFX "please adjust the size of"
5736                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5737
5738         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5739         val = (4 << 24) + (0 << 12) + 1024;
5740         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5741         if (CHIP_IS_E1(bp)) {
5742                 /* !!! fix pxp client credit until excel update */
5743                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5744                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5745         }
5746
5747         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5748         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5749         /* enable context validation interrupt from CFC */
5750         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5751
5752         /* set the thresholds to prevent CFC/CDU race */
5753         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5754
5755         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5756         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5757
5758         /* PXPCS COMMON comes here */
5759         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5760         /* Reset PCIE errors for debug */
5761         REG_WR(bp, 0x2814, 0xffffffff);
5762         REG_WR(bp, 0x3820, 0xffffffff);
5763
5764         /* EMAC0 COMMON comes here */
5765         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5766         /* EMAC1 COMMON comes here */
5767         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5768         /* DBU COMMON comes here */
5769         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5770         /* DBG COMMON comes here */
5771         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5772
5773         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5774         if (CHIP_IS_E1H(bp)) {
5775                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5776                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5777         }
5778
5779         if (CHIP_REV_IS_SLOW(bp))
5780                 msleep(200);
5781
5782         /* finish CFC init */
5783         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5784         if (val != 1) {
5785                 BNX2X_ERR("CFC LL_INIT failed\n");
5786                 return -EBUSY;
5787         }
5788         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5789         if (val != 1) {
5790                 BNX2X_ERR("CFC AC_INIT failed\n");
5791                 return -EBUSY;
5792         }
5793         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5794         if (val != 1) {
5795                 BNX2X_ERR("CFC CAM_INIT failed\n");
5796                 return -EBUSY;
5797         }
5798         REG_WR(bp, CFC_REG_DEBUG0, 0);
5799
5800         /* read NIG statistic
5801            to see if this is our first up since powerup */
5802         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5803         val = *bnx2x_sp(bp, wb_data[0]);
5804
5805         /* do internal memory self test */
5806         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5807                 BNX2X_ERR("internal mem self test failed\n");
5808                 return -EBUSY;
5809         }
5810
5811         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5812         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5813         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5814         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5815                 bp->port.need_hw_lock = 1;
5816                 break;
5817
5818         default:
5819                 break;
5820         }
5821
5822         bnx2x_setup_fan_failure_detection(bp);
5823
5824         /* clear PXP2 attentions */
5825         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5826
5827         enable_blocks_attention(bp);
5828
5829         if (!BP_NOMCP(bp)) {
5830                 bnx2x_acquire_phy_lock(bp);
5831                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5832                 bnx2x_release_phy_lock(bp);
5833         } else
5834                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5835
5836         return 0;
5837 }
5838
5839 static int bnx2x_init_port(struct bnx2x *bp)
5840 {
5841         int port = BP_PORT(bp);
5842         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5843         u32 low, high;
5844         u32 val;
5845
5846         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5847
5848         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5849
5850         /* Port PXP comes here */
5851         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5852         /* Port PXP2 comes here */
5853         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5854 #ifdef BCM_ISCSI
5855         /* Port0  1
5856          * Port1  385 */
5857         i++;
5858         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5859         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5860         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5861         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5862
5863         /* Port0  2
5864          * Port1  386 */
5865         i++;
5866         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5867         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5868         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5869         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5870
5871         /* Port0  3
5872          * Port1  387 */
5873         i++;
5874         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5875         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5876         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5877         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5878 #endif
5879         /* Port CMs come here */
5880         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5881
5882         /* Port QM comes here */
5883 #ifdef BCM_ISCSI
5884         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5885         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5886
5887         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5888 #endif
5889         /* Port DQ comes here */
5890         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5891
5892         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5893         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5894                 /* no pause for emulation and FPGA */
5895                 low = 0;
5896                 high = 513;
5897         } else {
5898                 if (IS_E1HMF(bp))
5899                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5900                 else if (bp->dev->mtu > 4096) {
5901                         if (bp->flags & ONE_PORT_FLAG)
5902                                 low = 160;
5903                         else {
5904                                 val = bp->dev->mtu;
5905                                 /* (24*1024 + val*4)/256 */
5906                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5907                         }
5908                 } else
5909                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5910                 high = low + 56;        /* 14*1024/256 */
5911         }
5912         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5913         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5914
5915
5916         /* Port PRS comes here */
5917         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5918         /* Port TSDM comes here */
5919         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5920         /* Port CSDM comes here */
5921         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5922         /* Port USDM comes here */
5923         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5924         /* Port XSDM comes here */
5925         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5926
5927         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5928         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5929         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5930         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5931
5932         /* Port UPB comes here */
5933         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5934         /* Port XPB comes here */
5935         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5936
5937         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5938
5939         /* configure PBF to work without PAUSE mtu 9000 */
5940         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5941
5942         /* update threshold */
5943         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5944         /* update init credit */
5945         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5946
5947         /* probe changes */
5948         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5949         msleep(5);
5950         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5951
5952 #ifdef BCM_ISCSI
5953         /* tell the searcher where the T2 table is */
5954         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5955
5956         wb_write[0] = U64_LO(bp->t2_mapping);
5957         wb_write[1] = U64_HI(bp->t2_mapping);
5958         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5959         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5960         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5961         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5962
5963         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5964         /* Port SRCH comes here */
5965 #endif
5966         /* Port CDU comes here */
5967         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5968         /* Port CFC comes here */
5969         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5970
5971         if (CHIP_IS_E1(bp)) {
5972                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5973                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5974         }
5975         bnx2x_init_block(bp, HC_BLOCK, init_stage);
5976
5977         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5978         /* init aeu_mask_attn_func_0/1:
5979          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5980          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5981          *             bits 4-7 are used for "per vn group attention" */
5982         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5983                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5984
5985         /* Port PXPCS comes here */
5986         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5987         /* Port EMAC0 comes here */
5988         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5989         /* Port EMAC1 comes here */
5990         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5991         /* Port DBU comes here */
5992         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5993         /* Port DBG comes here */
5994         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5995
5996         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5997
5998         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5999
6000         if (CHIP_IS_E1H(bp)) {
6001                 /* 0x2 disable e1hov, 0x1 enable */
6002                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6003                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6004
6005                 /* support pause requests from USDM, TSDM and BRB */
6006                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6007
6008                 {
6009                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6010                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6011                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6012                 }
6013         }
6014
6015         /* Port MCP comes here */
6016         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6017         /* Port DMAE comes here */
6018         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6019
6020         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6021         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6022                 {
6023                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6024
6025                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6026                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6027
6028                 /* The GPIO should be swapped if the swap register is
6029                    set and active */
6030                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6031                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6032
6033                 /* Select function upon port-swap configuration */
6034                 if (port == 0) {
6035                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6036                         aeu_gpio_mask = (swap_val && swap_override) ?
6037                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6038                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6039                 } else {
6040                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6041                         aeu_gpio_mask = (swap_val && swap_override) ?
6042                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6043                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6044                 }
6045                 val = REG_RD(bp, offset);
6046                 /* add GPIO3 to group */
6047                 val |= aeu_gpio_mask;
6048                 REG_WR(bp, offset, val);
6049                 }
6050                 break;
6051
6052         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6053                 /* add SPIO 5 to group 0 */
6054                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6055                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6056                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6057                 break;
6058
6059         default:
6060                 break;
6061         }
6062
6063         bnx2x__link_reset(bp);
6064
6065         return 0;
6066 }
6067
6068 #define ILT_PER_FUNC            (768/2)
6069 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6070 /* The physical address is shifted right 12 bits and a 1=valid bit is
6071    added in the 53rd bit.
6072    Since this is a wide register(TM), it is
6073    split into two 32-bit writes.
6074  */
6075 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6076 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6077 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6078 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
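/* e.g. for a physical address of 0x1234567000:
 *   ONCHIP_ADDR1 = 0x01234567   (bits 43:12 of the address)
 *   ONCHIP_ADDR2 = 0x00100000   (the valid bit; bits 63:44 here are zero)
 */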
6079
6080 #define CNIC_ILT_LINES          0
6081
6082 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6083 {
6084         int reg;
6085
6086         if (CHIP_IS_E1H(bp))
6087                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6088         else /* E1 */
6089                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6090
6091         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6092 }
6093
6094 static int bnx2x_init_func(struct bnx2x *bp)
6095 {
6096         int port = BP_PORT(bp);
6097         int func = BP_FUNC(bp);
6098         u32 addr, val;
6099         int i;
6100
6101         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6102
6103         /* set MSI reconfigure capability */
6104         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6105         val = REG_RD(bp, addr);
6106         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6107         REG_WR(bp, addr, val);
6108
6109         i = FUNC_ILT_BASE(func);
6110
6111         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6112         if (CHIP_IS_E1H(bp)) {
6113                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6114                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6115         } else /* E1 */
6116                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6117                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6118
6119
6120         if (CHIP_IS_E1H(bp)) {
6121                 for (i = 0; i < 9; i++)
6122                         bnx2x_init_block(bp,
6123                                          cm_blocks[i], FUNC0_STAGE + func);
6124
6125                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6126                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6127         }
6128
6129         /* HC init per function */
6130         if (CHIP_IS_E1H(bp)) {
6131                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6132
6133                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6134                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6135         }
6136         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6137
6138         /* Reset PCIE errors for debug */
6139         REG_WR(bp, 0x2114, 0xffffffff);
6140         REG_WR(bp, 0x2120, 0xffffffff);
6141
6142         return 0;
6143 }
6144
6145 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6146 {
6147         int i, rc = 0;
6148
6149         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6150            BP_FUNC(bp), load_code);
6151
6152         bp->dmae_ready = 0;
6153         mutex_init(&bp->dmae_mutex);
6154         bnx2x_gunzip_init(bp);
6155
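        /* the cases below deliberately fall through: a COMMON load also
         * runs the PORT and FUNCTION init stages, and a PORT load also
         * runs the FUNCTION stage */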
6156         switch (load_code) {
6157         case FW_MSG_CODE_DRV_LOAD_COMMON:
6158                 rc = bnx2x_init_common(bp);
6159                 if (rc)
6160                         goto init_hw_err;
6161                 /* no break */
6162
6163         case FW_MSG_CODE_DRV_LOAD_PORT:
6164                 bp->dmae_ready = 1;
6165                 rc = bnx2x_init_port(bp);
6166                 if (rc)
6167                         goto init_hw_err;
6168                 /* no break */
6169
6170         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6171                 bp->dmae_ready = 1;
6172                 rc = bnx2x_init_func(bp);
6173                 if (rc)
6174                         goto init_hw_err;
6175                 break;
6176
6177         default:
6178                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6179                 break;
6180         }
6181
6182         if (!BP_NOMCP(bp)) {
6183                 int func = BP_FUNC(bp);
6184
6185                 bp->fw_drv_pulse_wr_seq =
6186                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6187                                  DRV_PULSE_SEQ_MASK);
6188                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6189                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6190                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6191         } else
6192                 bp->func_stx = 0;
6193
6194         /* this needs to be done before gunzip end */
6195         bnx2x_zero_def_sb(bp);
6196         for_each_queue(bp, i)
6197                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6198
6199 init_hw_err:
6200         bnx2x_gunzip_end(bp);
6201
6202         return rc;
6203 }
6204
6205 /* send the MCP a request, block until there is a reply */
6206 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6207 {
6208         int func = BP_FUNC(bp);
6209         u32 seq = ++bp->fw_seq;
6210         u32 rc = 0;
6211         u32 cnt = 1;
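        /* mailbox poll interval in ms; the much slower emulation/FPGA
         * platforms are polled at a coarser period */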
6212         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6213
6214         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6215         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6216
6217         do {
6218                 /* let the FW do its magic ... */
6219                 msleep(delay);
6220
6221                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6222
6223                 /* Give the FW up to 2 seconds (200 * 10ms) */
6224         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6225
6226         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6227            cnt*delay, rc, seq);
6228
6229         /* is this a reply to our command? */
6230         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6231                 rc &= FW_MSG_CODE_MASK;
6232
6233         } else {
6234                 /* FW BUG! */
6235                 BNX2X_ERR("FW failed to respond!\n");
6236                 bnx2x_fw_dump(bp);
6237                 rc = 0;
6238         }
6239
6240         return rc;
6241 }
6242
6243 static void bnx2x_free_mem(struct bnx2x *bp)
6244 {
6245
6246 #define BNX2X_PCI_FREE(x, y, size) \
6247         do { \
6248                 if (x) { \
6249                         pci_free_consistent(bp->pdev, size, x, y); \
6250                         x = NULL; \
6251                         y = 0; \
6252                 } \
6253         } while (0)
6254
6255 #define BNX2X_FREE(x) \
6256         do { \
6257                 if (x) { \
6258                         vfree(x); \
6259                         x = NULL; \
6260                 } \
6261         } while (0)
6262
6263         int i;
6264
6265         /* fastpath */
6266         /* Common */
6267         for_each_queue(bp, i) {
6268
6269                 /* status blocks */
6270                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6271                                bnx2x_fp(bp, i, status_blk_mapping),
6272                                sizeof(struct host_status_block) +
6273                                sizeof(struct eth_tx_db_data));
6274         }
6275         /* Rx */
6276         for_each_rx_queue(bp, i) {
6277
6278                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6279                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6280                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6281                                bnx2x_fp(bp, i, rx_desc_mapping),
6282                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6283
6284                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6285                                bnx2x_fp(bp, i, rx_comp_mapping),
6286                                sizeof(struct eth_fast_path_rx_cqe) *
6287                                NUM_RCQ_BD);
6288
6289                 /* SGE ring */
6290                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6291                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6292                                bnx2x_fp(bp, i, rx_sge_mapping),
6293                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6294         }
6295         /* Tx */
6296         for_each_tx_queue(bp, i) {
6297
6298                 /* fastpath tx rings: tx_buf tx_desc */
6299                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6300                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6301                                bnx2x_fp(bp, i, tx_desc_mapping),
6302                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
6303         }
6304         /* end of fastpath */
6305
6306         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6307                        sizeof(struct host_def_status_block));
6308
6309         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6310                        sizeof(struct bnx2x_slowpath));
6311
6312 #ifdef BCM_ISCSI
6313         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6314         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6315         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6316         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6317 #endif
6318         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6319
6320 #undef BNX2X_PCI_FREE
6321 #undef BNX2X_FREE
6322 }
6323
6324 static int bnx2x_alloc_mem(struct bnx2x *bp)
6325 {
6326
6327 #define BNX2X_PCI_ALLOC(x, y, size) \
6328         do { \
6329                 x = pci_alloc_consistent(bp->pdev, size, y); \
6330                 if (x == NULL) \
6331                         goto alloc_mem_err; \
6332                 memset(x, 0, size); \
6333         } while (0)
6334
6335 #define BNX2X_ALLOC(x, size) \
6336         do { \
6337                 x = vmalloc(size); \
6338                 if (x == NULL) \
6339                         goto alloc_mem_err; \
6340                 memset(x, 0, size); \
6341         } while (0)
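        /* BNX2X_PCI_ALLOC returns DMA-coherent memory that the chip accesses
         * directly; BNX2X_ALLOC is a plain vmalloc for the driver-only
         * shadow rings (e.g. the sw_rx_bd/sw_tx_bd bookkeeping) */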
6342
6343         int i;
6344
6345         /* fastpath */
6346         /* Common */
6347         for_each_queue(bp, i) {
6348                 bnx2x_fp(bp, i, bp) = bp;
6349
6350                 /* status blocks */
6351                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6352                                 &bnx2x_fp(bp, i, status_blk_mapping),
6353                                 sizeof(struct host_status_block) +
6354                                 sizeof(struct eth_tx_db_data));
6355         }
6356         /* Rx */
6357         for_each_rx_queue(bp, i) {
6358
6359                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6360                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6361                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6362                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6363                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6364                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6365
6366                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6367                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6368                                 sizeof(struct eth_fast_path_rx_cqe) *
6369                                 NUM_RCQ_BD);
6370
6371                 /* SGE ring */
6372                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6373                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6374                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6375                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6376                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6377         }
6378         /* Tx */
6379         for_each_tx_queue(bp, i) {
6380
6381                 bnx2x_fp(bp, i, hw_tx_prods) =
6382                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6383
6384                 bnx2x_fp(bp, i, tx_prods_mapping) =
6385                                 bnx2x_fp(bp, i, status_blk_mapping) +
6386                                 sizeof(struct host_status_block);
6387
6388                 /* fastpath tx rings: tx_buf tx_desc */
6389                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6390                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6391                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6392                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6393                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6394         }
6395         /* end of fastpath */
6396
6397         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6398                         sizeof(struct host_def_status_block));
6399
6400         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6401                         sizeof(struct bnx2x_slowpath));
6402
6403 #ifdef BCM_ISCSI
6404         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6405
6406         /* Initialize T1 */
6407         for (i = 0; i < 64*1024; i += 64) {
6408                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6409                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6410         }
6411
6412         /* allocate the searcher T2 table;
6413            we allocate 1/4 of the alloc num for T2
6414            (which is not entered into the ILT) */
6415         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6416
6417         /* Initialize T2 */
6418         for (i = 0; i < 16*1024; i += 64)
6419                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6420
6421         /* now fixup the last line in the block to point to the next block */
6422         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6423
6424         /* Timer block array (MAX_CONN*8), physically uncached; 1024 conns for now */
6425         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6426
6427         /* QM queues (128*MAX_CONN) */
6428         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6429 #endif
6430
6431         /* Slow path ring */
6432         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6433
6434         return 0;
6435
6436 alloc_mem_err:
6437         bnx2x_free_mem(bp);
6438         return -ENOMEM;
6439
6440 #undef BNX2X_PCI_ALLOC
6441 #undef BNX2X_ALLOC
6442 }
6443
6444 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6445 {
6446         int i;
6447
6448         for_each_tx_queue(bp, i) {
6449                 struct bnx2x_fastpath *fp = &bp->fp[i];
6450
6451                 u16 bd_cons = fp->tx_bd_cons;
6452                 u16 sw_prod = fp->tx_pkt_prod;
6453                 u16 sw_cons = fp->tx_pkt_cons;
6454
6455                 while (sw_cons != sw_prod) {
6456                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6457                         sw_cons++;
6458                 }
6459         }
6460 }
6461
6462 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6463 {
6464         int i, j;
6465
6466         for_each_rx_queue(bp, j) {
6467                 struct bnx2x_fastpath *fp = &bp->fp[j];
6468
6469                 for (i = 0; i < NUM_RX_BD; i++) {
6470                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6471                         struct sk_buff *skb = rx_buf->skb;
6472
6473                         if (skb == NULL)
6474                                 continue;
6475
6476                         pci_unmap_single(bp->pdev,
6477                                          pci_unmap_addr(rx_buf, mapping),
6478                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6479
6480                         rx_buf->skb = NULL;
6481                         dev_kfree_skb(skb);
6482                 }
6483                 if (!fp->disable_tpa)
6484                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6485                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6486                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6487         }
6488 }
6489
6490 static void bnx2x_free_skbs(struct bnx2x *bp)
6491 {
6492         bnx2x_free_tx_skbs(bp);
6493         bnx2x_free_rx_skbs(bp);
6494 }
6495
6496 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6497 {
6498         int i, offset = 1;
6499
6500         free_irq(bp->msix_table[0].vector, bp->dev);
6501         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6502            bp->msix_table[0].vector);
6503
6504         for_each_queue(bp, i) {
6505                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6506                    "state %x\n", i, bp->msix_table[i + offset].vector,
6507                    bnx2x_fp(bp, i, state));
6508
6509                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6510         }
6511 }
6512
6513 static void bnx2x_free_irq(struct bnx2x *bp)
6514 {
6515         if (bp->flags & USING_MSIX_FLAG) {
6516                 bnx2x_free_msix_irqs(bp);
6517                 pci_disable_msix(bp->pdev);
6518                 bp->flags &= ~USING_MSIX_FLAG;
6519
6520         } else if (bp->flags & USING_MSI_FLAG) {
6521                 free_irq(bp->pdev->irq, bp->dev);
6522                 pci_disable_msi(bp->pdev);
6523                 bp->flags &= ~USING_MSI_FLAG;
6524
6525         } else
6526                 free_irq(bp->pdev->irq, bp->dev);
6527 }
6528
6529 static int bnx2x_enable_msix(struct bnx2x *bp)
6530 {
6531         int i, rc, offset = 1;
6532         int igu_vec = 0;
6533
6534         bp->msix_table[0].entry = igu_vec;
6535         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6536
6537         for_each_queue(bp, i) {
6538                 igu_vec = BP_L_ID(bp) + offset + i;
6539                 bp->msix_table[i + offset].entry = igu_vec;
6540                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6541                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6542         }
6543
6544         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6545                              BNX2X_NUM_QUEUES(bp) + offset);
6546         if (rc) {
6547                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6548                 return rc;
6549         }
6550
6551         bp->flags |= USING_MSIX_FLAG;
6552
6553         return 0;
6554 }
6555
6556 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6557 {
6558         int i, rc, offset = 1;
6559
6560         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6561                          bp->dev->name, bp->dev);
6562         if (rc) {
6563                 BNX2X_ERR("request sp irq failed\n");
6564                 return -EBUSY;
6565         }
6566
6567         for_each_queue(bp, i) {
6568                 struct bnx2x_fastpath *fp = &bp->fp[i];
6569
6570                 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6571                 rc = request_irq(bp->msix_table[i + offset].vector,
6572                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6573                 if (rc) {
6574                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6575                         bnx2x_free_msix_irqs(bp);
6576                         return -EBUSY;
6577                 }
6578
6579                 fp->state = BNX2X_FP_STATE_IRQ;
6580         }
6581
6582         i = BNX2X_NUM_QUEUES(bp);
6583         if (is_multi(bp))
6584                 printk(KERN_INFO PFX
6585                        "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6586                        bp->dev->name, bp->msix_table[0].vector,
6587                        bp->msix_table[offset].vector,
6588                        bp->msix_table[offset + i - 1].vector);
6589         else
6590                 printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6591                        bp->dev->name, bp->msix_table[0].vector,
6592                        bp->msix_table[offset + i - 1].vector);
6593
6594         return 0;
6595 }
6596
6597 static int bnx2x_enable_msi(struct bnx2x *bp)
6598 {
6599         int rc;
6600
6601         rc = pci_enable_msi(bp->pdev);
6602         if (rc) {
6603                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6604                 return -1;
6605         }
6606         bp->flags |= USING_MSI_FLAG;
6607
6608         return 0;
6609 }
6610
6611 static int bnx2x_req_irq(struct bnx2x *bp)
6612 {
6613         unsigned long flags;
6614         int rc;
6615
6616         if (bp->flags & USING_MSI_FLAG)
6617                 flags = 0;
6618         else
6619                 flags = IRQF_SHARED;
6620
6621         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6622                          bp->dev->name, bp->dev);
6623         if (!rc)
6624                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6625
6626         return rc;
6627 }
6628
6629 static void bnx2x_napi_enable(struct bnx2x *bp)
6630 {
6631         int i;
6632
6633         for_each_rx_queue(bp, i)
6634                 napi_enable(&bnx2x_fp(bp, i, napi));
6635 }
6636
6637 static void bnx2x_napi_disable(struct bnx2x *bp)
6638 {
6639         int i;
6640
6641         for_each_rx_queue(bp, i)
6642                 napi_disable(&bnx2x_fp(bp, i, napi));
6643 }
6644
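/* intr_sem is held elevated while the chip is being (re)configured so that
 * stray interrupts are ignored; atomic_dec_and_test() returns true only on
 * the final decrement to zero, i.e. when it is safe to run the NAPI path
 * again.  The smp_wmb() publishes the new intr_sem value to the interrupt
 * handler on other CPUs before interrupts are actually re-enabled.
 */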
6645 static void bnx2x_netif_start(struct bnx2x *bp)
6646 {
6647         int intr_sem;
6648
6649         intr_sem = atomic_dec_and_test(&bp->intr_sem);
6650         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6651
6652         if (intr_sem) {
6653                 if (netif_running(bp->dev)) {
6654                         bnx2x_napi_enable(bp);
6655                         bnx2x_int_enable(bp);
6656                         if (bp->state == BNX2X_STATE_OPEN)
6657                                 netif_tx_wake_all_queues(bp->dev);
6658                 }
6659         }
6660 }
6661
6662 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6663 {
6664         bnx2x_int_disable_sync(bp, disable_hw);
6665         bnx2x_napi_disable(bp);
6666         netif_tx_disable(bp->dev);
6667         bp->dev->trans_start = jiffies; /* prevent tx timeout */
6668 }
6669
6670 /*
6671  * Init service functions
6672  */
6673
6674 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6675 {
6676         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6677         int port = BP_PORT(bp);
6678
6679         /* CAM allocation
6680          * unicasts 0-31:port0 32-63:port1
6681          * multicast 64-127:port0 128-191:port1
6682          */
6683         config->hdr.length = 2;
6684         config->hdr.offset = port ? 32 : 0;
6685         config->hdr.client_id = bp->fp->cl_id;
6686         config->hdr.reserved1 = 0;
6687
6688         /* primary MAC */
6689         config->config_table[0].cam_entry.msb_mac_addr =
6690                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6691         config->config_table[0].cam_entry.middle_mac_addr =
6692                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6693         config->config_table[0].cam_entry.lsb_mac_addr =
6694                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6695         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6696         if (set)
6697                 config->config_table[0].target_table_entry.flags = 0;
6698         else
6699                 CAM_INVALIDATE(config->config_table[0]);
6700         config->config_table[0].target_table_entry.client_id = 0;
6701         config->config_table[0].target_table_entry.vlan_id = 0;
6702
6703         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6704            (set ? "setting" : "clearing"),
6705            config->config_table[0].cam_entry.msb_mac_addr,
6706            config->config_table[0].cam_entry.middle_mac_addr,
6707            config->config_table[0].cam_entry.lsb_mac_addr);
6708
6709         /* broadcast */
6710         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6711         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6712         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6713         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6714         if (set)
6715                 config->config_table[1].target_table_entry.flags =
6716                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6717         else
6718                 CAM_INVALIDATE(config->config_table[1]);
6719         config->config_table[1].target_table_entry.client_id = 0;
6720         config->config_table[1].target_table_entry.vlan_id = 0;
6721
6722         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6723                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6724                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6725 }
6726
6727 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6728 {
6729         struct mac_configuration_cmd_e1h *config =
6730                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6731
6732         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6733                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6734                 return;
6735         }
6736
6737         /* CAM allocation for E1H
6738          * unicasts: by func number
6739          * multicast: 20+FUNC*20, 20 each
6740          */
6741         config->hdr.length = 1;
6742         config->hdr.offset = BP_FUNC(bp);
6743         config->hdr.client_id = bp->fp->cl_id;
6744         config->hdr.reserved1 = 0;
6745
6746         /* primary MAC */
6747         config->config_table[0].msb_mac_addr =
6748                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6749         config->config_table[0].middle_mac_addr =
6750                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6751         config->config_table[0].lsb_mac_addr =
6752                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6753         config->config_table[0].client_id = BP_L_ID(bp);
6754         config->config_table[0].vlan_id = 0;
6755         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6756         if (set)
6757                 config->config_table[0].flags = BP_PORT(bp);
6758         else
6759                 config->config_table[0].flags =
6760                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6761
6762         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6763            (set ? "setting" : "clearing"),
6764            config->config_table[0].msb_mac_addr,
6765            config->config_table[0].middle_mac_addr,
6766            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6767
6768         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6769                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6770                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6771 }
6772
6773 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6774                              int *state_p, int poll)
6775 {
6776         /* can take a while if any port is running */
6777         int cnt = 5000;
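        /* 5000 iterations of msleep(1) below give a timeout of roughly
         * 5 seconds (longer under scheduling pressure) */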
6778
6779         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6780            poll ? "polling" : "waiting", state, idx);
6781
6782         might_sleep();
6783         while (cnt--) {
6784                 if (poll) {
6785                         bnx2x_rx_int(bp->fp, 10);
6786                         /* if the index is non-zero, the reply
6787                          * for some commands will arrive on
6788                          * the non-default queue
6789                          */
6790                         if (idx)
6791                                 bnx2x_rx_int(&bp->fp[idx], 10);
6792                 }
6793
6794                 mb(); /* state is changed by bnx2x_sp_event() */
6795                 if (*state_p == state) {
6796 #ifdef BNX2X_STOP_ON_ERROR
6797                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6798 #endif
6799                         return 0;
6800                 }
6801
6802                 msleep(1);
6803         }
6804
6805         /* timeout! */
6806         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6807                   poll ? "polling" : "waiting", state, idx);
6808 #ifdef BNX2X_STOP_ON_ERROR
6809         bnx2x_panic();
6810 #endif
6811
6812         return -EBUSY;
6813 }
6814
6815 static int bnx2x_setup_leading(struct bnx2x *bp)
6816 {
6817         int rc;
6818
6819         /* reset IGU state */
6820         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6821
6822         /* SETUP ramrod */
6823         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6824
6825         /* Wait for completion */
6826         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6827
6828         return rc;
6829 }
6830
6831 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6832 {
6833         struct bnx2x_fastpath *fp = &bp->fp[index];
6834
6835         /* reset IGU state */
6836         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6837
6838         /* SETUP ramrod */
6839         fp->state = BNX2X_FP_STATE_OPENING;
6840         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6841                       fp->cl_id, 0);
6842
6843         /* Wait for completion */
6844         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6845                                  &(fp->state), 0);
6846 }
6847
6848 static int bnx2x_poll(struct napi_struct *napi, int budget);
6849
6850 static void bnx2x_set_int_mode(struct bnx2x *bp)
6851 {
6852         int num_queues;
6853
6854         switch (int_mode) {
6855         case INT_MODE_INTx:
6856         case INT_MODE_MSI:
6857                 num_queues = 1;
6858                 bp->num_rx_queues = num_queues;
6859                 bp->num_tx_queues = num_queues;
6860                 DP(NETIF_MSG_IFUP,
6861                    "set number of queues to %d\n", num_queues);
6862                 break;
6863
6864         case INT_MODE_MSIX:
6865         default:
6866                 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6867                         num_queues = min_t(u32, num_online_cpus(),
6868                                            BNX2X_MAX_QUEUES(bp));
6869                 else
6870                         num_queues = 1;
6871                 bp->num_rx_queues = num_queues;
6872                 bp->num_tx_queues = num_queues;
6873                 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6874                    "  number of tx queues to %d\n",
6875                    bp->num_rx_queues, bp->num_tx_queues);
6876                 /* if we can't use MSI-X we only need one fp,
6877                  * so try to enable MSI-X with the requested number of fp's
6878                  * and fall back to MSI or legacy INTx with one fp
6879                  */
6880                 if (bnx2x_enable_msix(bp)) {
6881                         /* failed to enable MSI-X */
6882                         num_queues = 1;
6883                         bp->num_rx_queues = num_queues;
6884                         bp->num_tx_queues = num_queues;
6885                         if (bp->multi_mode)
6886                                 BNX2X_ERR("Multi requested but failed to "
6887                                           "enable MSI-X  set number of "
6888                                           "queues to %d\n", num_queues);
6889                 }
6890                 break;
6891         }
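        /* publish the final tx queue count to the networking stack */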
6892         bp->dev->real_num_tx_queues = bp->num_tx_queues;
6893 }
6894
6895 static void bnx2x_set_rx_mode(struct net_device *dev);
6896
6897 /* must be called with rtnl_lock */
6898 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6899 {
6900         u32 load_code;
6901         int i, rc = 0;
6902 #ifdef BNX2X_STOP_ON_ERROR
6903         DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6904         if (unlikely(bp->panic))
6905                 return -EPERM;
6906 #endif
6907
6908         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6909
6910         bnx2x_set_int_mode(bp);
6911
6912         if (bnx2x_alloc_mem(bp))
6913                 return -ENOMEM;
6914
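        /* propagate the global TPA on/off setting to every rx queue */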
6915         for_each_rx_queue(bp, i)
6916                 bnx2x_fp(bp, i, disable_tpa) =
6917                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6918
6919         for_each_rx_queue(bp, i)
6920                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6921                                bnx2x_poll, 128);
6922
6923 #ifdef BNX2X_STOP_ON_ERROR
6924         for_each_rx_queue(bp, i) {
6925                 struct bnx2x_fastpath *fp = &bp->fp[i];
6926
6927                 fp->poll_no_work = 0;
6928                 fp->poll_calls = 0;
6929                 fp->poll_max_calls = 0;
6930                 fp->poll_complete = 0;
6931                 fp->poll_exit = 0;
6932         }
6933 #endif
6934         bnx2x_napi_enable(bp);
6935
6936         if (bp->flags & USING_MSIX_FLAG) {
6937                 rc = bnx2x_req_msix_irqs(bp);
6938                 if (rc) {
6939                         pci_disable_msix(bp->pdev);
6940                         goto load_error1;
6941                 }
6942         } else {
6943                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6944                         bnx2x_enable_msi(bp);
6945                 bnx2x_ack_int(bp);
6946                 rc = bnx2x_req_irq(bp);
6947                 if (rc) {
6948                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6949                         if (bp->flags & USING_MSI_FLAG)
6950                                 pci_disable_msi(bp->pdev);
6951                         goto load_error1;
6952                 }
6953                 if (bp->flags & USING_MSI_FLAG) {
6954                         bp->dev->irq = bp->pdev->irq;
6955                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6956                                bp->dev->name, bp->pdev->irq);
6957                 }
6958         }
6959
6960         /* Send LOAD_REQUEST command to the MCP.
6961            The reply indicates which type of LOAD to perform:
6962            if this is the first port to be initialized,
6963            the common blocks must be initialized as well; otherwise not.
6964         */
6965         if (!BP_NOMCP(bp)) {
6966                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6967                 if (!load_code) {
6968                         BNX2X_ERR("MCP response failure, aborting\n");
6969                         rc = -EBUSY;
6970                         goto load_error2;
6971                 }
6972                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6973                         rc = -EBUSY; /* other port in diagnostic mode */
6974                         goto load_error2;
6975                 }
6976
6977         } else {
6978                 int port = BP_PORT(bp);
6979
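                /* Without an MCP to arbitrate, emulate its decision using
                 * the driver-global load_count[]: index 0 counts loads on
                 * the whole device, index 1 + port counts loads per port.
                 * The first load on the device does COMMON init, the first
                 * on a port does PORT init, any later load FUNCTION only.
                 */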
6980                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
6981                    load_count[0], load_count[1], load_count[2]);
6982                 load_count[0]++;
6983                 load_count[1 + port]++;
6984                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
6985                    load_count[0], load_count[1], load_count[2]);
6986                 if (load_count[0] == 1)
6987                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6988                 else if (load_count[1 + port] == 1)
6989                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6990                 else
6991                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6992         }
6993
6994         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6995             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6996                 bp->port.pmf = 1;
6997         else
6998                 bp->port.pmf = 0;
6999         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7000
7001         /* Initialize HW */
7002         rc = bnx2x_init_hw(bp, load_code);
7003         if (rc) {
7004                 BNX2X_ERR("HW init failed, aborting\n");
7005                 goto load_error2;
7006         }
7007
7008         /* Setup NIC internals and enable interrupts */
7009         bnx2x_nic_init(bp, load_code);
7010
7011         /* Send LOAD_DONE command to MCP */
7012         if (!BP_NOMCP(bp)) {
7013                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7014                 if (!load_code) {
7015                         BNX2X_ERR("MCP response failure, aborting\n");
7016                         rc = -EBUSY;
7017                         goto load_error3;
7018                 }
7019         }
7020
7021         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7022
7023         rc = bnx2x_setup_leading(bp);
7024         if (rc) {
7025                 BNX2X_ERR("Setup leading failed!\n");
7026                 goto load_error3;
7027         }
7028
7029         if (CHIP_IS_E1H(bp) &&
7030             (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
7031                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7032                 bp->state = BNX2X_STATE_DISABLED;
7033         }
7034
7035         if (bp->state == BNX2X_STATE_OPEN)
7036                 for_each_nondefault_queue(bp, i) {
7037                         rc = bnx2x_setup_multi(bp, i);
7038                         if (rc)
7039                                 goto load_error3;
7040                 }
7041
7042         if (CHIP_IS_E1(bp))
7043                 bnx2x_set_mac_addr_e1(bp, 1);
7044         else
7045                 bnx2x_set_mac_addr_e1h(bp, 1);
7046
7047         if (bp->port.pmf)
7048                 bnx2x_initial_phy_init(bp, load_mode);
7049
7050         /* Start fast path */
7051         switch (load_mode) {
7052         case LOAD_NORMAL:
7053                 /* Tx queues only need to be re-enabled */
7054                 netif_tx_wake_all_queues(bp->dev);
7055                 /* Initialize the receive filter. */
7056                 bnx2x_set_rx_mode(bp->dev);
7057                 break;
7058
7059         case LOAD_OPEN:
7060                 netif_tx_start_all_queues(bp->dev);
7061                 /* Initialize the receive filter. */
7062                 bnx2x_set_rx_mode(bp->dev);
7063                 break;
7064
7065         case LOAD_DIAG:
7066                 /* Initialize the receive filter. */
7067                 bnx2x_set_rx_mode(bp->dev);
7068                 bp->state = BNX2X_STATE_DIAG;
7069                 break;
7070
7071         default:
7072                 break;
7073         }
7074
7075         if (!bp->port.pmf)
7076                 bnx2x__link_status_update(bp);
7077
7078         /* start the timer */
7079         mod_timer(&bp->timer, jiffies + bp->current_interval);
7080
7081
7082         return 0;
7083
7084 load_error3:
7085         bnx2x_int_disable_sync(bp, 1);
7086         if (!BP_NOMCP(bp)) {
7087                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7088                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7089         }
7090         bp->port.pmf = 0;
7091         /* Free SKBs, SGEs, TPA pool and driver internals */
7092         bnx2x_free_skbs(bp);
7093         for_each_rx_queue(bp, i)
7094                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7095 load_error2:
7096         /* Release IRQs */
7097         bnx2x_free_irq(bp);
7098 load_error1:
7099         bnx2x_napi_disable(bp);
7100         for_each_rx_queue(bp, i)
7101                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7102         bnx2x_free_mem(bp);
7103
7104         return rc;
7105 }
7106
7107 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7108 {
7109         struct bnx2x_fastpath *fp = &bp->fp[index];
7110         int rc;
7111
7112         /* halt the connection */
7113         fp->state = BNX2X_FP_STATE_HALTING;
7114         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7115
7116         /* Wait for completion */
7117         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7118                                &(fp->state), 1);
7119         if (rc) /* timeout */
7120                 return rc;
7121
7122         /* delete cfc entry */
7123         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7124
7125         /* Wait for completion */
7126         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7127                                &(fp->state), 1);
7128         return rc;
7129 }
7130
7131 static int bnx2x_stop_leading(struct bnx2x *bp)
7132 {
7133         __le16 dsb_sp_prod_idx;
7134         /* if the other port is handling traffic,
7135            this can take a lot of time */
7136         int cnt = 500;
7137         int rc;
7138
7139         might_sleep();
7140
7141         /* Send HALT ramrod */
7142         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7143         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7144
7145         /* Wait for completion */
7146         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7147                                &(bp->fp[0].state), 1);
7148         if (rc) /* timeout */
7149                 return rc;
7150
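        /* Snapshot the default status block producer; the PORT_DEL
         * completion below is detected by watching it advance. */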
7151         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7152
7153         /* Send PORT_DELETE ramrod */
7154         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7155
7156         /* Wait for the completion to arrive on the default status block.
7157            We are going to reset the chip anyway, so there is
7158            not much to do if this times out.
7159          */
7160         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7161                 if (!cnt) {
7162                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7163                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7164                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7165 #ifdef BNX2X_STOP_ON_ERROR
7166                         bnx2x_panic();
7167 #endif
7168                         rc = -EBUSY;
7169                         break;
7170                 }
7171                 cnt--;
7172                 msleep(1);
7173                 rmb(); /* Refresh the dsb_sp_prod */
7174         }
7175         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7176         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7177
7178         return rc;
7179 }
7180
7181 static void bnx2x_reset_func(struct bnx2x *bp)
7182 {
7183         int port = BP_PORT(bp);
7184         int func = BP_FUNC(bp);
7185         int base, i;
7186
7187         /* Configure IGU */
7188         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7189         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7190
7191         /* Clear ILT */
7192         base = FUNC_ILT_BASE(func);
7193         for (i = base; i < base + ILT_PER_FUNC; i++)
7194                 bnx2x_ilt_wr(bp, i, 0);
7195 }
7196
7197 static void bnx2x_reset_port(struct bnx2x *bp)
7198 {
7199         int port = BP_PORT(bp);
7200         u32 val;
7201
7202         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7203
7204         /* Do not rcv packets to BRB */
7205         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7206         /* Do not direct rcv packets that are not for MCP to the BRB */
7207         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7208                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7209
7210         /* Configure AEU */
7211         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7212
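        /* give in-flight packets time to drain out of the BRB */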
7213         msleep(100);
7214         /* Check for BRB port occupancy */
7215         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7216         if (val)
7217                 DP(NETIF_MSG_IFDOWN,
7218                    "BRB1 is not empty  %d blocks are occupied\n", val);
7219
7220         /* TODO: Close Doorbell port? */
7221 }
7222
7223 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7224 {
7225         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7226            BP_FUNC(bp), reset_code);
7227
7228         switch (reset_code) {
7229         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7230                 bnx2x_reset_port(bp);
7231                 bnx2x_reset_func(bp);
7232                 bnx2x_reset_common(bp);
7233                 break;
7234
7235         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7236                 bnx2x_reset_port(bp);
7237                 bnx2x_reset_func(bp);
7238                 break;
7239
7240         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7241                 bnx2x_reset_func(bp);
7242                 break;
7243
7244         default:
7245                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7246                 break;
7247         }
7248 }
7249
7250 /* must be called with rtnl_lock */
7251 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7252 {
7253         int port = BP_PORT(bp);
7254         u32 reset_code = 0;
7255         int i, cnt, rc;
7256
7257         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7258
7259         bp->rx_mode = BNX2X_RX_MODE_NONE;
7260         bnx2x_set_storm_rx_mode(bp);
7261
7262         bnx2x_netif_stop(bp, 1);
7263
7264         del_timer_sync(&bp->timer);
7265         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7266                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7267         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7268
7269         /* Release IRQs */
7270         bnx2x_free_irq(bp);
7271
7272         /* Wait until tx fastpath tasks complete */
7273         for_each_tx_queue(bp, i) {
7274                 struct bnx2x_fastpath *fp = &bp->fp[i];
7275
7276                 cnt = 1000;
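                /* poll for up to ~1 second per queue (1000 x msleep(1)) */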
7277                 while (bnx2x_has_tx_work_unload(fp)) {
7278
7279                         bnx2x_tx_int(fp);
7280                         if (!cnt) {
7281                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7282                                           i);
7283 #ifdef BNX2X_STOP_ON_ERROR
7284                                 bnx2x_panic();
7285                                 return -EBUSY;
7286 #else
7287                                 break;
7288 #endif
7289                         }
7290                         cnt--;
7291                         msleep(1);
7292                 }
7293         }
7294         /* Give HW time to discard old tx messages */
7295         msleep(1);
7296
7297         if (CHIP_IS_E1(bp)) {
7298                 struct mac_configuration_cmd *config =
7299                                                 bnx2x_sp(bp, mcast_config);
7300
7301                 bnx2x_set_mac_addr_e1(bp, 0);
7302
7303                 for (i = 0; i < config->hdr.length; i++)
7304                         CAM_INVALIDATE(config->config_table[i]);
7305
7306                 config->hdr.length = i;
7307                 if (CHIP_REV_IS_SLOW(bp))
7308                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7309                 else
7310                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7311                 config->hdr.client_id = bp->fp->cl_id;
7312                 config->hdr.reserved1 = 0;
7313
7314                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7315                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7316                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7317
7318         } else { /* E1H */
7319                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7320
7321                 bnx2x_set_mac_addr_e1h(bp, 0);
7322
7323                 for (i = 0; i < MC_HASH_SIZE; i++)
7324                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7325         }
7326
7327         if (unload_mode == UNLOAD_NORMAL)
7328                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7329
7330         else if (bp->flags & NO_WOL_FLAG) {
7331                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7332                 if (CHIP_IS_E1H(bp))
7333                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7334
7335         } else if (bp->wol) {
7336                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7337                 u8 *mac_addr = bp->dev->dev_addr;
7338                 u32 val;
7339                 /* The MAC address is written to entries 1-4 to
7340                    preserve entry 0, which is used by the PMF */
7341                 u8 entry = (BP_E1HVN(bp) + 1)*8;
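                /* Each MAC_MATCH entry is two 32-bit words (8 bytes):
                 * address bytes 0-1 go in the first word and bytes 2-5
                 * in the second, hence the (vn + 1) * 8 byte offset. */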
7342
7343                 val = (mac_addr[0] << 8) | mac_addr[1];
7344                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7345
7346                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7347                       (mac_addr[4] << 8) | mac_addr[5];
7348                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7349
7350                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7351
7352         } else
7353                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7354
7355         /* Close the multi and leading connections.
7356            Completions for the ramrods are collected synchronously */
7357         for_each_nondefault_queue(bp, i)
7358                 if (bnx2x_stop_multi(bp, i))
7359                         goto unload_error;
7360
7361         rc = bnx2x_stop_leading(bp);
7362         if (rc) {
7363                 BNX2X_ERR("Stop leading failed!\n");
7364 #ifdef BNX2X_STOP_ON_ERROR
7365                 return -EBUSY;
7366 #else
7367                 goto unload_error;
7368 #endif
7369         }
7370
7371 unload_error:
7372         if (!BP_NOMCP(bp))
7373                 reset_code = bnx2x_fw_command(bp, reset_code);
7374         else {
7375                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7376                    load_count[0], load_count[1], load_count[2]);
7377                 load_count[0]--;
7378                 load_count[1 + port]--;
7379                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7380                    load_count[0], load_count[1], load_count[2]);
7381                 if (load_count[0] == 0)
7382                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7383                 else if (load_count[1 + port] == 0)
7384                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7385                 else
7386                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7387         }
7388
7389         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7390             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7391                 bnx2x__link_reset(bp);
7392
7393         /* Reset the chip */
7394         bnx2x_reset_chip(bp, reset_code);
7395
7396         /* Report UNLOAD_DONE to MCP */
7397         if (!BP_NOMCP(bp))
7398                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7399
7400         bp->port.pmf = 0;
7401
7402         /* Free SKBs, SGEs, TPA pool and driver internals */
7403         bnx2x_free_skbs(bp);
7404         for_each_rx_queue(bp, i)
7405                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7406         for_each_rx_queue(bp, i)
7407                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7408         bnx2x_free_mem(bp);
7409
7410         bp->state = BNX2X_STATE_CLOSED;
7411
7412         netif_carrier_off(bp->dev);
7413
7414         return 0;
7415 }
7416
7417 static void bnx2x_reset_task(struct work_struct *work)
7418 {
7419         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7420
7421 #ifdef BNX2X_STOP_ON_ERROR
7422         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7423                   " so reset not done to allow debug dump,\n"
7424                   KERN_ERR " you will need to reboot when done\n");
7425         return;
7426 #endif
7427
7428         rtnl_lock();
7429
7430         if (!netif_running(bp->dev))
7431                 goto reset_task_exit;
7432
7433         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7434         bnx2x_nic_load(bp, LOAD_NORMAL);
7435
7436 reset_task_exit:
7437         rtnl_unlock();
7438 }
7439
7440 /* end of nic load/unload */
7441
7442 /* ethtool_ops */
7443
7444 /*
7445  * Init service functions
7446  */
7447
7448 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7449 {
7450         switch (func) {
7451         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7452         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7453         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7454         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7455         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7456         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7457         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7458         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7459         default:
7460                 BNX2X_ERR("Unsupported function index: %d\n", func);
7461                 return (u32)(-1);
7462         }
7463 }
7464
7465 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7466 {
7467         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7468
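        /* Writing a function number to the PGL pretend register makes the
         * chip treat subsequent GRC accesses from this PCI function as if
         * they came from that function, so the interrupt disable below
         * lands on function 0's registers. */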
7469         /* Flush all outstanding writes */
7470         mmiowb();
7471
7472         /* Pretend to be function 0 */
7473         REG_WR(bp, reg, 0);
7474         /* Flush the GRC transaction (in the chip) */
7475         new_val = REG_RD(bp, reg);
7476         if (new_val != 0) {
7477                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7478                           new_val);
7479                 BUG();
7480         }
7481
7482         /* From now we are in the "like-E1" mode */
7483         bnx2x_int_disable(bp);
7484
7485         /* Flush all outstanding writes */
7486         mmiowb();
7487
7488         /* Restore the original function settings */
7489         REG_WR(bp, reg, orig_func);
7490         new_val = REG_RD(bp, reg);
7491         if (new_val != orig_func) {
7492                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7493                           orig_func, new_val);
7494                 BUG();
7495         }
7496 }
7497
7498 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7499 {
7500         if (CHIP_IS_E1H(bp))
7501                 bnx2x_undi_int_disable_e1h(bp, func);
7502         else
7503                 bnx2x_int_disable(bp);
7504 }
7505
7506 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7507 {
7508         u32 val;
7509
7510         /* Check if there is any driver already loaded */
7511         val = REG_RD(bp, MISC_REG_UNPREPARED);
7512         if (val == 0x1) {
7513                 /* Check if it is the UNDI driver:
7514                  * UNDI initializes the CID offset for the normal doorbell to 0x7
7515                  */
7516                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7517                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7518                 if (val == 0x7) {
7519                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7520                         /* save our func */
7521                         int func = BP_FUNC(bp);
7522                         u32 swap_en;
7523                         u32 swap_val;
7524
7525                         /* clear the UNDI indication */
7526                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7527
7528                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7529
7530                         /* try to unload UNDI on port 0 */
7531                         bp->func = 0;
7532                         bp->fw_seq =
7533                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7534                                 DRV_MSG_SEQ_NUMBER_MASK);
7535                         reset_code = bnx2x_fw_command(bp, reset_code);
7536
7537                         /* if UNDI is loaded on the other port */
7538                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7539
7540                                 /* send "DONE" for previous unload */
7541                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7542
7543                                 /* unload UNDI on port 1 */
7544                                 bp->func = 1;
7545                                 bp->fw_seq =
7546                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7547                                         DRV_MSG_SEQ_NUMBER_MASK);
7548                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7549
7550                                 bnx2x_fw_command(bp, reset_code);
7551                         }
7552
7553                         /* now it's safe to release the lock */
7554                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7555
7556                         bnx2x_undi_int_disable(bp, func);
7557
7558                         /* close input traffic and wait for it */
7559                         /* Do not rcv packets to BRB */
7560                         REG_WR(bp,
7561                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7562                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7563                         /* Do not direct rcv packets that are not for MCP to
7564                          * the BRB */
7565                         REG_WR(bp,
7566                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7567                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7568                         /* clear AEU */
7569                         REG_WR(bp,
7570                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7571                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7572                         msleep(10);
7573
7574                         /* save NIG port swap info */
7575                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7576                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7577                         /* reset device */
7578                         REG_WR(bp,
7579                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7580                                0xd3ffffff);
7581                         REG_WR(bp,
7582                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7583                                0x1403);
7584                         /* take the NIG out of reset and restore swap values */
7585                         REG_WR(bp,
7586                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7587                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7588                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7589                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7590
7591                         /* send unload done to the MCP */
7592                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7593
7594                         /* restore our func and fw_seq */
7595                         bp->func = func;
7596                         bp->fw_seq =
7597                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7598                                 DRV_MSG_SEQ_NUMBER_MASK);
7599
7600                 } else
7601                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7602         }
7603 }
7604
7605 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7606 {
7607         u32 val, val2, val3, val4, id;
7608         u16 pmc;
7609
7610         /* Get the chip revision id and number. */
7611         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7612         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7613         id = ((val & 0xffff) << 16);
7614         val = REG_RD(bp, MISC_REG_CHIP_REV);
7615         id |= ((val & 0xf) << 12);
7616         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7617         id |= ((val & 0xff) << 4);
7618         val = REG_RD(bp, MISC_REG_BOND_ID);
7619         id |= (val & 0xf);
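        /* e.g. a 57710 A0 part (chip num 0x164e, rev 0) would yield
         * chip_id 0x164e0000 */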
7620         bp->common.chip_id = id;
7621         bp->link_params.chip_id = bp->common.chip_id;
7622         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7623
7624         val = (REG_RD(bp, 0x2874) & 0x55);
7625         if ((bp->common.chip_id & 0x1) ||
7626             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7627                 bp->flags |= ONE_PORT_FLAG;
7628                 BNX2X_DEV_INFO("single port device\n");
7629         }
7630
7631         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7632         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7633                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
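        /* i.e. a power-of-two multiple of 1MB, as encoded in NVM_CFG4 */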
7634         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7635                        bp->common.flash_size, bp->common.flash_size);
7636
7637         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7638         bp->link_params.shmem_base = bp->common.shmem_base;
7639         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7640
7641         if (!bp->common.shmem_base ||
7642             (bp->common.shmem_base < 0xA0000) ||
7643             (bp->common.shmem_base >= 0xC0000)) {
7644                 BNX2X_DEV_INFO("MCP not active\n");
7645                 bp->flags |= NO_MCP_FLAG;
7646                 return;
7647         }
7648
7649         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7650         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7651                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7652                 BNX2X_ERR("BAD MCP validity signature\n");
7653
7654         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7655         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7656
7657         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7658                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7659                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7660
7661         bp->link_params.feature_config_flags = 0;
7662         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7663         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7664                 bp->link_params.feature_config_flags |=
7665                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7666         else
7667                 bp->link_params.feature_config_flags &=
7668                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7669
7670         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7671         bp->common.bc_ver = val;
7672         BNX2X_DEV_INFO("bc_ver %X\n", val);
7673         if (val < BNX2X_BC_VER) {
7674                 /* for now only warn;
7675                  * later we might need to enforce this */
7676                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7677                           " please upgrade BC\n", BNX2X_BC_VER, val);
7678         }
7679
7680         if (BP_E1HVN(bp) == 0) {
7681                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7682                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7683         } else {
7684                 /* no WOL capability for E1HVN != 0 */
7685                 bp->flags |= NO_WOL_FLAG;
7686         }
7687         BNX2X_DEV_INFO("%sWoL capable\n",
7688                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7689
7690         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7691         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7692         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7693         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7694
7695         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7696                val, val2, val3, val4);
7697 }
7698
7699 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7700                                                     u32 switch_cfg)
7701 {
7702         int port = BP_PORT(bp);
7703         u32 ext_phy_type;
7704
7705         switch (switch_cfg) {
7706         case SWITCH_CFG_1G:
7707                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7708
7709                 ext_phy_type =
7710                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7711                 switch (ext_phy_type) {
7712                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7713                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7714                                        ext_phy_type);
7715
7716                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7717                                                SUPPORTED_10baseT_Full |
7718                                                SUPPORTED_100baseT_Half |
7719                                                SUPPORTED_100baseT_Full |
7720                                                SUPPORTED_1000baseT_Full |
7721                                                SUPPORTED_2500baseX_Full |
7722                                                SUPPORTED_TP |
7723                                                SUPPORTED_FIBRE |
7724                                                SUPPORTED_Autoneg |
7725                                                SUPPORTED_Pause |
7726                                                SUPPORTED_Asym_Pause);
7727                         break;
7728
7729                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7730                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7731                                        ext_phy_type);
7732
7733                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7734                                                SUPPORTED_10baseT_Full |
7735                                                SUPPORTED_100baseT_Half |
7736                                                SUPPORTED_100baseT_Full |
7737                                                SUPPORTED_1000baseT_Full |
7738                                                SUPPORTED_TP |
7739                                                SUPPORTED_FIBRE |
7740                                                SUPPORTED_Autoneg |
7741                                                SUPPORTED_Pause |
7742                                                SUPPORTED_Asym_Pause);
7743                         break;
7744
7745                 default:
7746                         BNX2X_ERR("NVRAM config error. "
7747                                   "BAD SerDes ext_phy_config 0x%x\n",
7748                                   bp->link_params.ext_phy_config);
7749                         return;
7750                 }
7751
7752                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7753                                            port*0x10);
7754                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7755                 break;
7756
7757         case SWITCH_CFG_10G:
7758                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7759
7760                 ext_phy_type =
7761                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7762                 switch (ext_phy_type) {
7763                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7764                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7765                                        ext_phy_type);
7766
7767                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7768                                                SUPPORTED_10baseT_Full |
7769                                                SUPPORTED_100baseT_Half |
7770                                                SUPPORTED_100baseT_Full |
7771                                                SUPPORTED_1000baseT_Full |
7772                                                SUPPORTED_2500baseX_Full |
7773                                                SUPPORTED_10000baseT_Full |
7774                                                SUPPORTED_TP |
7775                                                SUPPORTED_FIBRE |
7776                                                SUPPORTED_Autoneg |
7777                                                SUPPORTED_Pause |
7778                                                SUPPORTED_Asym_Pause);
7779                         break;
7780
7781                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7782                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7783                                        ext_phy_type);
7784
7785                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7786                                                SUPPORTED_1000baseT_Full |
7787                                                SUPPORTED_FIBRE |
7788                                                SUPPORTED_Autoneg |
7789                                                SUPPORTED_Pause |
7790                                                SUPPORTED_Asym_Pause);
7791                         break;
7792
7793                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7794                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7795                                        ext_phy_type);
7796
7797                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7798                                                SUPPORTED_2500baseX_Full |
7799                                                SUPPORTED_1000baseT_Full |
7800                                                SUPPORTED_FIBRE |
7801                                                SUPPORTED_Autoneg |
7802                                                SUPPORTED_Pause |
7803                                                SUPPORTED_Asym_Pause);
7804                         break;
7805
7806                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7807                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7808                                        ext_phy_type);
7809
7810                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7811                                                SUPPORTED_FIBRE |
7812                                                SUPPORTED_Pause |
7813                                                SUPPORTED_Asym_Pause);
7814                         break;
7815
7816                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7817                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7818                                        ext_phy_type);
7819
7820                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7821                                                SUPPORTED_1000baseT_Full |
7822                                                SUPPORTED_FIBRE |
7823                                                SUPPORTED_Pause |
7824                                                SUPPORTED_Asym_Pause);
7825                         break;
7826
7827                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7828                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7829                                        ext_phy_type);
7830
7831                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7832                                                SUPPORTED_1000baseT_Full |
7833                                                SUPPORTED_Autoneg |
7834                                                SUPPORTED_FIBRE |
7835                                                SUPPORTED_Pause |
7836                                                SUPPORTED_Asym_Pause);
7837                         break;
7838
7839                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7840                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7841                                        ext_phy_type);
7842
7843                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7844                                                SUPPORTED_TP |
7845                                                SUPPORTED_Autoneg |
7846                                                SUPPORTED_Pause |
7847                                                SUPPORTED_Asym_Pause);
7848                         break;
7849
7850                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7851                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7852                                        ext_phy_type);
7853
7854                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7855                                                SUPPORTED_10baseT_Full |
7856                                                SUPPORTED_100baseT_Half |
7857                                                SUPPORTED_100baseT_Full |
7858                                                SUPPORTED_1000baseT_Full |
7859                                                SUPPORTED_10000baseT_Full |
7860                                                SUPPORTED_TP |
7861                                                SUPPORTED_Autoneg |
7862                                                SUPPORTED_Pause |
7863                                                SUPPORTED_Asym_Pause);
7864                         break;
7865
7866                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7867                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7868                                   bp->link_params.ext_phy_config);
7869                         break;
7870
7871                 default:
7872                         BNX2X_ERR("NVRAM config error. "
7873                                   "BAD XGXS ext_phy_config 0x%x\n",
7874                                   bp->link_params.ext_phy_config);
7875                         return;
7876                 }
7877
7878                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7879                                            port*0x18);
7880                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7881
7882                 break;
7883
7884         default:
7885                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7886                           bp->port.link_config);
7887                 return;
7888         }
7889         bp->link_params.phy_addr = bp->port.phy_addr;
7890
7891         /* mask what we support according to speed_cap_mask */
7892         if (!(bp->link_params.speed_cap_mask &
7893                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7894                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7895
7896         if (!(bp->link_params.speed_cap_mask &
7897                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7898                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7899
7900         if (!(bp->link_params.speed_cap_mask &
7901                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7902                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7903
7904         if (!(bp->link_params.speed_cap_mask &
7905                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7906                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7907
7908         if (!(bp->link_params.speed_cap_mask &
7909                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7910                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7911                                         SUPPORTED_1000baseT_Full);
7912
7913         if (!(bp->link_params.speed_cap_mask &
7914                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7915                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7916
7917         if (!(bp->link_params.speed_cap_mask &
7918                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7919                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7920
7921         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7922 }
7923
7924 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7925 {
7926         bp->link_params.req_duplex = DUPLEX_FULL;
7927
7928         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7929         case PORT_FEATURE_LINK_SPEED_AUTO:
7930                 if (bp->port.supported & SUPPORTED_Autoneg) {
7931                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7932                         bp->port.advertising = bp->port.supported;
7933                 } else {
7934                         u32 ext_phy_type =
7935                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7936
7937                         if ((ext_phy_type ==
7938                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7939                             (ext_phy_type ==
7940                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7941                                 /* force 10G, no AN */
7942                                 bp->link_params.req_line_speed = SPEED_10000;
7943                                 bp->port.advertising =
7944                                                 (ADVERTISED_10000baseT_Full |
7945                                                  ADVERTISED_FIBRE);
7946                                 break;
7947                         }
7948                         BNX2X_ERR("NVRAM config error. "
7949                                   "Invalid link_config 0x%x"
7950                                   "  Autoneg not supported\n",
7951                                   bp->port.link_config);
7952                         return;
7953                 }
7954                 break;
7955
7956         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7957                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7958                         bp->link_params.req_line_speed = SPEED_10;
7959                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7960                                                 ADVERTISED_TP);
7961                 } else {
7962                         BNX2X_ERR("NVRAM config error. "
7963                                   "Invalid link_config 0x%x"
7964                                   "  speed_cap_mask 0x%x\n",
7965                                   bp->port.link_config,
7966                                   bp->link_params.speed_cap_mask);
7967                         return;
7968                 }
7969                 break;
7970
7971         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7972                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7973                         bp->link_params.req_line_speed = SPEED_10;
7974                         bp->link_params.req_duplex = DUPLEX_HALF;
7975                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7976                                                 ADVERTISED_TP);
7977                 } else {
7978                         BNX2X_ERR("NVRAM config error. "
7979                                   "Invalid link_config 0x%x"
7980                                   "  speed_cap_mask 0x%x\n",
7981                                   bp->port.link_config,
7982                                   bp->link_params.speed_cap_mask);
7983                         return;
7984                 }
7985                 break;
7986
7987         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7988                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7989                         bp->link_params.req_line_speed = SPEED_100;
7990                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7991                                                 ADVERTISED_TP);
7992                 } else {
7993                         BNX2X_ERR("NVRAM config error. "
7994                                   "Invalid link_config 0x%x"
7995                                   "  speed_cap_mask 0x%x\n",
7996                                   bp->port.link_config,
7997                                   bp->link_params.speed_cap_mask);
7998                         return;
7999                 }
8000                 break;
8001
8002         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8003                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8004                         bp->link_params.req_line_speed = SPEED_100;
8005                         bp->link_params.req_duplex = DUPLEX_HALF;
8006                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8007                                                 ADVERTISED_TP);
8008                 } else {
8009                         BNX2X_ERR("NVRAM config error. "
8010                                   "Invalid link_config 0x%x"
8011                                   "  speed_cap_mask 0x%x\n",
8012                                   bp->port.link_config,
8013                                   bp->link_params.speed_cap_mask);
8014                         return;
8015                 }
8016                 break;
8017
8018         case PORT_FEATURE_LINK_SPEED_1G:
8019                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8020                         bp->link_params.req_line_speed = SPEED_1000;
8021                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8022                                                 ADVERTISED_TP);
8023                 } else {
8024                         BNX2X_ERR("NVRAM config error. "
8025                                   "Invalid link_config 0x%x"
8026                                   "  speed_cap_mask 0x%x\n",
8027                                   bp->port.link_config,
8028                                   bp->link_params.speed_cap_mask);
8029                         return;
8030                 }
8031                 break;
8032
8033         case PORT_FEATURE_LINK_SPEED_2_5G:
8034                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8035                         bp->link_params.req_line_speed = SPEED_2500;
8036                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8037                                                 ADVERTISED_TP);
8038                 } else {
8039                         BNX2X_ERR("NVRAM config error. "
8040                                   "Invalid link_config 0x%x"
8041                                   "  speed_cap_mask 0x%x\n",
8042                                   bp->port.link_config,
8043                                   bp->link_params.speed_cap_mask);
8044                         return;
8045                 }
8046                 break;
8047
8048         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8049         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8050         case PORT_FEATURE_LINK_SPEED_10G_KR:
8051                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8052                         bp->link_params.req_line_speed = SPEED_10000;
8053                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8054                                                 ADVERTISED_FIBRE);
8055                 } else {
8056                         BNX2X_ERR("NVRAM config error. "
8057                                   "Invalid link_config 0x%x"
8058                                   "  speed_cap_mask 0x%x\n",
8059                                   bp->port.link_config,
8060                                   bp->link_params.speed_cap_mask);
8061                         return;
8062                 }
8063                 break;
8064
8065         default:
8066                 BNX2X_ERR("NVRAM config error. "
8067                           "BAD link speed link_config 0x%x\n",
8068                           bp->port.link_config);
8069                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8070                 bp->port.advertising = bp->port.supported;
8071                 break;
8072         }
8073
8074         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8075                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8076         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8077             !(bp->port.supported & SUPPORTED_Autoneg))
8078                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8079
8080         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8081                        "  advertising 0x%x\n",
8082                        bp->link_params.req_line_speed,
8083                        bp->link_params.req_duplex,
8084                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8085 }
8086
8087 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8088 {
8089         int port = BP_PORT(bp);
8090         u32 val, val2;
8091         u32 config;
8092         u16 i;
8093
8094         bp->link_params.bp = bp;
8095         bp->link_params.port = port;
8096
8097         bp->link_params.lane_config =
8098                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8099         bp->link_params.ext_phy_config =
8100                 SHMEM_RD(bp,
8101                          dev_info.port_hw_config[port].external_phy_config);
8102         bp->link_params.speed_cap_mask =
8103                 SHMEM_RD(bp,
8104                          dev_info.port_hw_config[port].speed_capability_mask);
8105
8106         bp->port.link_config =
8107                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8108
8109         /* Get the 4-lane XGXS rx and tx config (two 16-bit lanes per word) */
8110         for (i = 0; i < 2; i++) {
8111                 val = SHMEM_RD(bp,
8112                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8113                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8114                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8115
8116                 val = SHMEM_RD(bp,
8117                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8118                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8119                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8120         }
8121
8122         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8123         if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8124                 bp->link_params.feature_config_flags |=
8125                                 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8126         else
8127                 bp->link_params.feature_config_flags &=
8128                                 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8129
8130         /* If the device is capable of WoL, set the default state according
8131          * to the HW
8132          */
8133         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8134                    (config & PORT_FEATURE_WOL_ENABLED));
8135
8136         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8137                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8138                        bp->link_params.lane_config,
8139                        bp->link_params.ext_phy_config,
8140                        bp->link_params.speed_cap_mask, bp->port.link_config);
8141
8142         bp->link_params.switch_cfg = (bp->port.link_config &
8143                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8144         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8145
8146         bnx2x_link_settings_requested(bp);
8147
8148         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8149         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8150         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8151         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8152         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8153         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8154         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8155         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8156         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8157         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8158 }
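/* Worked example for the MAC assembly above (assumed shmem values):
 * mac_upper = 0x0010 and mac_lower = 0x18badcaf yield
 * dev_addr = 00:10:18:ba:dc:af -- the upper word carries the two
 * most significant bytes, the lower word the remaining four.
 */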
8159
8160 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8161 {
8162         int func = BP_FUNC(bp);
8163         u32 val, val2;
8164         int rc = 0;
8165
8166         bnx2x_get_common_hwinfo(bp);
8167
8168         bp->e1hov = 0;
8169         bp->e1hmf = 0;
8170         if (CHIP_IS_E1H(bp)) {
8171                 bp->mf_config =
8172                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8173
8174                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8175                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8176                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8177
8178                         bp->e1hov = val;
8179                         bp->e1hmf = 1;
8180                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
8181                                        "(0x%04x)\n",
8182                                        func, bp->e1hov, bp->e1hov);
8183                 } else {
8184                         BNX2X_DEV_INFO("single function mode\n");
8185                         if (BP_E1HVN(bp)) {
8186                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8187                                           "  aborting\n", func);
8188                                 rc = -EPERM;
8189                         }
8190                 }
8191         }
8192
8193         if (!BP_NOMCP(bp)) {
8194                 bnx2x_get_port_hwinfo(bp);
8195
8196                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8197                               DRV_MSG_SEQ_NUMBER_MASK);
8198                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8199         }
8200
8201         if (IS_E1HMF(bp)) {
8202                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8203                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8204                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8205                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8206                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8207                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8208                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8209                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8210                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8211                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8212                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8213                                ETH_ALEN);
8214                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8215                                ETH_ALEN);
8216                 }
8217
8218                 return rc;
8219         }
8220
8221         if (BP_NOMCP(bp)) {
8222                 /* only supposed to happen on emulation/FPGA */
8223                 BNX2X_ERR("warning: random MAC workaround active\n");
8224                 random_ether_addr(bp->dev->dev_addr);
8225                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8226         }
8227
8228         return rc;
8229 }
8230
8231 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8232 {
8233         int func = BP_FUNC(bp);
8234         int timer_interval;
8235         int rc;
8236
8237         /* Disable interrupt handling until HW is initialized */
8238         atomic_set(&bp->intr_sem, 1);
8239         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8240
8241         mutex_init(&bp->port.phy_mutex);
8242
8243         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8244         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8245
8246         rc = bnx2x_get_hwinfo(bp);
8247
8248         /* need to reset chip if undi was active */
8249         if (!BP_NOMCP(bp))
8250                 bnx2x_undi_unload(bp);
8251
8252         if (CHIP_REV_IS_FPGA(bp))
8253                 printk(KERN_ERR PFX "FPGA detected\n");
8254
8255         if (BP_NOMCP(bp) && (func == 0))
8256                 printk(KERN_ERR PFX
8257                        "MCP disabled, must load devices in order!\n");
8258
8259         /* Set multi queue mode */
8260         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8261             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8262                 printk(KERN_ERR PFX
8263                       "Multi disabled since int_mode requested is not MSI-X\n");
8264                 multi_mode = ETH_RSS_MODE_DISABLED;
8265         }
8266         bp->multi_mode = multi_mode;
8267
8268
8269         /* Set TPA flags */
8270         if (disable_tpa) {
8271                 bp->flags &= ~TPA_ENABLE_FLAG;
8272                 bp->dev->features &= ~NETIF_F_LRO;
8273         } else {
8274                 bp->flags |= TPA_ENABLE_FLAG;
8275                 bp->dev->features |= NETIF_F_LRO;
8276         }
8277
8278         bp->mrrs = mrrs;
8279
8280         bp->tx_ring_size = MAX_TX_AVAIL;
8281         bp->rx_ring_size = MAX_RX_AVAIL;
8282
8283         bp->rx_csum = 1;
8284
8285         bp->tx_ticks = 50;
8286         bp->rx_ticks = 25;
8287
8288         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8289         bp->current_interval = (poll ? poll : timer_interval);
8290
8291         init_timer(&bp->timer);
8292         bp->timer.expires = jiffies + bp->current_interval;
8293         bp->timer.data = (unsigned long) bp;
8294         bp->timer.function = bnx2x_timer;
8295
8296         return rc;
8297 }
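/* A minimal sketch of the reader side that the smp_wmb() in
 * bnx2x_init_bp() pairs with (assumed pattern -- the actual check
 * lives in the interrupt handlers elsewhere in this file):
 *
 *	smp_rmb();	// pairs with the smp_wmb() after atomic_set()
 *	if (unlikely(atomic_read(&bp->intr_sem) != 0))
 *		return IRQ_HANDLED;	// HW not initialized yet
 *
 * Without the barrier pair, another CPU taking an interrupt could
 * observe a stale intr_sem == 0 and touch uninitialized state.
 */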
8298
8299 /*
8300  * ethtool service functions
8301  */
8302
8303 /* All ethtool functions called with rtnl_lock */
8304
8305 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8306 {
8307         struct bnx2x *bp = netdev_priv(dev);
8308
8309         cmd->supported = bp->port.supported;
8310         cmd->advertising = bp->port.advertising;
8311
8312         if (netif_carrier_ok(dev)) {
8313                 cmd->speed = bp->link_vars.line_speed;
8314                 cmd->duplex = bp->link_vars.duplex;
8315         } else {
8316                 cmd->speed = bp->link_params.req_line_speed;
8317                 cmd->duplex = bp->link_params.req_duplex;
8318         }
8319         if (IS_E1HMF(bp)) {
8320                 u16 vn_max_rate;
8321
8322                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8323                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8324                 if (vn_max_rate < cmd->speed)
8325                         cmd->speed = vn_max_rate;
8326         }
8327
8328         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8329                 u32 ext_phy_type =
8330                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8331
8332                 switch (ext_phy_type) {
8333                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8334                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8335                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8336                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8337                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8338                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8339                         cmd->port = PORT_FIBRE;
8340                         break;
8341
8342                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8343                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8344                         cmd->port = PORT_TP;
8345                         break;
8346
8347                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8348                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8349                                   bp->link_params.ext_phy_config);
8350                         break;
8351
8352                 default:
8353                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8354                            bp->link_params.ext_phy_config);
8355                         break;
8356                 }
8357         } else
8358                 cmd->port = PORT_TP;
8359
8360         cmd->phy_address = bp->port.phy_addr;
8361         cmd->transceiver = XCVR_INTERNAL;
8362
8363         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8364                 cmd->autoneg = AUTONEG_ENABLE;
8365         else
8366                 cmd->autoneg = AUTONEG_DISABLE;
8367
8368         cmd->maxtxpkt = 0;
8369         cmd->maxrxpkt = 0;
8370
8371         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8372            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8373            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8374            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8375            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8376            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8377            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8378
8379         return 0;
8380 }
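/* Worked example for the vn_max_rate clamp above (assumed mf_config
 * value): if the FUNC_MF_CFG_MAX_BW field holds 50, this function is
 * capped at 50 * 100 = 5000 Mbps, so a 10G link is reported to
 * ethtool as 5000 in multi-function mode.
 */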
8381
8382 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8383 {
8384         struct bnx2x *bp = netdev_priv(dev);
8385         u32 advertising;
8386
8387         if (IS_E1HMF(bp))
8388                 return 0;
8389
8390         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8391            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8392            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8393            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8394            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8395            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8396            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8397
8398         if (cmd->autoneg == AUTONEG_ENABLE) {
8399                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8400                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8401                         return -EINVAL;
8402                 }
8403
8404                 /* advertise the requested speed and duplex if supported */
8405                 cmd->advertising &= bp->port.supported;
8406
8407                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8408                 bp->link_params.req_duplex = DUPLEX_FULL;
8409                 bp->port.advertising |= (ADVERTISED_Autoneg |
8410                                          cmd->advertising);
8411
8412         } else { /* forced speed */
8413                 /* advertise the requested speed and duplex if supported */
8414                 switch (cmd->speed) {
8415                 case SPEED_10:
8416                         if (cmd->duplex == DUPLEX_FULL) {
8417                                 if (!(bp->port.supported &
8418                                       SUPPORTED_10baseT_Full)) {
8419                                         DP(NETIF_MSG_LINK,
8420                                            "10M full not supported\n");
8421                                         return -EINVAL;
8422                                 }
8423
8424                                 advertising = (ADVERTISED_10baseT_Full |
8425                                                ADVERTISED_TP);
8426                         } else {
8427                                 if (!(bp->port.supported &
8428                                       SUPPORTED_10baseT_Half)) {
8429                                         DP(NETIF_MSG_LINK,
8430                                            "10M half not supported\n");
8431                                         return -EINVAL;
8432                                 }
8433
8434                                 advertising = (ADVERTISED_10baseT_Half |
8435                                                ADVERTISED_TP);
8436                         }
8437                         break;
8438
8439                 case SPEED_100:
8440                         if (cmd->duplex == DUPLEX_FULL) {
8441                                 if (!(bp->port.supported &
8442                                                 SUPPORTED_100baseT_Full)) {
8443                                         DP(NETIF_MSG_LINK,
8444                                            "100M full not supported\n");
8445                                         return -EINVAL;
8446                                 }
8447
8448                                 advertising = (ADVERTISED_100baseT_Full |
8449                                                ADVERTISED_TP);
8450                         } else {
8451                                 if (!(bp->port.supported &
8452                                                 SUPPORTED_100baseT_Half)) {
8453                                         DP(NETIF_MSG_LINK,
8454                                            "100M half not supported\n");
8455                                         return -EINVAL;
8456                                 }
8457
8458                                 advertising = (ADVERTISED_100baseT_Half |
8459                                                ADVERTISED_TP);
8460                         }
8461                         break;
8462
8463                 case SPEED_1000:
8464                         if (cmd->duplex != DUPLEX_FULL) {
8465                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8466                                 return -EINVAL;
8467                         }
8468
8469                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8470                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8471                                 return -EINVAL;
8472                         }
8473
8474                         advertising = (ADVERTISED_1000baseT_Full |
8475                                        ADVERTISED_TP);
8476                         break;
8477
8478                 case SPEED_2500:
8479                         if (cmd->duplex != DUPLEX_FULL) {
8480                                 DP(NETIF_MSG_LINK,
8481                                    "2.5G half not supported\n");
8482                                 return -EINVAL;
8483                         }
8484
8485                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8486                                 DP(NETIF_MSG_LINK,
8487                                    "2.5G full not supported\n");
8488                                 return -EINVAL;
8489                         }
8490
8491                         advertising = (ADVERTISED_2500baseX_Full |
8492                                        ADVERTISED_TP);
8493                         break;
8494
8495                 case SPEED_10000:
8496                         if (cmd->duplex != DUPLEX_FULL) {
8497                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8498                                 return -EINVAL;
8499                         }
8500
8501                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8502                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8503                                 return -EINVAL;
8504                         }
8505
8506                         advertising = (ADVERTISED_10000baseT_Full |
8507                                        ADVERTISED_FIBRE);
8508                         break;
8509
8510                 default:
8511                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8512                         return -EINVAL;
8513                 }
8514
8515                 bp->link_params.req_line_speed = cmd->speed;
8516                 bp->link_params.req_duplex = cmd->duplex;
8517                 bp->port.advertising = advertising;
8518         }
8519
8520         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8521            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8522            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8523            bp->port.advertising);
8524
8525         if (netif_running(dev)) {
8526                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8527                 bnx2x_link_set(bp);
8528         }
8529
8530         return 0;
8531 }
8532
8533 #define PHY_FW_VER_LEN                  10
8534
8535 static void bnx2x_get_drvinfo(struct net_device *dev,
8536                               struct ethtool_drvinfo *info)
8537 {
8538         struct bnx2x *bp = netdev_priv(dev);
8539         u8 phy_fw_ver[PHY_FW_VER_LEN];
8540
8541         strcpy(info->driver, DRV_MODULE_NAME);
8542         strcpy(info->version, DRV_MODULE_VERSION);
8543
8544         phy_fw_ver[0] = '\0';
8545         if (bp->port.pmf) {
8546                 bnx2x_acquire_phy_lock(bp);
8547                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8548                                              (bp->state != BNX2X_STATE_CLOSED),
8549                                              phy_fw_ver, PHY_FW_VER_LEN);
8550                 bnx2x_release_phy_lock(bp);
8551         }
8552
8553         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8554                  (bp->common.bc_ver & 0xff0000) >> 16,
8555                  (bp->common.bc_ver & 0xff00) >> 8,
8556                  (bp->common.bc_ver & 0xff),
8557                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8558         strcpy(info->bus_info, pci_name(bp->pdev));
8559         info->n_stats = BNX2X_NUM_STATS;
8560         info->testinfo_len = BNX2X_NUM_TESTS;
8561         info->eedump_len = bp->common.flash_size;
8562         info->regdump_len = 0;
8563 }
8564
8565 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8566 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8567
8568 static int bnx2x_get_regs_len(struct net_device *dev)
8569 {
8570         static u32 regdump_len;
8571         struct bnx2x *bp = netdev_priv(dev);
8572         int i;
8573
8574         if (regdump_len)
8575                 return regdump_len;
8576
8577         if (CHIP_IS_E1(bp)) {
8578                 for (i = 0; i < REGS_COUNT; i++)
8579                         if (IS_E1_ONLINE(reg_addrs[i].info))
8580                                 regdump_len += reg_addrs[i].size;
8581
8582                 for (i = 0; i < WREGS_COUNT_E1; i++)
8583                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8584                                 regdump_len += wreg_addrs_e1[i].size *
8585                                         (1 + wreg_addrs_e1[i].read_regs_count);
8586
8587         } else { /* E1H */
8588                 for (i = 0; i < REGS_COUNT; i++)
8589                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8590                                 regdump_len += reg_addrs[i].size;
8591
8592                 for (i = 0; i < WREGS_COUNT_E1H; i++)
8593                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8594                                 regdump_len += wreg_addrs_e1h[i].size *
8595                                         (1 + wreg_addrs_e1h[i].read_regs_count);
8596         }
8597         regdump_len *= 4;
8598         regdump_len += sizeof(struct dump_hdr);
8599
8600         return regdump_len;
8601 }
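/* The length computed above is, in bytes:
 *
 *	regdump_len = 4 * (sum of online reg block sizes
 *			   + sum of online wreg block sizes
 *			     * (1 + read_regs_count))
 *		      + sizeof(struct dump_hdr)
 *
 * Note that regdump_len is a function-static cache: the first call
 * fixes the value, which assumes every bnx2x device in the system is
 * the same chip family (E1 vs E1H).
 */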
8602
8603 static void bnx2x_get_regs(struct net_device *dev,
8604                            struct ethtool_regs *regs, void *_p)
8605 {
8606         u32 *p = _p, i, j;
8607         struct bnx2x *bp = netdev_priv(dev);
8608         struct dump_hdr dump_hdr = {0};
8609
8610         regs->version = 0;
8611         memset(p, 0, regs->len);
8612
8613         if (!netif_running(bp->dev))
8614                 return;
8615
8616         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8617         dump_hdr.dump_sign = dump_sign_all;
8618         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8619         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8620         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8621         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8622         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8623
8624         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8625         p += dump_hdr.hdr_size + 1;
8626
8627         if (CHIP_IS_E1(bp)) {
8628                 for (i = 0; i < REGS_COUNT; i++)
8629                         if (IS_E1_ONLINE(reg_addrs[i].info))
8630                                 for (j = 0; j < reg_addrs[i].size; j++)
8631                                         *p++ = REG_RD(bp,
8632                                                       reg_addrs[i].addr + j*4);
8633
8634         } else { /* E1H */
8635                 for (i = 0; i < REGS_COUNT; i++)
8636                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8637                                 for (j = 0; j < reg_addrs[i].size; j++)
8638                                         *p++ = REG_RD(bp,
8639                                                       reg_addrs[i].addr + j*4);
8640         }
8641 }
8642
8643 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8644 {
8645         struct bnx2x *bp = netdev_priv(dev);
8646
8647         if (bp->flags & NO_WOL_FLAG) {
8648                 wol->supported = 0;
8649                 wol->wolopts = 0;
8650         } else {
8651                 wol->supported = WAKE_MAGIC;
8652                 if (bp->wol)
8653                         wol->wolopts = WAKE_MAGIC;
8654                 else
8655                         wol->wolopts = 0;
8656         }
8657         memset(&wol->sopass, 0, sizeof(wol->sopass));
8658 }
8659
8660 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8661 {
8662         struct bnx2x *bp = netdev_priv(dev);
8663
8664         if (wol->wolopts & ~WAKE_MAGIC)
8665                 return -EINVAL;
8666
8667         if (wol->wolopts & WAKE_MAGIC) {
8668                 if (bp->flags & NO_WOL_FLAG)
8669                         return -EINVAL;
8670
8671                 bp->wol = 1;
8672         } else
8673                 bp->wol = 0;
8674
8675         return 0;
8676 }
8677
8678 static u32 bnx2x_get_msglevel(struct net_device *dev)
8679 {
8680         struct bnx2x *bp = netdev_priv(dev);
8681
8682         return bp->msglevel;
8683 }
8684
8685 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8686 {
8687         struct bnx2x *bp = netdev_priv(dev);
8688
8689         if (capable(CAP_NET_ADMIN))
8690                 bp->msglevel = level;
8691 }
8692
8693 static int bnx2x_nway_reset(struct net_device *dev)
8694 {
8695         struct bnx2x *bp = netdev_priv(dev);
8696
8697         if (!bp->port.pmf)
8698                 return 0;
8699
8700         if (netif_running(dev)) {
8701                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8702                 bnx2x_link_set(bp);
8703         }
8704
8705         return 0;
8706 }
8707
8708 static u32 bnx2x_get_link(struct net_device *dev)
8710 {
8711         struct bnx2x *bp = netdev_priv(dev);
8712
8713         return bp->link_vars.link_up;
8714 }
8715
8716 static int bnx2x_get_eeprom_len(struct net_device *dev)
8717 {
8718         struct bnx2x *bp = netdev_priv(dev);
8719
8720         return bp->common.flash_size;
8721 }
8722
8723 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8724 {
8725         int port = BP_PORT(bp);
8726         int count, i;
8727         u32 val = 0;
8728
8729         /* adjust timeout for emulation/FPGA */
8730         count = NVRAM_TIMEOUT_COUNT;
8731         if (CHIP_REV_IS_SLOW(bp))
8732                 count *= 100;
8733
8734         /* request access to nvram interface */
8735         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8736                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8737
8738         for (i = 0; i < count*10; i++) {
8739                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8740                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8741                         break;
8742
8743                 udelay(5);
8744         }
8745
8746         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8747                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8748                 return -EBUSY;
8749         }
8750
8751         return 0;
8752 }
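/* The poll above budgets count * 10 iterations of udelay(5), i.e.
 * roughly NVRAM_TIMEOUT_COUNT * 50 microseconds on real silicon
 * (100x that on emulation/FPGA).  Every successful acquire must be
 * balanced by bnx2x_release_nvram_lock() below, otherwise the
 * per-port SW arbitration bit stays held and other users get -EBUSY.
 */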
8753
8754 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8755 {
8756         int port = BP_PORT(bp);
8757         int count, i;
8758         u32 val = 0;
8759
8760         /* adjust timeout for emulation/FPGA */
8761         count = NVRAM_TIMEOUT_COUNT;
8762         if (CHIP_REV_IS_SLOW(bp))
8763                 count *= 100;
8764
8765         /* relinquish nvram interface */
8766         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8767                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8768
8769         for (i = 0; i < count*10; i++) {
8770                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8771                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8772                         break;
8773
8774                 udelay(5);
8775         }
8776
8777         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8778                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8779                 return -EBUSY;
8780         }
8781
8782         return 0;
8783 }
8784
8785 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8786 {
8787         u32 val;
8788
8789         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8790
8791         /* enable both bits, even on read */
8792         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8793                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8794                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8795 }
8796
8797 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8798 {
8799         u32 val;
8800
8801         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8802
8803         /* disable both bits, even after read */
8804         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8805                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8806                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8807 }
8808
8809 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8810                                   u32 cmd_flags)
8811 {
8812         int count, i, rc;
8813         u32 val;
8814
8815         /* build the command word */
8816         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8817
8818         /* need to clear DONE bit separately */
8819         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8820
8821         /* address of the NVRAM to read from */
8822         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8823                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8824
8825         /* issue a read command */
8826         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8827
8828         /* adjust timeout for emulation/FPGA */
8829         count = NVRAM_TIMEOUT_COUNT;
8830         if (CHIP_REV_IS_SLOW(bp))
8831                 count *= 100;
8832
8833         /* wait for completion */
8834         *ret_val = 0;
8835         rc = -EBUSY;
8836         for (i = 0; i < count; i++) {
8837                 udelay(5);
8838                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8839
8840                 if (val & MCPR_NVM_COMMAND_DONE) {
8841                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8842                         /* we read nvram data in cpu order,
8843                          * but ethtool sees it as an array of bytes;
8844                          * converting to big-endian gives the expected order */
8845                         *ret_val = cpu_to_be32(val);
8846                         rc = 0;
8847                         break;
8848                 }
8849         }
8850
8851         return rc;
8852 }
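/* Worked example for the endianness conversion above (little-endian
 * host, assumed NVRAM contents): if the flash bytes at 'offset' are
 * 12 34 56 78, MCP_REG_MCPR_NVM_READ yields val == 0x12345678 in cpu
 * order; cpu_to_be32() stores it so that a memcpy() of *ret_val
 * reproduces the bytes 12 34 56 78 -- the order ethtool users expect.
 */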
8853
8854 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8855                             int buf_size)
8856 {
8857         int rc;
8858         u32 cmd_flags;
8859         __be32 val;
8860
8861         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8862                 DP(BNX2X_MSG_NVM,
8863                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8864                    offset, buf_size);
8865                 return -EINVAL;
8866         }
8867
8868         if (offset + buf_size > bp->common.flash_size) {
8869                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8870                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8871                    offset, buf_size, bp->common.flash_size);
8872                 return -EINVAL;
8873         }
8874
8875         /* request access to nvram interface */
8876         rc = bnx2x_acquire_nvram_lock(bp);
8877         if (rc)
8878                 return rc;
8879
8880         /* enable access to nvram interface */
8881         bnx2x_enable_nvram_access(bp);
8882
8883         /* read the first word(s) */
8884         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8885         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8886                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8887                 memcpy(ret_buf, &val, 4);
8888
8889                 /* advance to the next dword */
8890                 offset += sizeof(u32);
8891                 ret_buf += sizeof(u32);
8892                 buf_size -= sizeof(u32);
8893                 cmd_flags = 0;
8894         }
8895
8896         if (rc == 0) {
8897                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8898                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8899                 memcpy(ret_buf, &val, 4);
8900         }
8901
8902         /* disable access to nvram interface */
8903         bnx2x_disable_nvram_access(bp);
8904         bnx2x_release_nvram_lock(bp);
8905
8906         return rc;
8907 }
8908
8909 static int bnx2x_get_eeprom(struct net_device *dev,
8910                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8911 {
8912         struct bnx2x *bp = netdev_priv(dev);
8913         int rc;
8914
8915         if (!netif_running(dev))
8916                 return -EAGAIN;
8917
8918         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8919            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8920            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8921            eeprom->len, eeprom->len);
8922
8923         /* parameters already validated in ethtool_get_eeprom */
8924
8925         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8926
8927         return rc;
8928 }
8929
8930 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8931                                    u32 cmd_flags)
8932 {
8933         int count, i, rc;
8934
8935         /* build the command word */
8936         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8937
8938         /* need to clear DONE bit separately */
8939         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8940
8941         /* write the data */
8942         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8943
8944         /* address of the NVRAM to write to */
8945         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8946                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8947
8948         /* issue the write command */
8949         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8950
8951         /* adjust timeout for emulation/FPGA */
8952         count = NVRAM_TIMEOUT_COUNT;
8953         if (CHIP_REV_IS_SLOW(bp))
8954                 count *= 100;
8955
8956         /* wait for completion */
8957         rc = -EBUSY;
8958         for (i = 0; i < count; i++) {
8959                 udelay(5);
8960                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8961                 if (val & MCPR_NVM_COMMAND_DONE) {
8962                         rc = 0;
8963                         break;
8964                 }
8965         }
8966
8967         return rc;
8968 }
8969
8970 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
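/* Worked example (little-endian host): for offset 0x103,
 * offset & 0x03 == 3, so BYTE_OFFSET() == 24 and the read-modify-write
 * below masks and merges the byte in bits 31:24 of the dword read from
 * the aligned offset 0x100.
 */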
8971
8972 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8973                               int buf_size)
8974 {
8975         int rc;
8976         u32 cmd_flags;
8977         u32 align_offset;
8978         __be32 val;
8979
8980         if (offset + buf_size > bp->common.flash_size) {
8981                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8982                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8983                    offset, buf_size, bp->common.flash_size);
8984                 return -EINVAL;
8985         }
8986
8987         /* request access to nvram interface */
8988         rc = bnx2x_acquire_nvram_lock(bp);
8989         if (rc)
8990                 return rc;
8991
8992         /* enable access to nvram interface */
8993         bnx2x_enable_nvram_access(bp);
8994
8995         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8996         align_offset = (offset & ~0x03);
8997         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8998
8999         if (rc == 0) {
9000                 val &= ~(0xff << BYTE_OFFSET(offset));
9001                 val |= (*data_buf << BYTE_OFFSET(offset));
9002
9003                 /* nvram data was returned as an array of bytes;
9004                  * convert it back to cpu order before writing */
9005                 val = be32_to_cpu(val);
9006
9007                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9008                                              cmd_flags);
9009         }
9010
9011         /* disable access to nvram interface */
9012         bnx2x_disable_nvram_access(bp);
9013         bnx2x_release_nvram_lock(bp);
9014
9015         return rc;
9016 }
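/* bnx2x_nvram_write1() covers the unaligned single-byte case that
 * ethtool can generate by read-modify-writing the surrounding dword;
 * the general path below requires a dword-aligned offset and length.
 */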
9017
9018 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9019                              int buf_size)
9020 {
9021         int rc;
9022         u32 cmd_flags;
9023         u32 val;
9024         u32 written_so_far;
9025
9026         if (buf_size == 1)      /* ethtool */
9027                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9028
9029         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9030                 DP(BNX2X_MSG_NVM,
9031                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9032                    offset, buf_size);
9033                 return -EINVAL;
9034         }
9035
9036         if (offset + buf_size > bp->common.flash_size) {
9037                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9038                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9039                    offset, buf_size, bp->common.flash_size);
9040                 return -EINVAL;
9041         }
9042
9043         /* request access to nvram interface */
9044         rc = bnx2x_acquire_nvram_lock(bp);
9045         if (rc)
9046                 return rc;
9047
9048         /* enable access to nvram interface */
9049         bnx2x_enable_nvram_access(bp);
9050
9051         written_so_far = 0;
9052         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9053         while ((written_so_far < buf_size) && (rc == 0)) {
9054                 if (written_so_far == (buf_size - sizeof(u32)))
9055                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9056                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9057                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9058                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9059                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9060
9061                 memcpy(&val, data_buf, 4);
9062
9063                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9064
9065                 /* advance to the next dword */
9066                 offset += sizeof(u32);
9067                 data_buf += sizeof(u32);
9068                 written_so_far += sizeof(u32);
9069                 cmd_flags = 0;
9070         }
9071
9072         /* disable access to nvram interface */
9073         bnx2x_disable_nvram_access(bp);
9074         bnx2x_release_nvram_lock(bp);
9075
9076         return rc;
9077 }
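/* Worked example for the FIRST/LAST flag handling above: with page
 * size N (NVRAM_PAGE_SIZE), writing 16 bytes starting at offset N - 8
 * issues four dwords flagged FIRST, LAST, FIRST, LAST -- each page
 * boundary closes the burst, the next page start opens a new one, and
 * the final dword is always flagged LAST.
 */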
9078
9079 static int bnx2x_set_eeprom(struct net_device *dev,
9080                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9081 {
9082         struct bnx2x *bp = netdev_priv(dev);
9083         int rc;
9084
9085         if (!netif_running(dev))
9086                 return -EAGAIN;
9087
9088         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9089            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9090            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9091            eeprom->len, eeprom->len);
9092
9093         /* parameters already validated in ethtool_set_eeprom */
9094
9095         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9096         if (eeprom->magic == 0x00504859)
9097                 if (bp->port.pmf) {
9098
9099                         bnx2x_acquire_phy_lock(bp);
9100                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
9101                                              bp->link_params.ext_phy_config,
9102                                              (bp->state != BNX2X_STATE_CLOSED),
9103                                              eebuf, eeprom->len);
9104                         if ((bp->state == BNX2X_STATE_OPEN) ||
9105                             (bp->state == BNX2X_STATE_DISABLED)) {
9106                                 rc |= bnx2x_link_reset(&bp->link_params,
9107                                                        &bp->link_vars, 1);
9108                                 rc |= bnx2x_phy_init(&bp->link_params,
9109                                                      &bp->link_vars);
9110                         }
9111                         bnx2x_release_phy_lock(bp);
9112
9113                 } else /* Only the PMF can access the PHY */
9114                         return -EINVAL;
9115         else
9116                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9117
9118         return rc;
9119 }
9120
9121 static int bnx2x_get_coalesce(struct net_device *dev,
9122                               struct ethtool_coalesce *coal)
9123 {
9124         struct bnx2x *bp = netdev_priv(dev);
9125
9126         memset(coal, 0, sizeof(struct ethtool_coalesce));
9127
9128         coal->rx_coalesce_usecs = bp->rx_ticks;
9129         coal->tx_coalesce_usecs = bp->tx_ticks;
9130
9131         return 0;
9132 }
9133
9134 static int bnx2x_set_coalesce(struct net_device *dev,
9135                               struct ethtool_coalesce *coal)
9136 {
9137         struct bnx2x *bp = netdev_priv(dev);
9138
9139         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9140         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9141                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9142
9143         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9144         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9145                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9146
9147         if (netif_running(dev))
9148                 bnx2x_update_coalesce(bp);
9149
9150         return 0;
9151 }
9152
9153 static void bnx2x_get_ringparam(struct net_device *dev,
9154                                 struct ethtool_ringparam *ering)
9155 {
9156         struct bnx2x *bp = netdev_priv(dev);
9157
9158         ering->rx_max_pending = MAX_RX_AVAIL;
9159         ering->rx_mini_max_pending = 0;
9160         ering->rx_jumbo_max_pending = 0;
9161
9162         ering->rx_pending = bp->rx_ring_size;
9163         ering->rx_mini_pending = 0;
9164         ering->rx_jumbo_pending = 0;
9165
9166         ering->tx_max_pending = MAX_TX_AVAIL;
9167         ering->tx_pending = bp->tx_ring_size;
9168 }
9169
9170 static int bnx2x_set_ringparam(struct net_device *dev,
9171                                struct ethtool_ringparam *ering)
9172 {
9173         struct bnx2x *bp = netdev_priv(dev);
9174         int rc = 0;
9175
9176         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9177             (ering->tx_pending > MAX_TX_AVAIL) ||
9178             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9179                 return -EINVAL;
9180
9181         bp->rx_ring_size = ering->rx_pending;
9182         bp->tx_ring_size = ering->tx_pending;
9183
9184         if (netif_running(dev)) {
9185                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9186                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9187         }
9188
9189         return rc;
9190 }
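/* The tx_pending lower bound above (> MAX_SKB_FRAGS + 4) presumably
 * guarantees that even a maximally fragmented skb, plus the handful
 * of extra BDs the driver adds per packet, still fits in the ring.
 */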
9191
9192 static void bnx2x_get_pauseparam(struct net_device *dev,
9193                                  struct ethtool_pauseparam *epause)
9194 {
9195         struct bnx2x *bp = netdev_priv(dev);
9196
9197         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9198                            BNX2X_FLOW_CTRL_AUTO) &&
9199                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9200
9201         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9202                             BNX2X_FLOW_CTRL_RX);
9203         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9204                             BNX2X_FLOW_CTRL_TX);
9205
9206         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9207            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9208            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9209 }
9210
9211 static int bnx2x_set_pauseparam(struct net_device *dev,
9212                                 struct ethtool_pauseparam *epause)
9213 {
9214         struct bnx2x *bp = netdev_priv(dev);
9215
9216         if (IS_E1HMF(bp))
9217                 return 0;
9218
9219         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9220            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9221            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9222
9223         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9224
9225         if (epause->rx_pause)
9226                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9227
9228         if (epause->tx_pause)
9229                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9230
9231         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9232                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9233
9234         if (epause->autoneg) {
9235                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9236                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9237                         return -EINVAL;
9238                 }
9239
9240                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9241                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9242         }
9243
9244         DP(NETIF_MSG_LINK,
9245            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9246
9247         if (netif_running(dev)) {
9248                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9249                 bnx2x_link_set(bp);
9250         }
9251
9252         return 0;
9253 }
9254
9255 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9256 {
9257         struct bnx2x *bp = netdev_priv(dev);
9258         int changed = 0;
9259         int rc = 0;
9260
9261         /* TPA requires Rx CSUM offloading */
9262         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9263                 if (!(dev->features & NETIF_F_LRO)) {
9264                         dev->features |= NETIF_F_LRO;
9265                         bp->flags |= TPA_ENABLE_FLAG;
9266                         changed = 1;
9267                 }
9268
9269         } else if (dev->features & NETIF_F_LRO) {
9270                 dev->features &= ~NETIF_F_LRO;
9271                 bp->flags &= ~TPA_ENABLE_FLAG;
9272                 changed = 1;
9273         }
9274
9275         if (changed && netif_running(dev)) {
9276                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9277                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9278         }
9279
9280         return rc;
9281 }
9282
9283 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9284 {
9285         struct bnx2x *bp = netdev_priv(dev);
9286
9287         return bp->rx_csum;
9288 }
9289
9290 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9291 {
9292         struct bnx2x *bp = netdev_priv(dev);
9293         int rc = 0;
9294
9295         bp->rx_csum = data;
9296
9297         /* Disable TPA when Rx CSUM is disabled; otherwise all
9298            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9299         if (!data) {
9300                 u32 flags = ethtool_op_get_flags(dev);
9301
9302                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9303         }
9304
9305         return rc;
9306 }
9307
9308 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9309 {
9310         if (data) {
9311                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9312                 dev->features |= NETIF_F_TSO6;
9313         } else {
9314                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9315                 dev->features &= ~NETIF_F_TSO6;
9316         }
9317
9318         return 0;
9319 }
9320
9321 static const struct {
9322         char string[ETH_GSTRING_LEN];
9323 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9324         { "register_test (offline)" },
9325         { "memory_test (offline)" },
9326         { "loopback_test (offline)" },
9327         { "nvram_test (online)" },
9328         { "interrupt_test (online)" },
9329         { "link_test (online)" },
9330         { "idle check (online)" }
9331 };
9332
9333 static int bnx2x_self_test_count(struct net_device *dev)
9334 {
9335         return BNX2X_NUM_TESTS;
9336 }
9337
9338 static int bnx2x_test_registers(struct bnx2x *bp)
9339 {
9340         int idx, i, rc = -ENODEV;
9341         u32 wr_val = 0;
9342         int port = BP_PORT(bp);
9343         static const struct {
9344                 u32  offset0;
9345                 u32  offset1;
9346                 u32  mask;
9347         } reg_tbl[] = {
9348 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9349                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9350                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9351                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9352                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9353                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9354                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9355                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9356                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9357                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9358 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9359                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9360                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9361                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9362                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9363                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9364                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9365                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9366                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
9367                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9368 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9369                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9370                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9371                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9372                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9373                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9374                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9375                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9376                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9377                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9378 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9379                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9380                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9381                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9382                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9383                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9384                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9385                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9386
9387                 { 0xffffffff, 0, 0x00000000 }
9388         };
9389
9390         if (!netif_running(bp->dev))
9391                 return rc;
9392
9393         /* Run the test twice:
9394            first writing 0x00000000, then writing 0xffffffff */
9395         for (idx = 0; idx < 2; idx++) {
9396
9397                 switch (idx) {
9398                 case 0:
9399                         wr_val = 0;
9400                         break;
9401                 case 1:
9402                         wr_val = 0xffffffff;
9403                         break;
9404                 }
9405
9406                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9407                         u32 offset, mask, save_val, val;
9408
9409                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9410                         mask = reg_tbl[i].mask;
9411
9412                         save_val = REG_RD(bp, offset);
9413
9414                         REG_WR(bp, offset, wr_val);
9415                         val = REG_RD(bp, offset);
9416
9417                         /* Restore the original register's value */
9418                         REG_WR(bp, offset, save_val);
9419
9420                         /* verify the value is as expected */
9421                         if ((val & mask) != (wr_val & mask))
9422                                 goto test_reg_exit;
9423                 }
9424         }
9425
9426         rc = 0;
9427
9428 test_reg_exit:
9429         return rc;
9430 }
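/* The table-driven test above uses the classic walking-0/walking-1
 * pattern: write all-zeros and then all-ones through each register,
 * restore the saved value, and compare the readback through 'mask',
 * which encodes exactly the bits the register implements as R/W --
 * read-only and reserved bits are ignored.
 */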
9431
9432 static int bnx2x_test_memory(struct bnx2x *bp)
9433 {
9434         int i, j, rc = -ENODEV;
9435         u32 val;
9436         static const struct {
9437                 u32 offset;
9438                 int size;
9439         } mem_tbl[] = {
9440                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9441                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9442                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9443                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9444                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9445                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9446                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9447
9448                 { 0xffffffff, 0 }
9449         };
9450         static const struct {
9451                 char *name;
9452                 u32 offset;
9453                 u32 e1_mask;
9454                 u32 e1h_mask;
9455         } prty_tbl[] = {
9456                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9457                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9458                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9459                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9460                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9461                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9462
9463                 { NULL, 0xffffffff, 0, 0 }
9464         };
9465
9466         if (!netif_running(bp->dev))
9467                 return rc;
9468
9469         /* Go through all the memories */
9470         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9471                 for (j = 0; j < mem_tbl[i].size; j++)
9472                         REG_RD(bp, mem_tbl[i].offset + j*4);
9473
9474         /* Check the parity status */
9475         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9476                 val = REG_RD(bp, prty_tbl[i].offset);
9477                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9478                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9479                         DP(NETIF_MSG_HW,
9480                            "%s is 0x%x\n", prty_tbl[i].name, val);
9481                         goto test_mem_exit;
9482                 }
9483         }
9484
9485         rc = 0;
9486
9487 test_mem_exit:
9488         return rc;
9489 }
9490
9491 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9492 {
9493         int cnt = 1000;
9494
9495         if (link_up)
9496                 while (bnx2x_link_test(bp) && cnt--)
9497                         msleep(10);
9498 }
9499
9500 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9501 {
9502         unsigned int pkt_size, num_pkts, i;
9503         struct sk_buff *skb;
9504         unsigned char *packet;
9505         struct bnx2x_fastpath *fp = &bp->fp[0];
9506         u16 tx_start_idx, tx_idx;
9507         u16 rx_start_idx, rx_idx;
9508         u16 pkt_prod;
9509         struct sw_tx_bd *tx_buf;
9510         struct eth_tx_bd *tx_bd;
9511         dma_addr_t mapping;
9512         union eth_rx_cqe *cqe;
9513         u8 cqe_fp_flags;
9514         struct sw_rx_bd *rx_buf;
9515         u16 len;
9516         int rc = -ENODEV;
9517
9518         /* check the loopback mode */
9519         switch (loopback_mode) {
9520         case BNX2X_PHY_LOOPBACK:
9521                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9522                         return -EINVAL;
9523                 break;
9524         case BNX2X_MAC_LOOPBACK:
9525                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9526                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9527                 break;
9528         default:
9529                 return -EINVAL;
9530         }
9531
9532         /* prepare the loopback packet */
9533         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9534                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9535         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9536         if (!skb) {
9537                 rc = -ENOMEM;
9538                 goto test_loopback_exit;
9539         }
9540         packet = skb_put(skb, pkt_size);
9541         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9542         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9543         for (i = ETH_HLEN; i < pkt_size; i++)
9544                 packet[i] = (unsigned char) (i & 0xff);
9545
9546         /* send the loopback packet */
9547         num_pkts = 0;
9548         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9549         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9550
9551         pkt_prod = fp->tx_pkt_prod++;
9552         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9553         tx_buf->first_bd = fp->tx_bd_prod;
9554         tx_buf->skb = skb;
9555
9556         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9557         mapping = pci_map_single(bp->pdev, skb->data,
9558                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9559         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9560         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9561         tx_bd->nbd = cpu_to_le16(1);
9562         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9563         tx_bd->vlan = cpu_to_le16(pkt_prod);
9564         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9565                                        ETH_TX_BD_FLAGS_END_BD);
9566         tx_bd->general_data = ((UNICAST_ADDRESS <<
9567                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9568
9569         wmb();
9570
9571         le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9572         mb(); /* FW restriction: must not reorder writing nbd and packets */
9573         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9574         DOORBELL(bp, fp->index, 0);
9575
9576         mmiowb();
9577
9578         num_pkts++;
9579         fp->tx_bd_prod++;
9580         bp->dev->trans_start = jiffies;
9581
9582         udelay(100);
9583
9584         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9585         if (tx_idx != tx_start_idx + num_pkts)
9586                 goto test_loopback_exit;
9587
9588         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9589         if (rx_idx != rx_start_idx + num_pkts)
9590                 goto test_loopback_exit;
9591
9592         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9593         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9594         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9595                 goto test_loopback_rx_exit;
9596
9597         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9598         if (len != pkt_size)
9599                 goto test_loopback_rx_exit;
9600
9601         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9602         skb = rx_buf->skb;
9603         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9604         for (i = ETH_HLEN; i < pkt_size; i++)
9605                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9606                         goto test_loopback_rx_exit;
9607
9608         rc = 0;
9609
9610 test_loopback_rx_exit:
9611
9612         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9613         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9614         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9615         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9616
9617         /* Update producers */
9618         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9619                              fp->rx_sge_prod);
9620
9621 test_loopback_exit:
9622         bp->link_params.loopback_mode = LOOPBACK_NONE;
9623
9624         return rc;
9625 }
9626
9627 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9628 {
9629         int rc = 0, res;
9630
9631         if (!netif_running(bp->dev))
9632                 return BNX2X_LOOPBACK_FAILED;
9633
9634         bnx2x_netif_stop(bp, 1);
9635         bnx2x_acquire_phy_lock(bp);
9636
9637         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9638         if (res) {
9639                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
9640                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9641         }
9642
9643         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9644         if (res) {
9645                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
9646                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9647         }
9648
9649         bnx2x_release_phy_lock(bp);
9650         bnx2x_netif_start(bp);
9651
9652         return rc;
9653 }
9654
9655 #define CRC32_RESIDUAL                  0xdebb20e3
9656
9657 static int bnx2x_test_nvram(struct bnx2x *bp)
9658 {
9659         static const struct {
9660                 int offset;
9661                 int size;
9662         } nvram_tbl[] = {
9663                 {     0,  0x14 }, /* bootstrap */
9664                 {  0x14,  0xec }, /* dir */
9665                 { 0x100, 0x350 }, /* manuf_info */
9666                 { 0x450,  0xf0 }, /* feature_info */
9667                 { 0x640,  0x64 }, /* upgrade_key_info */
9668                 { 0x6a4,  0x64 },
9669                 { 0x708,  0x70 }, /* manuf_key_info */
9670                 { 0x778,  0x70 },
9671                 {     0,     0 }
9672         };
9673         __be32 buf[0x350 / 4];
9674         u8 *data = (u8 *)buf;
9675         int i, rc;
9676         u32 magic, csum;
9677
9678         rc = bnx2x_nvram_read(bp, 0, data, 4);
9679         if (rc) {
9680                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9681                 goto test_nvram_exit;
9682         }
9683
9684         magic = be32_to_cpu(buf[0]);
9685         if (magic != 0x669955aa) {
9686                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9687                 rc = -ENODEV;
9688                 goto test_nvram_exit;
9689         }
9690
9691         for (i = 0; nvram_tbl[i].size; i++) {
9692
9693                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9694                                       nvram_tbl[i].size);
9695                 if (rc) {
9696                         DP(NETIF_MSG_PROBE,
9697                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9698                         goto test_nvram_exit;
9699                 }
9700
9701                 csum = ether_crc_le(nvram_tbl[i].size, data);
9702                 if (csum != CRC32_RESIDUAL) {
9703                         DP(NETIF_MSG_PROBE,
9704                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9705                         rc = -ENODEV;
9706                         goto test_nvram_exit;
9707                 }
9708         }
9709
9710 test_nvram_exit:
9711         return rc;
9712 }
9713
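/* --- Editorial sketch (not part of the driver) --------------------------
 * Why the check against CRC32_RESIDUAL works: each NVRAM region carries its
 * own little-endian CRC32 in its last four bytes, so re-running the CRC over
 * data-plus-stored-CRC always lands on a fixed residue. In ether_crc_le()'s
 * convention (init ~0, no final inversion) that residue is 0xdebb20e3; in
 * zlib's convention (final inversion applied) it is ~0xdebb20e3 = 0x2144df1c.
 * Standalone userspace demo, assuming zlib is available:
 */
#include <stdio.h>
#include <zlib.h>

int main(void)
{
        unsigned char buf[68] = "any payload"; /* 64 data bytes + 4 CRC bytes */
        unsigned long crc = crc32(0L, buf, 64);
        int i;

        for (i = 0; i < 4; i++)                /* append the CRC little-endian */
                buf[64 + i] = (crc >> (8 * i)) & 0xff;

        crc = crc32(0L, buf, 68);              /* CRC over data + stored CRC */
        printf("residue 0x%08lx (expect 0x2144df1c)\n", crc);
        return 0;
}
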
9714 static int bnx2x_test_intr(struct bnx2x *bp)
9715 {
9716         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9717         int i, rc;
9718
9719         if (!netif_running(bp->dev))
9720                 return -ENODEV;
9721
9722         config->hdr.length = 0;
9723         if (CHIP_IS_E1(bp))
9724                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9725         else
9726                 config->hdr.offset = BP_FUNC(bp);
9727         config->hdr.client_id = bp->fp->cl_id;
9728         config->hdr.reserved1 = 0;
9729
9730         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9731                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9732                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9733         if (rc == 0) {
9734                 bp->set_mac_pending++;
9735                 for (i = 0; i < 10; i++) {
9736                         if (!bp->set_mac_pending)
9737                                 break;
9738                         msleep_interruptible(10);
9739                 }
9740                 if (i == 10)
9741                         rc = -ENODEV;
9742         }
9743
9744         return rc;
9745 }
9746
9747 static void bnx2x_self_test(struct net_device *dev,
9748                             struct ethtool_test *etest, u64 *buf)
9749 {
9750         struct bnx2x *bp = netdev_priv(dev);
9751
9752         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9753
9754         if (!netif_running(dev))
9755                 return;
9756
9757         /* offline tests are not supported in MF mode */
9758         if (IS_E1HMF(bp))
9759                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9760
9761         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9762                 int port = BP_PORT(bp);
9763                 u32 val;
9764                 u8 link_up;
9765
9766                 /* save current value of input enable for TX port IF */
9767                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9768                 /* disable input for TX port IF */
9769                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9770
9771                 link_up = bp->link_vars.link_up;
9772                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9773                 bnx2x_nic_load(bp, LOAD_DIAG);
9774                 /* wait until link state is restored */
9775                 bnx2x_wait_for_link(bp, link_up);
9776
9777                 if (bnx2x_test_registers(bp) != 0) {
9778                         buf[0] = 1;
9779                         etest->flags |= ETH_TEST_FL_FAILED;
9780                 }
9781                 if (bnx2x_test_memory(bp) != 0) {
9782                         buf[1] = 1;
9783                         etest->flags |= ETH_TEST_FL_FAILED;
9784                 }
9785                 buf[2] = bnx2x_test_loopback(bp, link_up);
9786                 if (buf[2] != 0)
9787                         etest->flags |= ETH_TEST_FL_FAILED;
9788
9789                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9790
9791                 /* restore input for TX port IF */
9792                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9793
9794                 bnx2x_nic_load(bp, LOAD_NORMAL);
9795                 /* wait until link state is restored */
9796                 bnx2x_wait_for_link(bp, link_up);
9797         }
9798         if (bnx2x_test_nvram(bp) != 0) {
9799                 buf[3] = 1;
9800                 etest->flags |= ETH_TEST_FL_FAILED;
9801         }
9802         if (bnx2x_test_intr(bp) != 0) {
9803                 buf[4] = 1;
9804                 etest->flags |= ETH_TEST_FL_FAILED;
9805         }
9806         if (bp->port.pmf)
9807                 if (bnx2x_link_test(bp) != 0) {
9808                         buf[5] = 1;
9809                         etest->flags |= ETH_TEST_FL_FAILED;
9810                 }
9811
9812 #ifdef BNX2X_EXTRA_DEBUG
9813         bnx2x_panic_dump(bp);
9814 #endif
9815 }
9816
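/* --- Editorial note (not part of the driver) -----------------------------
 * The buf[] slots filled by bnx2x_self_test() map, in order, to the entries
 * of bnx2x_tests_str_arr reported by "ethtool -t <dev>": buf[0] registers,
 * buf[1] memory, buf[2] loopback, buf[3] nvram, buf[4] interrupt and
 * buf[5] link; any non-zero slot is shown as a failure. Slots 0-2 require
 * the offline flag since they reload the NIC.
 */
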
9817 static const struct {
9818         long offset;
9819         int size;
9820         u8 string[ETH_GSTRING_LEN];
9821 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9822 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9823         { Q_STATS_OFFSET32(error_bytes_received_hi),
9824                                                 8, "[%d]: rx_error_bytes" },
9825         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9826                                                 8, "[%d]: rx_ucast_packets" },
9827         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9828                                                 8, "[%d]: rx_mcast_packets" },
9829         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9830                                                 8, "[%d]: rx_bcast_packets" },
9831         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9832         { Q_STATS_OFFSET32(rx_err_discard_pkt),
9833                                          4, "[%d]: rx_phy_ip_err_discards"},
9834         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9835                                          4, "[%d]: rx_skb_alloc_discard" },
9836         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9837
9838 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9839         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9840                                                         8, "[%d]: tx_packets" }
9841 };
9842
9843 static const struct {
9844         long offset;
9845         int size;
9846         u32 flags;
9847 #define STATS_FLAGS_PORT                1
9848 #define STATS_FLAGS_FUNC                2
9849 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9850         u8 string[ETH_GSTRING_LEN];
9851 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9852 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9853                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9854         { STATS_OFFSET32(error_bytes_received_hi),
9855                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9856         { STATS_OFFSET32(total_unicast_packets_received_hi),
9857                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9858         { STATS_OFFSET32(total_multicast_packets_received_hi),
9859                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9860         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9861                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9862         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9863                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9864         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9865                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9866         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9867                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9868         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9869                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9870 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9871                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9872         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9873                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9874         { STATS_OFFSET32(no_buff_discard_hi),
9875                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9876         { STATS_OFFSET32(mac_filter_discard),
9877                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9878         { STATS_OFFSET32(xxoverflow_discard),
9879                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9880         { STATS_OFFSET32(brb_drop_hi),
9881                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9882         { STATS_OFFSET32(brb_truncate_hi),
9883                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9884         { STATS_OFFSET32(pause_frames_received_hi),
9885                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9886         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9887                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9888         { STATS_OFFSET32(nig_timer_max),
9889                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9890 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9891                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9892         { STATS_OFFSET32(rx_skb_alloc_failed),
9893                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9894         { STATS_OFFSET32(hw_csum_err),
9895                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9896
9897         { STATS_OFFSET32(total_bytes_transmitted_hi),
9898                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9899         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9900                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9901         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9902                                 8, STATS_FLAGS_BOTH, "tx_packets" },
9903         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9904                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9905         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9906                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9907         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9908                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9909         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9910                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9911 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9912                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9913         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9914                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9915         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9916                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9917         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9918                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9919         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9920                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9921         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9922                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9923         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9924                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9925         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9926                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9927         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9928                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9929         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9930                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9931 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9932                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9933         { STATS_OFFSET32(pause_frames_sent_hi),
9934                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9935 };
9936
9937 #define IS_PORT_STAT(i) \
9938         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9939 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9940 #define IS_E1HMF_MODE_STAT(bp) \
9941                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9942
9943 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9944 {
9945         struct bnx2x *bp = netdev_priv(dev);
9946         int i, j, k;
9947
9948         switch (stringset) {
9949         case ETH_SS_STATS:
9950                 if (is_multi(bp)) {
9951                         k = 0;
9952                         for_each_queue(bp, i) {
9953                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9954                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9955                                                 bnx2x_q_stats_arr[j].string, i);
9956                                 k += BNX2X_NUM_Q_STATS;
9957                         }
9958                         if (IS_E1HMF_MODE_STAT(bp))
9959                                 break;
9960                         for (j = 0; j < BNX2X_NUM_STATS; j++)
9961                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9962                                        bnx2x_stats_arr[j].string);
9963                 } else {
9964                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9965                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9966                                         continue;
9967                                 strcpy(buf + j*ETH_GSTRING_LEN,
9968                                        bnx2x_stats_arr[i].string);
9969                                 j++;
9970                         }
9971                 }
9972                 break;
9973
9974         case ETH_SS_TEST:
9975                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9976                 break;
9977         }
9978 }
9979
9980 static int bnx2x_get_stats_count(struct net_device *dev)
9981 {
9982         struct bnx2x *bp = netdev_priv(dev);
9983         int i, num_stats;
9984
9985         if (is_multi(bp)) {
9986                 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9987                 if (!IS_E1HMF_MODE_STAT(bp))
9988                         num_stats += BNX2X_NUM_STATS;
9989         } else {
9990                 if (IS_E1HMF_MODE_STAT(bp)) {
9991                         num_stats = 0;
9992                         for (i = 0; i < BNX2X_NUM_STATS; i++)
9993                                 if (IS_FUNC_STAT(i))
9994                                         num_stats++;
9995                 } else
9996                         num_stats = BNX2X_NUM_STATS;
9997         }
9998
9999         return num_stats;
10000 }
10001
10002 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10003                                     struct ethtool_stats *stats, u64 *buf)
10004 {
10005         struct bnx2x *bp = netdev_priv(dev);
10006         u32 *hw_stats, *offset;
10007         int i, j, k;
10008
10009         if (is_multi(bp)) {
10010                 k = 0;
10011                 for_each_queue(bp, i) {
10012                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10013                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10014                                 if (bnx2x_q_stats_arr[j].size == 0) {
10015                                         /* skip this counter */
10016                                         buf[k + j] = 0;
10017                                         continue;
10018                                 }
10019                                 offset = (hw_stats +
10020                                           bnx2x_q_stats_arr[j].offset);
10021                                 if (bnx2x_q_stats_arr[j].size == 4) {
10022                                         /* 4-byte counter */
10023                                         buf[k + j] = (u64) *offset;
10024                                         continue;
10025                                 }
10026                                 /* 8-byte counter */
10027                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10028                         }
10029                         k += BNX2X_NUM_Q_STATS;
10030                 }
10031                 if (IS_E1HMF_MODE_STAT(bp))
10032                         return;
10033                 hw_stats = (u32 *)&bp->eth_stats;
10034                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10035                         if (bnx2x_stats_arr[j].size == 0) {
10036                                 /* skip this counter */
10037                                 buf[k + j] = 0;
10038                                 continue;
10039                         }
10040                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10041                         if (bnx2x_stats_arr[j].size == 4) {
10042                                 /* 4-byte counter */
10043                                 buf[k + j] = (u64) *offset;
10044                                 continue;
10045                         }
10046                         /* 8-byte counter */
10047                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10048                 }
10049         } else {
10050                 hw_stats = (u32 *)&bp->eth_stats;
10051                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10052                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10053                                 continue;
10054                         if (bnx2x_stats_arr[i].size == 0) {
10055                                 /* skip this counter */
10056                                 buf[j] = 0;
10057                                 j++;
10058                                 continue;
10059                         }
10060                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10061                         if (bnx2x_stats_arr[i].size == 4) {
10062                                 /* 4-byte counter */
10063                                 buf[j] = (u64) *offset;
10064                                 j++;
10065                                 continue;
10066                         }
10067                         /* 8-byte counter */
10068                         buf[j] = HILO_U64(*offset, *(offset + 1));
10069                         j++;
10070                 }
10071         }
10072 }
10073
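/* --- Editorial sketch (not part of the driver) --------------------------
 * How an 8-byte counter is rebuilt above: the stats block lays each 64-bit
 * counter out as two u32 words, high word first, which HILO_U64() folds
 * back together. Minimal standalone equivalent:
 */
static inline unsigned long long example_hilo_u64(u32 hi, u32 lo)
{
        return ((unsigned long long)hi << 32) | lo;
}
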
10074 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10075 {
10076         struct bnx2x *bp = netdev_priv(dev);
10077         int port = BP_PORT(bp);
10078         int i;
10079
10080         if (!netif_running(dev))
10081                 return 0;
10082
10083         if (!bp->port.pmf)
10084                 return 0;
10085
10086         if (data == 0)
10087                 data = 2;
10088
10089         for (i = 0; i < (data * 2); i++) {
10090                 if ((i % 2) == 0)
10091                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10092                                       bp->link_params.hw_led_mode,
10093                                       bp->link_params.chip_id);
10094                 else
10095                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10096                                       bp->link_params.hw_led_mode,
10097                                       bp->link_params.chip_id);
10098
10099                 msleep_interruptible(500);
10100                 if (signal_pending(current))
10101                         break;
10102         }
10103
10104         if (bp->link_vars.link_up)
10105                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10106                               bp->link_vars.line_speed,
10107                               bp->link_params.hw_led_mode,
10108                               bp->link_params.chip_id);
10109
10110         return 0;
10111 }
10112
10113 static struct ethtool_ops bnx2x_ethtool_ops = {
10114         .get_settings           = bnx2x_get_settings,
10115         .set_settings           = bnx2x_set_settings,
10116         .get_drvinfo            = bnx2x_get_drvinfo,
10117         .get_regs_len           = bnx2x_get_regs_len,
10118         .get_regs               = bnx2x_get_regs,
10119         .get_wol                = bnx2x_get_wol,
10120         .set_wol                = bnx2x_set_wol,
10121         .get_msglevel           = bnx2x_get_msglevel,
10122         .set_msglevel           = bnx2x_set_msglevel,
10123         .nway_reset             = bnx2x_nway_reset,
10124         .get_link               = bnx2x_get_link,
10125         .get_eeprom_len         = bnx2x_get_eeprom_len,
10126         .get_eeprom             = bnx2x_get_eeprom,
10127         .set_eeprom             = bnx2x_set_eeprom,
10128         .get_coalesce           = bnx2x_get_coalesce,
10129         .set_coalesce           = bnx2x_set_coalesce,
10130         .get_ringparam          = bnx2x_get_ringparam,
10131         .set_ringparam          = bnx2x_set_ringparam,
10132         .get_pauseparam         = bnx2x_get_pauseparam,
10133         .set_pauseparam         = bnx2x_set_pauseparam,
10134         .get_rx_csum            = bnx2x_get_rx_csum,
10135         .set_rx_csum            = bnx2x_set_rx_csum,
10136         .get_tx_csum            = ethtool_op_get_tx_csum,
10137         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10138         .set_flags              = bnx2x_set_flags,
10139         .get_flags              = ethtool_op_get_flags,
10140         .get_sg                 = ethtool_op_get_sg,
10141         .set_sg                 = ethtool_op_set_sg,
10142         .get_tso                = ethtool_op_get_tso,
10143         .set_tso                = bnx2x_set_tso,
10144         .self_test_count        = bnx2x_self_test_count,
10145         .self_test              = bnx2x_self_test,
10146         .get_strings            = bnx2x_get_strings,
10147         .phys_id                = bnx2x_phys_id,
10148         .get_stats_count        = bnx2x_get_stats_count,
10149         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10150 };
10151
10152 /* end of ethtool_ops */
10153
10154 /****************************************************************************
10155 * General service functions
10156 ****************************************************************************/
10157
10158 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10159 {
10160         u16 pmcsr;
10161
10162         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10163
10164         switch (state) {
10165         case PCI_D0:
10166                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10167                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10168                                        PCI_PM_CTRL_PME_STATUS));
10169
10170                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10171                         /* delay required during transition out of D3hot */
10172                         msleep(20);
10173                 break;
10174
10175         case PCI_D3hot:
10176                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10177                 pmcsr |= 3;     /* 3 = D3hot in PCI_PM_CTRL_STATE_MASK */
10178
10179                 if (bp->wol)
10180                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10181
10182                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10183                                       pmcsr);
10184
10185                 /* No more memory access after this point until
10186                  * device is brought back to D0.
10187                  */
10188                 break;
10189
10190         default:
10191                 return -EINVAL;
10192         }
10193         return 0;
10194 }
10195
10196 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10197 {
10198         u16 rx_cons_sb;
10199
10200         /* Tell compiler that status block fields can change */
10201         barrier();
10202         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10203         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10204                 rx_cons_sb++;
10205         return (fp->rx_comp_cons != rx_cons_sb);
10206 }
10207
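/* --- Editorial sketch (not part of the driver) --------------------------
 * The "rx_cons_sb++" adjustment above: the RCQ is a ring of pages, and the
 * last slot of each page holds a next-page pointer rather than a real
 * completion, so a status-block index that lands on it is stepped past.
 * Simplified standalone version, assuming a 128-entry page as an example
 * (MAX_RCQ_DESC_CNT would then be 127):
 */
static inline u16 example_skip_page_link(u16 sb_idx)
{
        if ((sb_idx & 127) == 127)      /* index points at the page-link slot */
                sb_idx++;               /* step over it */
        return sb_idx;
}
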
10208 /*
10209  * net_device service functions
10210  */
10211
10212 static int bnx2x_poll(struct napi_struct *napi, int budget)
10213 {
10214         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10215                                                  napi);
10216         struct bnx2x *bp = fp->bp;
10217         int work_done = 0;
10218
10219 #ifdef BNX2X_STOP_ON_ERROR
10220         if (unlikely(bp->panic))
10221                 goto poll_panic;
10222 #endif
10223
10224         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10225         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10226         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10227
10228         bnx2x_update_fpsb_idx(fp);
10229
10230         if (bnx2x_has_tx_work(fp))
10231                 bnx2x_tx_int(fp);
10232
10233         if (bnx2x_has_rx_work(fp)) {
10234                 work_done = bnx2x_rx_int(fp, budget);
10235
10236                 /* must not complete if we consumed full budget */
10237                 if (work_done >= budget)
10238                         goto poll_again;
10239         }
10240
10241         /* BNX2X_HAS_WORK() reads the status block, so we need to
10242          * ensure that the status block indices have actually been read
10243          * (bnx2x_update_fpsb_idx) before this check, so that we do not
10244          * ack a "newer" status block index to the IGU than the one we
10245          * handled. Without this rmb, a DMA right after BNX2X_HAS_WORK
10246          * could let the read in bnx2x_update_fpsb_idx be postponed to
10247          * just before bnx2x_ack_sb; the device would then never raise
10248          * another interrupt until the next status block update, while
10249          * there is still unhandled work.
10250          */
10251         rmb();
10252
10253         if (!BNX2X_HAS_WORK(fp)) {
10254 #ifdef BNX2X_STOP_ON_ERROR
10255 poll_panic:
10256 #endif
10257                 napi_complete(napi);
10258
10259                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10260                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10261                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10262                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10263         }
10264
10265 poll_again:
10266         return work_done;
10267 }
10268
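/* --- Editorial sketch (not part of the driver) --------------------------
 * The generic NAPI contract that bnx2x_poll() follows: consume at most
 * 'budget' packets, and only call napi_complete() and re-enable device
 * interrupts when less than the full budget was used. The two example_*
 * helpers are hypothetical placeholders.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = example_rx_process(napi, budget); /* hypothetical */

        if (work_done < budget) {
                napi_complete(napi);
                example_enable_irq(napi);                 /* hypothetical */
        }
        return work_done;
}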
10269
10270 /* We split the first BD into a header BD and a data BD
10271  * to ease the pain of our fellow microcode engineers;
10272  * we use one mapping for both BDs.
10273  * So far this has only been observed to happen
10274  * in Other Operating Systems(TM).
10275  */
10276 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10277                                    struct bnx2x_fastpath *fp,
10278                                    struct eth_tx_bd **tx_bd, u16 hlen,
10279                                    u16 bd_prod, int nbd)
10280 {
10281         struct eth_tx_bd *h_tx_bd = *tx_bd;
10282         struct eth_tx_bd *d_tx_bd;
10283         dma_addr_t mapping;
10284         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10285
10286         /* first fix first BD */
10287         h_tx_bd->nbd = cpu_to_le16(nbd);
10288         h_tx_bd->nbytes = cpu_to_le16(hlen);
10289
10290         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10291            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10292            h_tx_bd->addr_lo, h_tx_bd->nbd);
10293
10294         /* now get a new data BD
10295          * (after the pbd) and fill it */
10296         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10297         d_tx_bd = &fp->tx_desc_ring[bd_prod];
10298
10299         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10300                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10301
10302         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10303         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10304         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10305         d_tx_bd->vlan = 0;
10306         /* this marks the BD as one that has no individual mapping
10307          * the FW ignores this flag in a BD not marked start
10308          */
10309         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10310         DP(NETIF_MSG_TX_QUEUED,
10311            "TSO split data size is %d (%x:%x)\n",
10312            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10313
10314         /* update tx_bd for marking the last BD flag */
10315         *tx_bd = d_tx_bd;
10316
10317         return bd_prod;
10318 }
10319
10320 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10321 {
10322         if (fix > 0)
10323                 csum = (u16) ~csum_fold(csum_sub(csum,
10324                                 csum_partial(t_header - fix, fix, 0)));
10325
10326         else if (fix < 0)
10327                 csum = (u16) ~csum_fold(csum_add(csum,
10328                                 csum_partial(t_header, -fix, 0)));
10329
10330         return swab16(csum);
10331 }
10332
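/* --- Editorial sketch (not part of the driver) --------------------------
 * The arithmetic underneath csum_fold()/csum_sub() used above: a 32-bit
 * ones'-complement accumulator is folded into 16 bits by adding the carry
 * back in. bnx2x_csum_fix() additionally complements and byte-swaps the
 * folded result. Standalone fold:
 */
static inline u16 example_csum_fold32(u32 sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold carries once */
        sum = (sum & 0xffff) + (sum >> 16);     /* fold any new carry */
        return (u16)sum;
}
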
10333 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10334 {
10335         u32 rc;
10336
10337         if (skb->ip_summed != CHECKSUM_PARTIAL)
10338                 rc = XMIT_PLAIN;
10339
10340         else {
10341                 if (skb->protocol == htons(ETH_P_IPV6)) {
10342                         rc = XMIT_CSUM_V6;
10343                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10344                                 rc |= XMIT_CSUM_TCP;
10345
10346                 } else {
10347                         rc = XMIT_CSUM_V4;
10348                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10349                                 rc |= XMIT_CSUM_TCP;
10350                 }
10351         }
10352
10353         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10354                 rc |= XMIT_GSO_V4;
10355
10356         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10357                 rc |= XMIT_GSO_V6;
10358
10359         return rc;
10360 }
10361
10362 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10363 /* check if the packet requires linearization (i.e. is too fragmented);
10364    no need to check fragmentation if page size > 8K (there will be no
10365    violation of FW restrictions) */
10366 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10367                              u32 xmit_type)
10368 {
10369         int to_copy = 0;
10370         int hlen = 0;
10371         int first_bd_sz = 0;
10372
10373         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10374         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10375
10376                 if (xmit_type & XMIT_GSO) {
10377                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10378                         /* Check if LSO packet needs to be copied:
10379                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10380                         int wnd_size = MAX_FETCH_BD - 3;
10381                         /* Number of windows to check */
10382                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10383                         int wnd_idx = 0;
10384                         int frag_idx = 0;
10385                         u32 wnd_sum = 0;
10386
10387                         /* Headers length */
10388                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10389                                 tcp_hdrlen(skb);
10390
10391                         /* Amount of data (w/o headers) in the linear part of the SKB */
10392                         first_bd_sz = skb_headlen(skb) - hlen;
10393
10394                         wnd_sum  = first_bd_sz;
10395
10396                         /* Calculate the first sum - it's special */
10397                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10398                                 wnd_sum +=
10399                                         skb_shinfo(skb)->frags[frag_idx].size;
10400
10401                         /* If there was data on linear skb data - check it */
10402                         if (first_bd_sz > 0) {
10403                                 if (unlikely(wnd_sum < lso_mss)) {
10404                                         to_copy = 1;
10405                                         goto exit_lbl;
10406                                 }
10407
10408                                 wnd_sum -= first_bd_sz;
10409                         }
10410
10411                         /* Others are easier: run through the frag list and
10412                            check all windows */
10413                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10414                                 wnd_sum +=
10415                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10416
10417                                 if (unlikely(wnd_sum < lso_mss)) {
10418                                         to_copy = 1;
10419                                         break;
10420                                 }
10421                                 wnd_sum -=
10422                                         skb_shinfo(skb)->frags[wnd_idx].size;
10423                         }
10424                 } else {
10425                         /* a non-LSO packet this fragmented must
10426                            always be linearized */
10427                         to_copy = 1;
10428                 }
10429         }
10430
10431 exit_lbl:
10432         if (unlikely(to_copy))
10433                 DP(NETIF_MSG_TX_QUEUED,
10434                    "Linearization IS REQUIRED for %s packet. "
10435                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10436                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10437                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10438
10439         return to_copy;
10440 }
10441 #endif
10442
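/* --- Editorial sketch (not part of the driver) --------------------------
 * The core of the bnx2x_pkt_req_lin() check above as a plain sliding
 * window: with a window of 'w' consecutive BDs, the packet must be
 * linearized if any window of fragments sums to less than one MSS, since
 * the FW could then not assemble a full segment from 'w' BDs. Hypothetical
 * standalone helper over an array of fragment sizes:
 */
static int example_window_too_small(const u32 *frag, int nfrags, int w,
                                    u32 mss)
{
        u32 sum = 0;
        int i;

        for (i = 0; i < nfrags; i++) {
                sum += frag[i];
                if (i >= w)
                        sum -= frag[i - w];     /* slide the window left edge */
                if (i >= w - 1 && sum < mss)
                        return 1;               /* caller should linearize */
        }
        return 0;
}
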
10443 /* called with netif_tx_lock
10444  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10445  * netif_wake_queue()
10446  */
10447 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10448 {
10449         struct bnx2x *bp = netdev_priv(dev);
10450         struct bnx2x_fastpath *fp;
10451         struct netdev_queue *txq;
10452         struct sw_tx_bd *tx_buf;
10453         struct eth_tx_bd *tx_bd;
10454         struct eth_tx_parse_bd *pbd = NULL;
10455         u16 pkt_prod, bd_prod;
10456         int nbd, fp_index;
10457         dma_addr_t mapping;
10458         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10459         int vlan_off = (bp->e1hov ? 4 : 0);
10460         int i;
10461         u8 hlen = 0;
10462
10463 #ifdef BNX2X_STOP_ON_ERROR
10464         if (unlikely(bp->panic))
10465                 return NETDEV_TX_BUSY;
10466 #endif
10467
10468         fp_index = skb_get_queue_mapping(skb);
10469         txq = netdev_get_tx_queue(dev, fp_index);
10470
10471         fp = &bp->fp[fp_index];
10472
10473         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10474                 fp->eth_q_stats.driver_xoff++;
10475                 netif_tx_stop_queue(txq);
10476                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10477                 return NETDEV_TX_BUSY;
10478         }
10479
10480         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10481            "  gso type %x  xmit_type %x\n",
10482            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10483            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10484
10485 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10486         /* First, check if we need to linearize the skb (due to FW
10487            restrictions). No need to check fragmentation if page size > 8K
10488            (there will be no violation of FW restrictions) */
10489         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10490                 /* Statistics of linearization */
10491                 bp->lin_cnt++;
10492                 if (skb_linearize(skb) != 0) {
10493                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10494                            "silently dropping this SKB\n");
10495                         dev_kfree_skb_any(skb);
10496                         return NETDEV_TX_OK;
10497                 }
10498         }
10499 #endif
10500
10501         /*
10502          * Please read carefully. First we use one BD which we mark as start,
10503          * then for TSO or xsum we have a parsing info BD,
10504          * and only then we have the rest of the TSO BDs.
10505          * (Don't forget to mark the last one as last,
10506          * and to unmap only AFTER you write to the BD ...)
10507          * And above all, all pbd sizes are in words - NOT DWORDS!
10508          */
10509
10510         pkt_prod = fp->tx_pkt_prod++;
10511         bd_prod = TX_BD(fp->tx_bd_prod);
10512
10513         /* get a tx_buf and first BD */
10514         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10515         tx_bd = &fp->tx_desc_ring[bd_prod];
10516
10517         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10518         tx_bd->general_data = (UNICAST_ADDRESS <<
10519                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10520         /* header nbd */
10521         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10522
10523         /* remember the first BD of the packet */
10524         tx_buf->first_bd = fp->tx_bd_prod;
10525         tx_buf->skb = skb;
10526
10527         DP(NETIF_MSG_TX_QUEUED,
10528            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10529            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10530
10531 #ifdef BCM_VLAN
10532         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10533             (bp->flags & HW_VLAN_TX_FLAG)) {
10534                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10535                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10536                 vlan_off += 4;
10537         } else
10538 #endif
10539                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10540
10541         if (xmit_type) {
10542                 /* turn on parsing and get a BD */
10543                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10544                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10545
10546                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10547         }
10548
10549         if (xmit_type & XMIT_CSUM) {
10550                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10551
10552                 /* for now NS flag is not used in Linux */
10553                 pbd->global_data =
10554                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10555                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10556
10557                 pbd->ip_hlen = (skb_transport_header(skb) -
10558                                 skb_network_header(skb)) / 2;
10559
10560                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10561
10562                 pbd->total_hlen = cpu_to_le16(hlen);
10563                 hlen = hlen*2 - vlan_off;
10564
10565                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10566
10567                 if (xmit_type & XMIT_CSUM_V4)
10568                         tx_bd->bd_flags.as_bitfield |=
10569                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10570                 else
10571                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10572
10573                 if (xmit_type & XMIT_CSUM_TCP) {
10574                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10575
10576                 } else {
10577                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10578
10579                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10580                         pbd->cs_offset = fix / 2;
10581
10582                         DP(NETIF_MSG_TX_QUEUED,
10583                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
10584                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10585                            SKB_CS(skb));
10586
10587                         /* HW bug: fixup the CSUM */
10588                         pbd->tcp_pseudo_csum =
10589                                 bnx2x_csum_fix(skb_transport_header(skb),
10590                                                SKB_CS(skb), fix);
10591
10592                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10593                            pbd->tcp_pseudo_csum);
10594                 }
10595         }
10596
10597         mapping = pci_map_single(bp->pdev, skb->data,
10598                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10599
10600         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10601         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10602         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10603         tx_bd->nbd = cpu_to_le16(nbd);
10604         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10605
10606         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10607            "  nbytes %d  flags %x  vlan %x\n",
10608            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10609            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10610            le16_to_cpu(tx_bd->vlan));
10611
10612         if (xmit_type & XMIT_GSO) {
10613
10614                 DP(NETIF_MSG_TX_QUEUED,
10615                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10616                    skb->len, hlen, skb_headlen(skb),
10617                    skb_shinfo(skb)->gso_size);
10618
10619                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10620
10621                 if (unlikely(skb_headlen(skb) > hlen))
10622                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10623                                                  bd_prod, ++nbd);
10624
10625                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10626                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10627                 pbd->tcp_flags = pbd_tcp_flags(skb);
10628
10629                 if (xmit_type & XMIT_GSO_V4) {
10630                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10631                         pbd->tcp_pseudo_csum =
10632                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10633                                                           ip_hdr(skb)->daddr,
10634                                                           0, IPPROTO_TCP, 0));
10635
10636                 } else
10637                         pbd->tcp_pseudo_csum =
10638                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10639                                                         &ipv6_hdr(skb)->daddr,
10640                                                         0, IPPROTO_TCP, 0));
10641
10642                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10643         }
10644
10645         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10646                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10647
10648                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10649                 tx_bd = &fp->tx_desc_ring[bd_prod];
10650
10651                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10652                                        frag->size, PCI_DMA_TODEVICE);
10653
10654                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10655                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10656                 tx_bd->nbytes = cpu_to_le16(frag->size);
10657                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10658                 tx_bd->bd_flags.as_bitfield = 0;
10659
10660                 DP(NETIF_MSG_TX_QUEUED,
10661                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
10662                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10663                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10664         }
10665
10666         /* now at last mark the BD as the last BD */
10667         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10668
10669         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
10670            tx_bd, tx_bd->bd_flags.as_bitfield);
10671
10672         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10673
10674         /* now send a tx doorbell, counting the next BD
10675          * if the packet contains or ends with it
10676          */
10677         if (TX_BD_POFF(bd_prod) < nbd)
10678                 nbd++;
10679
10680         if (pbd)
10681                 DP(NETIF_MSG_TX_QUEUED,
10682                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10683                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10684                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10685                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10686                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10687
10688         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10689
10690         /*
10691          * Make sure that the BD data is updated before updating the producer
10692          * since the FW might read the BD right after the producer is updated.
10693          * This only matters on weakly-ordered memory model archs such as
10694          * IA-64. The following barrier is also mandatory since the FW
10695          * assumes all packets have BDs.
10696          */
10697         wmb();
10698
10699         le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10700         mb(); /* FW restriction: must not reorder writing nbd and packets */
10701         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10702         DOORBELL(bp, fp->index, 0);
10703
10704         mmiowb();
10705
10706         fp->tx_bd_prod += nbd;
10707
10708         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10709                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10710                    if we put Tx into XOFF state. */
10711                 smp_mb();
10712                 netif_tx_stop_queue(txq);
10713                 fp->eth_q_stats.driver_xoff++;
10714                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10715                         netif_tx_wake_queue(txq);
10716         }
10717         fp->tx_pkt++;
10718
10719         return NETDEV_TX_OK;
10720 }
10721
10722 /* called with rtnl_lock */
10723 static int bnx2x_open(struct net_device *dev)
10724 {
10725         struct bnx2x *bp = netdev_priv(dev);
10726
10727         netif_carrier_off(dev);
10728
10729         bnx2x_set_power_state(bp, PCI_D0);
10730
10731         return bnx2x_nic_load(bp, LOAD_OPEN);
10732 }
10733
10734 /* called with rtnl_lock */
10735 static int bnx2x_close(struct net_device *dev)
10736 {
10737         struct bnx2x *bp = netdev_priv(dev);
10738
10739         /* Unload the driver, release IRQs */
10740         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10741         if (atomic_read(&bp->pdev->enable_cnt) == 1)
10742                 if (!CHIP_REV_IS_SLOW(bp))
10743                         bnx2x_set_power_state(bp, PCI_D3hot);
10744
10745         return 0;
10746 }
10747
10748 /* called with netif_tx_lock from dev_mcast.c */
10749 static void bnx2x_set_rx_mode(struct net_device *dev)
10750 {
10751         struct bnx2x *bp = netdev_priv(dev);
10752         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10753         int port = BP_PORT(bp);
10754
10755         if (bp->state != BNX2X_STATE_OPEN) {
10756                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10757                 return;
10758         }
10759
10760         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10761
10762         if (dev->flags & IFF_PROMISC)
10763                 rx_mode = BNX2X_RX_MODE_PROMISC;
10764
10765         else if ((dev->flags & IFF_ALLMULTI) ||
10766                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10767                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10768
10769         else { /* some multicasts */
10770                 if (CHIP_IS_E1(bp)) {
10771                         int i, old, offset;
10772                         struct dev_mc_list *mclist;
10773                         struct mac_configuration_cmd *config =
10774                                                 bnx2x_sp(bp, mcast_config);
10775
10776                         for (i = 0, mclist = dev->mc_list;
10777                              mclist && (i < dev->mc_count);
10778                              i++, mclist = mclist->next) {
10779
10780                                 config->config_table[i].
10781                                         cam_entry.msb_mac_addr =
10782                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
10783                                 config->config_table[i].
10784                                         cam_entry.middle_mac_addr =
10785                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
10786                                 config->config_table[i].
10787                                         cam_entry.lsb_mac_addr =
10788                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
10789                                 config->config_table[i].cam_entry.flags =
10790                                                         cpu_to_le16(port);
10791                                 config->config_table[i].
10792                                         target_table_entry.flags = 0;
10793                                 config->config_table[i].
10794                                         target_table_entry.client_id = 0;
10795                                 config->config_table[i].
10796                                         target_table_entry.vlan_id = 0;
10797
10798                                 DP(NETIF_MSG_IFUP,
10799                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10800                                    config->config_table[i].
10801                                                 cam_entry.msb_mac_addr,
10802                                    config->config_table[i].
10803                                                 cam_entry.middle_mac_addr,
10804                                    config->config_table[i].
10805                                                 cam_entry.lsb_mac_addr);
10806                         }
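                              /* invalidate any CAM entries left over
                               * from a previous, longer multicast list */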
10807                         old = config->hdr.length;
10808                         if (old > i) {
10809                                 for (; i < old; i++) {
10810                                         if (CAM_IS_INVALID(config->
10811                                                            config_table[i])) {
10812                                                 /* already invalidated */
10813                                                 break;
10814                                         }
10815                                         /* invalidate */
10816                                         CAM_INVALIDATE(config->
10817                                                        config_table[i]);
10818                                 }
10819                         }
10820
10821                         if (CHIP_REV_IS_SLOW(bp))
10822                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10823                         else
10824                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
10825
10826                         config->hdr.length = i;
10827                         config->hdr.offset = offset;
10828                         config->hdr.client_id = bp->fp->cl_id;
10829                         config->hdr.reserved1 = 0;
10830
10831                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10832                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10833                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10834                                       0);
10835                 } else { /* E1H */
10836                         /* Accept one or more multicasts */
10837                         struct dev_mc_list *mclist;
10838                         u32 mc_filter[MC_HASH_SIZE];
10839                         u32 crc, bit, regidx;
10840                         int i;
10841
10842                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10843
10844                         for (i = 0, mclist = dev->mc_list;
10845                              mclist && (i < dev->mc_count);
10846                              i++, mclist = mclist->next) {
10847
10848                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10849                                    mclist->dmi_addr);
10850
10851                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
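                                      /* hash the MAC into the 256-bit
                                       * filter: the crc32c top byte
                                       * selects one bit out of eight
                                       * 32-bit MC_HASH registers, e.g.
                                       * (crc >> 24) == 165 sets bit 5 of
                                       * mc_filter[5] (165 >> 5 == 5,
                                       * 165 & 0x1f == 5)
                                       */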
10852                                 bit = (crc >> 24) & 0xff;
10853                                 regidx = bit >> 5;
10854                                 bit &= 0x1f;
10855                                 mc_filter[regidx] |= (1 << bit);
10856                         }
10857
10858                         for (i = 0; i < MC_HASH_SIZE; i++)
10859                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10860                                        mc_filter[i]);
10861                 }
10862         }
10863
10864         bp->rx_mode = rx_mode;
10865         bnx2x_set_storm_rx_mode(bp);
10866 }
10867
10868 /* called with rtnl_lock */
10869 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10870 {
10871         struct sockaddr *addr = p;
10872         struct bnx2x *bp = netdev_priv(dev);
10873
10874         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10875                 return -EINVAL;
10876
10877         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10878         if (netif_running(dev)) {
10879                 if (CHIP_IS_E1(bp))
10880                         bnx2x_set_mac_addr_e1(bp, 1);
10881                 else
10882                         bnx2x_set_mac_addr_e1h(bp, 1);
10883         }
10884
10885         return 0;
10886 }
10887
10888 /* called with rtnl_lock */
10889 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10890 {
10891         struct mii_ioctl_data *data = if_mii(ifr);
10892         struct bnx2x *bp = netdev_priv(dev);
10893         int port = BP_PORT(bp);
10894         int err;
10895
10896         switch (cmd) {
10897         case SIOCGMIIPHY:
10898                 data->phy_id = bp->port.phy_addr;
10899
10900                 /* fallthrough */
10901
10902         case SIOCGMIIREG: {
10903                 u16 mii_regval;
10904
10905                 if (!netif_running(dev))
10906                         return -EAGAIN;
10907
10908                 mutex_lock(&bp->port.phy_mutex);
10909                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10910                                       DEFAULT_PHY_DEV_ADDR,
10911                                       (data->reg_num & 0x1f), &mii_regval);
10912                 data->val_out = mii_regval;
10913                 mutex_unlock(&bp->port.phy_mutex);
10914                 return err;
10915         }
10916
10917         case SIOCSMIIREG:
10918                 if (!capable(CAP_NET_ADMIN))
10919                         return -EPERM;
10920
10921                 if (!netif_running(dev))
10922                         return -EAGAIN;
10923
10924                 mutex_lock(&bp->port.phy_mutex);
10925                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10926                                        DEFAULT_PHY_DEV_ADDR,
10927                                        (data->reg_num & 0x1f), data->val_in);
10928                 mutex_unlock(&bp->port.phy_mutex);
10929                 return err;
10930
10931         default:
10932                 /* do nothing */
10933                 break;
10934         }
10935
10936         return -EOPNOTSUPP;
10937 }
10938
10939 /* called with rtnl_lock */
10940 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10941 {
10942         struct bnx2x *bp = netdev_priv(dev);
10943         int rc = 0;
10944
10945         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10946             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10947                 return -EINVAL;
10948
10949         /* This does not race with packet allocation
10950          * because the actual alloc size is
10951          * only updated as part of load
10952          */
10953         dev->mtu = new_mtu;
10954
10955         if (netif_running(dev)) {
10956                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10957                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10958         }
10959
10960         return rc;
10961 }
10962
10963 static void bnx2x_tx_timeout(struct net_device *dev)
10964 {
10965         struct bnx2x *bp = netdev_priv(dev);
10966
10967 #ifdef BNX2X_STOP_ON_ERROR
10968         if (!bp->panic)
10969                 bnx2x_panic();
10970 #endif
10971         /* This allows the netif to be shutdown gracefully before resetting */
10972         schedule_work(&bp->reset_task);
10973 }
10974
10975 #ifdef BCM_VLAN
10976 /* called with rtnl_lock */
10977 static void bnx2x_vlan_rx_register(struct net_device *dev,
10978                                    struct vlan_group *vlgrp)
10979 {
10980         struct bnx2x *bp = netdev_priv(dev);
10981
10982         bp->vlgrp = vlgrp;
10983
10984         /* Set flags according to the required capabilities */
10985         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10986
10987         if (dev->features & NETIF_F_HW_VLAN_TX)
10988                 bp->flags |= HW_VLAN_TX_FLAG;
10989
10990         if (dev->features & NETIF_F_HW_VLAN_RX)
10991                 bp->flags |= HW_VLAN_RX_FLAG;
10992
10993         if (netif_running(dev))
10994                 bnx2x_set_client_config(bp);
10995 }
10996
10997 #endif
10998
10999 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
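      /* netpoll callback (e.g. for netconsole): run the interrupt handler
       * with the device IRQ disabled */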
11000 static void poll_bnx2x(struct net_device *dev)
11001 {
11002         struct bnx2x *bp = netdev_priv(dev);
11003
11004         disable_irq(bp->pdev->irq);
11005         bnx2x_interrupt(bp->pdev->irq, dev);
11006         enable_irq(bp->pdev->irq);
11007 }
11008 #endif
11009
11010 static const struct net_device_ops bnx2x_netdev_ops = {
11011         .ndo_open               = bnx2x_open,
11012         .ndo_stop               = bnx2x_close,
11013         .ndo_start_xmit         = bnx2x_start_xmit,
11014         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11015         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11016         .ndo_validate_addr      = eth_validate_addr,
11017         .ndo_do_ioctl           = bnx2x_ioctl,
11018         .ndo_change_mtu         = bnx2x_change_mtu,
11019         .ndo_tx_timeout         = bnx2x_tx_timeout,
11020 #ifdef BCM_VLAN
11021         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11022 #endif
11023 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11024         .ndo_poll_controller    = poll_bnx2x,
11025 #endif
11026 };
11027
11028 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11029                                     struct net_device *dev)
11030 {
11031         struct bnx2x *bp;
11032         int rc;
11033
11034         SET_NETDEV_DEV(dev, &pdev->dev);
11035         bp = netdev_priv(dev);
11036
11037         bp->dev = dev;
11038         bp->pdev = pdev;
11039         bp->flags = 0;
11040         bp->func = PCI_FUNC(pdev->devfn);
11041
11042         rc = pci_enable_device(pdev);
11043         if (rc) {
11044                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11045                 goto err_out;
11046         }
11047
11048         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11049                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11050                        " aborting\n");
11051                 rc = -ENODEV;
11052                 goto err_out_disable;
11053         }
11054
11055         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11056                 printk(KERN_ERR PFX "Cannot find second PCI device"
11057                        " base address, aborting\n");
11058                 rc = -ENODEV;
11059                 goto err_out_disable;
11060         }
11061
11062         if (atomic_read(&pdev->enable_cnt) == 1) {
11063                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11064                 if (rc) {
11065                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11066                                " aborting\n");
11067                         goto err_out_disable;
11068                 }
11069
11070                 pci_set_master(pdev);
11071                 pci_save_state(pdev);
11072         }
11073
11074         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11075         if (bp->pm_cap == 0) {
11076                 printk(KERN_ERR PFX "Cannot find power management"
11077                        " capability, aborting\n");
11078                 rc = -EIO;
11079                 goto err_out_release;
11080         }
11081
11082         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11083         if (bp->pcie_cap == 0) {
11084                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11085                        " aborting\n");
11086                 rc = -EIO;
11087                 goto err_out_release;
11088         }
11089
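              /* prefer a 64-bit DMA mask (DAC); fall back to 32-bit if
               * the system cannot do 64-bit addressing */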
11090         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11091                 bp->flags |= USING_DAC_FLAG;
11092                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11093                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11094                                " failed, aborting\n");
11095                         rc = -EIO;
11096                         goto err_out_release;
11097                 }
11098
11099         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11100                 printk(KERN_ERR PFX "System does not support DMA,"
11101                        " aborting\n");
11102                 rc = -EIO;
11103                 goto err_out_release;
11104         }
11105
11106         dev->mem_start = pci_resource_start(pdev, 0);
11107         dev->base_addr = dev->mem_start;
11108         dev->mem_end = pci_resource_end(pdev, 0);
11109
11110         dev->irq = pdev->irq;
11111
11112         bp->regview = pci_ioremap_bar(pdev, 0);
11113         if (!bp->regview) {
11114                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11115                 rc = -ENOMEM;
11116                 goto err_out_release;
11117         }
11118
11119         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11120                                         min_t(u64, BNX2X_DB_SIZE,
11121                                               pci_resource_len(pdev, 2)));
11122         if (!bp->doorbells) {
11123                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11124                 rc = -ENOMEM;
11125                 goto err_out_unmap;
11126         }
11127
11128         bnx2x_set_power_state(bp, PCI_D0);
11129
11130         /* clean indirect addresses */
11131         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11132                                PCICFG_VENDOR_ID_OFFSET);
11133         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11134         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11135         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11136         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11137
11138         dev->watchdog_timeo = TX_TIMEOUT;
11139
11140         dev->netdev_ops = &bnx2x_netdev_ops;
11141         dev->ethtool_ops = &bnx2x_ethtool_ops;
11142         dev->features |= NETIF_F_SG;
11143         dev->features |= NETIF_F_HW_CSUM;
11144         if (bp->flags & USING_DAC_FLAG)
11145                 dev->features |= NETIF_F_HIGHDMA;
11146 #ifdef BCM_VLAN
11147         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11148         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11149 #endif
11150         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11151         dev->features |= NETIF_F_TSO6;
11152
11153         return 0;
11154
11155 err_out_unmap:
11156         if (bp->regview) {
11157                 iounmap(bp->regview);
11158                 bp->regview = NULL;
11159         }
11160         if (bp->doorbells) {
11161                 iounmap(bp->doorbells);
11162                 bp->doorbells = NULL;
11163         }
11164
11165 err_out_release:
11166         if (atomic_read(&pdev->enable_cnt) == 1)
11167                 pci_release_regions(pdev);
11168
11169 err_out_disable:
11170         pci_disable_device(pdev);
11171         pci_set_drvdata(pdev, NULL);
11172
11173 err_out:
11174         return rc;
11175 }
11176
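      /* returns the negotiated PCI-E link width (number of lanes) */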
11177 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11178 {
11179         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11180
11181         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11182         return val;
11183 }
11184
11185 /* returns the PCI-E link speed: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
11186 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11187 {
11188         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11189
11190         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11191         return val;
11192 }

11193 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11194 {
11195         struct bnx2x_fw_file_hdr *fw_hdr;
11196         struct bnx2x_fw_file_section *sections;
11197         u16 *ops_offsets;
11198         u32 offset, len, num_ops;
11199         int i;
11200         const struct firmware *firmware = bp->firmware;
11201         const u8 *fw_ver;
11202
11203         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11204                 return -EINVAL;
11205
11206         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11207         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11208
11209         /* Make sure none of the offsets and sizes make us read beyond
11210          * the end of the firmware data */
11211         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11212                 offset = be32_to_cpu(sections[i].offset);
11213                 len = be32_to_cpu(sections[i].len);
11214                 if (offset + len > firmware->size) {
11215                         printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11216                         return -EINVAL;
11217                 }
11218         }
11219
11220         /* Likewise for the init_ops offsets */
11221         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11222         ops_offsets = (u16 *)(firmware->data + offset);
11223         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11224
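              /* each 16-bit entry must be a valid index into the
               * init_ops array */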
11225         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11226                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11227                         printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11228                         return -EINVAL;
11229                 }
11230         }
11231
11232         /* Check FW version */
11233         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11234         fw_ver = firmware->data + offset;
11235         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11236             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11237             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11238             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11239                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11240                                     " Should be %d.%d.%d.%d\n",
11241                        fw_ver[0], fw_ver[1], fw_ver[2],
11242                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11243                        BCM_5710_FW_MINOR_VERSION,
11244                        BCM_5710_FW_REVISION_VERSION,
11245                        BCM_5710_FW_ENGINEERING_VERSION);
11246                 return -EINVAL;
11247         }
11248
11249         return 0;
11250 }
11251
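      /* copy n bytes of big-endian 32-bit words to _target in CPU byte
       * order */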
11252 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11253 {
11254         u32 i;
11255         const __be32 *source = (const __be32 *)_source;
11256         u32 *target = (u32 *)_target;
11257
11258         for (i = 0; i < n/4; i++)
11259                 target[i] = be32_to_cpu(source[i]);
11260 }
11261
11262 /*
11263    Ops array is stored in the following format:
11264    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11265  */
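      /* e.g. the big-endian word pair 0x12abcdef 0x00000001 unpacks to
       * op = 0x12, offset = 0xabcdef, raw_data = 0x00000001 (values are
       * illustrative only)
       */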
11266 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11267 {
11268         u32 i, j, tmp;
11269         const __be32 *source = (const __be32 *)_source;
11270         struct raw_op *target = (struct raw_op *)_target;
11271
11272         for (i = 0, j = 0; i < n/8; i++, j += 2) {
11273                 tmp = be32_to_cpu(source[j]);
11274                 target[i].op = (tmp >> 24) & 0xff;
11275                 target[i].offset = tmp & 0xffffff;
11276                 target[i].raw_data = be32_to_cpu(source[j+1]);
11277         }
11278 }

11279 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11280 {
11281         u32 i;
11282         u16 *target = (u16 *)_target;
11283         const __be16 *source = (const __be16 *)_source;
11284
11285         for (i = 0; i < n/2; i++)
11286                 target[i] = be16_to_cpu(source[i]);
11287 }
11288
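      /* allocate bp->arr with the length recorded in the FW file header
       * and use func() to convert the section from the FW image into
       * host byte order; jumps to lbl on allocation failure */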
11289 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11290         do {   \
11291                 u32 len = be32_to_cpu(fw_hdr->arr.len);   \
11292                 bp->arr = kmalloc(len, GFP_KERNEL);  \
11293                 if (!bp->arr) { \
11294                         printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11295                         goto lbl; \
11296                 } \
11297                 func(bp->firmware->data + \
11298                         be32_to_cpu(fw_hdr->arr.offset), \
11299                         (u8*)bp->arr, len); \
11300         } while (0)
11301
11302
11303 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11304 {
11305         char fw_file_name[40] = {0};
11306         int rc, offset;
11307         struct bnx2x_fw_file_hdr *fw_hdr;
11308
11309         /* Create a FW file name */
11310         if (CHIP_IS_E1(bp))
11311                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11312         else
11313                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11314
11315         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11316                 BCM_5710_FW_MAJOR_VERSION,
11317                 BCM_5710_FW_MINOR_VERSION,
11318                 BCM_5710_FW_REVISION_VERSION,
11319                 BCM_5710_FW_ENGINEERING_VERSION);
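              /* yields "bnx2x-e1-<maj>.<min>.<rev>.<eng>.fw" or the
               * "bnx2x-e1h-" variant, with the version numbers taken
               * from the bundled FW headers */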
11320
11321         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11322
11323         rc = request_firmware(&bp->firmware, fw_file_name, dev);
11324         if (rc) {
11325                 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11326                 goto request_firmware_exit;
11327         }
11328
11329         rc = bnx2x_check_firmware(bp);
11330         if (rc) {
11331                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11332                 goto request_firmware_exit;
11333         }
11334
11335         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11336
11337         /* Initialize the pointers to the init arrays */
11338         /* Blob */
11339         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11340
11341         /* Opcodes */
11342         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11343
11344         /* Offsets */
11345         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11346
11347         /* STORMs firmware */
11348         bp->tsem_int_table_data = bp->firmware->data +
11349                 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11350         bp->tsem_pram_data      = bp->firmware->data +
11351                 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11352         bp->usem_int_table_data = bp->firmware->data +
11353                 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11354         bp->usem_pram_data      = bp->firmware->data +
11355                 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11356         bp->xsem_int_table_data = bp->firmware->data +
11357                 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11358         bp->xsem_pram_data      = bp->firmware->data +
11359                 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11360         bp->csem_int_table_data = bp->firmware->data +
11361                 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11362         bp->csem_pram_data      = bp->firmware->data +
11363                 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11364
11365         return 0;
11366 init_offsets_alloc_err:
11367         kfree(bp->init_ops);
11368 init_ops_alloc_err:
11369         kfree(bp->init_data);
11370 request_firmware_exit:
11371         release_firmware(bp->firmware);
11372
11373         return rc;
11374 }
11375
11376
11377
11378 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11379                                     const struct pci_device_id *ent)
11380 {
11381         static int version_printed;
11382         struct net_device *dev = NULL;
11383         struct bnx2x *bp;
11384         int rc;
11385
11386         if (version_printed++ == 0)
11387                 printk(KERN_INFO "%s", version);
11388
11389         /* dev is zeroed in alloc_etherdev_mq() */
11390         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11391         if (!dev) {
11392                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11393                 return -ENOMEM;
11394         }
11395
11396         bp = netdev_priv(dev);
11397         bp->msglevel = debug;
11398
11399         rc = bnx2x_init_dev(pdev, dev);
11400         if (rc < 0) {
11401                 free_netdev(dev);
11402                 return rc;
11403         }
11404
11405         pci_set_drvdata(pdev, dev);
11406
11407         rc = bnx2x_init_bp(bp);
11408         if (rc)
11409                 goto init_one_exit;
11410
11411         /* Set init arrays */
11412         rc = bnx2x_init_firmware(bp, &pdev->dev);
11413         if (rc) {
11414                 printk(KERN_ERR PFX "Error loading firmware\n");
11415                 goto init_one_exit;
11416         }
11417
11418         rc = register_netdev(dev);
11419         if (rc) {
11420                 dev_err(&pdev->dev, "Cannot register net device\n");
11421                 goto init_one_exit;
11422         }
11423
11424         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11425                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11426                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11427                bnx2x_get_pcie_width(bp),
11428                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11429                dev->base_addr, bp->pdev->irq);
11430         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11431
11432         return 0;
11433
11434 init_one_exit:
11435         if (bp->regview)
11436                 iounmap(bp->regview);
11437
11438         if (bp->doorbells)
11439                 iounmap(bp->doorbells);
11440
11441         free_netdev(dev);
11442
11443         if (atomic_read(&pdev->enable_cnt) == 1)
11444                 pci_release_regions(pdev);
11445
11446         pci_disable_device(pdev);
11447         pci_set_drvdata(pdev, NULL);
11448
11449         return rc;
11450 }
11451
11452 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11453 {
11454         struct net_device *dev = pci_get_drvdata(pdev);
11455         struct bnx2x *bp;
11456
11457         if (!dev) {
11458                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11459                 return;
11460         }
11461         bp = netdev_priv(dev);
11462
11463         unregister_netdev(dev);
11464
11465         kfree(bp->init_ops_offsets);
11466         kfree(bp->init_ops);
11467         kfree(bp->init_data);
11468         release_firmware(bp->firmware);
11469
11470         if (bp->regview)
11471                 iounmap(bp->regview);
11472
11473         if (bp->doorbells)
11474                 iounmap(bp->doorbells);
11475
11476         free_netdev(dev);
11477
11478         if (atomic_read(&pdev->enable_cnt) == 1)
11479                 pci_release_regions(pdev);
11480
11481         pci_disable_device(pdev);
11482         pci_set_drvdata(pdev, NULL);
11483 }
11484
11485 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11486 {
11487         struct net_device *dev = pci_get_drvdata(pdev);
11488         struct bnx2x *bp;
11489
11490         if (!dev) {
11491                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11492                 return -ENODEV;
11493         }
11494         bp = netdev_priv(dev);
11495
11496         rtnl_lock();
11497
11498         pci_save_state(pdev);
11499
11500         if (!netif_running(dev)) {
11501                 rtnl_unlock();
11502                 return 0;
11503         }
11504
11505         netif_device_detach(dev);
11506
11507         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11508
11509         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11510
11511         rtnl_unlock();
11512
11513         return 0;
11514 }
11515
11516 static int bnx2x_resume(struct pci_dev *pdev)
11517 {
11518         struct net_device *dev = pci_get_drvdata(pdev);
11519         struct bnx2x *bp;
11520         int rc;
11521
11522         if (!dev) {
11523                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11524                 return -ENODEV;
11525         }
11526         bp = netdev_priv(dev);
11527
11528         rtnl_lock();
11529
11530         pci_restore_state(pdev);
11531
11532         if (!netif_running(dev)) {
11533                 rtnl_unlock();
11534                 return 0;
11535         }
11536
11537         bnx2x_set_power_state(bp, PCI_D0);
11538         netif_device_attach(dev);
11539
11540         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11541
11542         rtnl_unlock();
11543
11544         return rc;
11545 }
11546
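      /* tear down driver state after a fatal PCI error; unlike the
       * normal unload path this skips the FW/MCP shutdown handshake,
       * since the device may no longer be accessible */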
11547 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11548 {
11549         int i;
11550
11551         bp->state = BNX2X_STATE_ERROR;
11552
11553         bp->rx_mode = BNX2X_RX_MODE_NONE;
11554
11555         bnx2x_netif_stop(bp, 0);
11556
11557         del_timer_sync(&bp->timer);
11558         bp->stats_state = STATS_STATE_DISABLED;
11559         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11560
11561         /* Release IRQs */
11562         bnx2x_free_irq(bp);
11563
11564         if (CHIP_IS_E1(bp)) {
11565                 struct mac_configuration_cmd *config =
11566                                                 bnx2x_sp(bp, mcast_config);
11567
11568                 for (i = 0; i < config->hdr.length; i++)
11569                         CAM_INVALIDATE(config->config_table[i]);
11570         }
11571
11572         /* Free SKBs, SGEs, TPA pool and driver internals */
11573         bnx2x_free_skbs(bp);
11574         for_each_rx_queue(bp, i)
11575                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11576         for_each_rx_queue(bp, i)
11577                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11578         bnx2x_free_mem(bp);
11579
11580         bp->state = BNX2X_STATE_CLOSED;
11581
11582         netif_carrier_off(bp->dev);
11583
11584         return 0;
11585 }
11586
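      /* after a slot reset, re-read the shared memory base and
       * re-validate the MCP signature before reattaching */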
11587 static void bnx2x_eeh_recover(struct bnx2x *bp)
11588 {
11589         u32 val;
11590
11591         mutex_init(&bp->port.phy_mutex);
11592
11593         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11594         bp->link_params.shmem_base = bp->common.shmem_base;
11595         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11596
11597         if (!bp->common.shmem_base ||
11598             (bp->common.shmem_base < 0xA0000) ||
11599             (bp->common.shmem_base >= 0xC0000)) {
11600                 BNX2X_DEV_INFO("MCP not active\n");
11601                 bp->flags |= NO_MCP_FLAG;
11602                 return;
11603         }
11604
11605         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11606         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11607                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11608                 BNX2X_ERR("BAD MCP validity signature\n");
11609
11610         if (!BP_NOMCP(bp)) {
11611                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11612                               & DRV_MSG_SEQ_NUMBER_MASK);
11613                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11614         }
11615 }
11616
11617 /**
11618  * bnx2x_io_error_detected - called when PCI error is detected
11619  * @pdev: Pointer to PCI device
11620  * @state: The current pci connection state
11621  *
11622  * This function is called after a PCI bus error affecting
11623  * this device has been detected.
11624  */
11625 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11626                                                 pci_channel_state_t state)
11627 {
11628         struct net_device *dev = pci_get_drvdata(pdev);
11629         struct bnx2x *bp = netdev_priv(dev);
11630
11631         rtnl_lock();
11632
11633         netif_device_detach(dev);
11634
11635         if (netif_running(dev))
11636                 bnx2x_eeh_nic_unload(bp);
11637
11638         pci_disable_device(pdev);
11639
11640         rtnl_unlock();
11641
11642         /* Request a slot reset */
11643         return PCI_ERS_RESULT_NEED_RESET;
11644 }
11645
11646 /**
11647  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11648  * @pdev: Pointer to PCI device
11649  *
11650  * Restart the card from scratch, as if from a cold boot.
11651  */
11652 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11653 {
11654         struct net_device *dev = pci_get_drvdata(pdev);
11655         struct bnx2x *bp = netdev_priv(dev);
11656
11657         rtnl_lock();
11658
11659         if (pci_enable_device(pdev)) {
11660                 dev_err(&pdev->dev,
11661                         "Cannot re-enable PCI device after reset\n");
11662                 rtnl_unlock();
11663                 return PCI_ERS_RESULT_DISCONNECT;
11664         }
11665
11666         pci_set_master(pdev);
11667         pci_restore_state(pdev);
11668
11669         if (netif_running(dev))
11670                 bnx2x_set_power_state(bp, PCI_D0);
11671
11672         rtnl_unlock();
11673
11674         return PCI_ERS_RESULT_RECOVERED;
11675 }
11676
11677 /**
11678  * bnx2x_io_resume - called when traffic can start flowing again
11679  * @pdev: Pointer to PCI device
11680  *
11681  * This callback is called when the error recovery driver tells us that
11682  * it's OK to resume normal operation.
11683  */
11684 static void bnx2x_io_resume(struct pci_dev *pdev)
11685 {
11686         struct net_device *dev = pci_get_drvdata(pdev);
11687         struct bnx2x *bp = netdev_priv(dev);
11688
11689         rtnl_lock();
11690
11691         bnx2x_eeh_recover(bp);
11692
11693         if (netif_running(dev))
11694                 bnx2x_nic_load(bp, LOAD_NORMAL);
11695
11696         netif_device_attach(dev);
11697
11698         rtnl_unlock();
11699 }
11700
11701 static struct pci_error_handlers bnx2x_err_handler = {
11702         .error_detected = bnx2x_io_error_detected,
11703         .slot_reset     = bnx2x_io_slot_reset,
11704         .resume         = bnx2x_io_resume,
11705 };
11706
11707 static struct pci_driver bnx2x_pci_driver = {
11708         .name        = DRV_MODULE_NAME,
11709         .id_table    = bnx2x_pci_tbl,
11710         .probe       = bnx2x_init_one,
11711         .remove      = __devexit_p(bnx2x_remove_one),
11712         .suspend     = bnx2x_suspend,
11713         .resume      = bnx2x_resume,
11714         .err_handler = &bnx2x_err_handler,
11715 };
11716
11717 static int __init bnx2x_init(void)
11718 {
11719         int ret;
11720
11721         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11722         if (bnx2x_wq == NULL) {
11723                 printk(KERN_ERR PFX "Cannot create workqueue\n");
11724                 return -ENOMEM;
11725         }
11726
11727         ret = pci_register_driver(&bnx2x_pci_driver);
11728         if (ret) {
11729                 printk(KERN_ERR PFX "Cannot register driver\n");
11730                 destroy_workqueue(bnx2x_wq);
11731         }
11732         return ret;
11733 }
11734
11735 static void __exit bnx2x_cleanup(void)
11736 {
11737         pci_unregister_driver(&bnx2x_pci_driver);
11738
11739         destroy_workqueue(bnx2x_wq);
11740 }
11741
11742 module_init(bnx2x_init);
11743 module_exit(bnx2x_cleanup);
11744
11745