/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.48.113-1"
#define DRV_MODULE_RELDATE      "2009/07/21"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1               "bnx2x-e1-"
#define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
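/* Editor's gloss on the two helpers below: they drive the GRC window in
 * PCI config space.  The target GRC address is loaded into
 * PCICFG_GRC_ADDRESS, the value moves through PCICFG_GRC_DATA, and the
 * window is then pointed back at PCICFG_VENDOR_ID_OFFSET so that stray
 * config cycles cannot reach device internals.
 */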
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

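/* Editor's gloss: bnx2x_write_dmae() copies len32 dwords from host memory
 * at dma_addr into GRC space at dst_addr via the DMAE block.  The command
 * is posted with bnx2x_post_dmae() and completion is detected by polling
 * the wb_comp slowpath word for DMAE_COMP_VAL, with roughly a 1 ms timeout
 * budget on real silicon (200 polls of 5 us each).
 */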
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

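/* Editor's gloss: the read side mirrors bnx2x_write_dmae(), copying GRC
 * space at src_addr into the slowpath wb_data buffer.  Judging by the
 * memset below, callers apparently consume at most the four dwords of
 * that buffer.
 */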
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

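/* Editor's gloss: each STORM processor keeps an assert list in its
 * internal memory.  The function below walks the X/T/C/USTORM lists,
 * printing 16-byte entries until one starts with
 * COMMON_ASM_INVALID_ASSERT_OPCODE (the end marker), and returns the
 * number of asserts found.
 */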
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

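/* Editor's gloss: dump the firmware trace kept in the MCP scratchpad.
 * The word at scratch offset 0xf104 apparently holds the current write
 * mark of a circular text buffer; the buffer is printed as two wrapped
 * halves, from the mark to the end of the buffer and then from its
 * start back up to the mark.
 */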
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

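/* Editor's gloss: emergency state dump.  Statistics are frozen first,
 * then the slowpath and per-queue indices are printed, a window of each
 * Rx/Tx ring around the current consumers is walked, and the dump ends
 * with the firmware trace and the STORM assert lists.
 */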
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

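/* Editor's gloss: quiesce interrupt processing.  Bumping intr_sem makes
 * the ISRs bail out early, disable_hw optionally masks the HC as well,
 * and synchronize_irq() is then run on every vector (or on the single
 * INTx/MSI line) before the slowpath task is cancelled and flushed.
 */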
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

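/* Editor's gloss: bnx2x_ack_sb() acknowledges a status block by writing
 * an igu_ack_register image to this port's HC command register; the op
 * field selects the IGU interrupt mode and the update flag controls
 * whether the status block index is stored.
 */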
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                __netif_tx_lock(txq, smp_processor_id());

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

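/* Editor's gloss: SGE bookkeeping.  sge_mask holds one bit per SGE
 * entry; bits are cleared as the FW consumes pages, and the producer may
 * only advance over mask elements (RX_SGE_MASK_ELEM_SZ entries each)
 * that have been fully consumed, which is what the loop below checks.
 */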
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

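/* Editor's gloss: TPA start.  The pre-allocated skb from tpa_pool[queue]
 * takes the producer slot so the ring stays full, while the partially
 * received buffer at the consumer is parked in the pool (still DMA
 * mapped) until the TPA stop completion arrives.
 */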
1233 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1234                             struct sk_buff *skb, u16 cons, u16 prod)
1235 {
1236         struct bnx2x *bp = fp->bp;
1237         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1238         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1239         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1240         dma_addr_t mapping;
1241
1242         /* move empty skb from pool to prod and map it */
1243         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1244         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1245                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1246         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1247
1248         /* move partial skb from cons to pool (don't unmap yet) */
1249         fp->tpa_pool[queue] = *cons_rx_buf;
1250
1251         /* mark bin state as start - print error if current state != stop */
1252         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1253                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1254
1255         fp->tpa_state[queue] = BNX2X_TPA_START;
1256
1257         /* point prod_bd to new skb */
1258         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1259         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1260
1261 #ifdef BNX2X_STOP_ON_ERROR
1262         fp->tpa_queue_used |= (1 << queue);
1263 #ifdef __powerpc64__
1264         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1265 #else
1266         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1267 #endif
1268            fp->tpa_queue_used);
1269 #endif
1270 }
1271
1272 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1273                                struct sk_buff *skb,
1274                                struct eth_fast_path_rx_cqe *fp_cqe,
1275                                u16 cqe_idx)
1276 {
1277         struct sw_rx_page *rx_pg, old_rx_pg;
1278         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1279         u32 i, frag_len, frag_size, pages;
1280         int err;
1281         int j;
1282
1283         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1284         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1285
1286         /* This is needed in order to enable forwarding support */
1287         if (frag_size)
1288                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1289                                                max(frag_size, (u32)len_on_bd));
1290
1291 #ifdef BNX2X_STOP_ON_ERROR
1292         if (pages >
1293             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1294                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1295                           pages, cqe_idx);
1296                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1297                           fp_cqe->pkt_len, len_on_bd);
1298                 bnx2x_panic();
1299                 return -EINVAL;
1300         }
1301 #endif
1302
1303         /* Run through the SGL and compose the fragmented skb */
1304         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1305                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1306
1307                 /* FW gives the indices of the SGE as if the ring is an array
1308                    (meaning that "next" element will consume 2 indices) */
1309                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1310                 rx_pg = &fp->rx_page_ring[sge_idx];
1311                 old_rx_pg = *rx_pg;
1312
1313                 /* If we fail to allocate a substitute page, we simply stop
1314                    where we are and drop the whole packet */
1315                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1316                 if (unlikely(err)) {
1317                         fp->eth_q_stats.rx_skb_alloc_failed++;
1318                         return err;
1319                 }
1320
1321                 /* Unmap the page as we r going to pass it to the stack */
1322                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1323                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1324
1325                 /* Add one frag and update the appropriate fields in the skb */
1326                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1327
1328                 skb->data_len += frag_len;
1329                 skb->truesize += frag_len;
1330                 skb->len += frag_len;
1331
1332                 frag_size -= frag_len;
1333         }
1334
1335         return 0;
1336 }
1337
1338 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1339                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1340                            u16 cqe_idx)
1341 {
1342         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1343         struct sk_buff *skb = rx_buf->skb;
1344         /* alloc new skb */
1345         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1346
1347         /* Unmap skb in the pool anyway, as we are going to change
1348            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1349            fails. */
1350         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1351                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1352
1353         if (likely(new_skb)) {
1354                 /* fix ip xsum and give it to the stack */
1355                 /* (no need to map the new skb) */
1356 #ifdef BCM_VLAN
1357                 int is_vlan_cqe =
1358                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1359                          PARSING_FLAGS_VLAN);
1360                 int is_not_hwaccel_vlan_cqe =
1361                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1362 #endif
1363
1364                 prefetch(skb);
1365                 prefetch(((char *)(skb)) + 128);
1366
1367 #ifdef BNX2X_STOP_ON_ERROR
1368                 if (pad + len > bp->rx_buf_size) {
1369                         BNX2X_ERR("skb_put is about to fail...  "
1370                                   "pad %d  len %d  rx_buf_size %d\n",
1371                                   pad, len, bp->rx_buf_size);
1372                         bnx2x_panic();
1373                         return;
1374                 }
1375 #endif
1376
1377                 skb_reserve(skb, pad);
1378                 skb_put(skb, len);
1379
1380                 skb->protocol = eth_type_trans(skb, bp->dev);
1381                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1382
1383                 {
1384                         struct iphdr *iph;
1385
1386                         iph = (struct iphdr *)skb->data;
1387 #ifdef BCM_VLAN
1388                         /* If there is no Rx VLAN offloading -
1389                            take VLAN tag into an account */
1390                         if (unlikely(is_not_hwaccel_vlan_cqe))
1391                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1392 #endif
1393                         iph->check = 0;
1394                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1395                 }
1396
1397                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1398                                          &cqe->fast_path_cqe, cqe_idx)) {
1399 #ifdef BCM_VLAN
1400                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1401                             (!is_not_hwaccel_vlan_cqe))
1402                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1403                                                 le16_to_cpu(cqe->fast_path_cqe.
1404                                                             vlan_tag));
1405                         else
1406 #endif
1407                                 netif_receive_skb(skb);
1408                 } else {
1409                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1410                            " - dropping packet!\n");
1411                         dev_kfree_skb(skb);
1412                 }
1413
1414
1415                 /* put new skb in bin */
1416                 fp->tpa_pool[queue].skb = new_skb;
1417
1418         } else {
1419                 /* else drop the packet and keep the buffer in the bin */
1420                 DP(NETIF_MSG_RX_STATUS,
1421                    "Failed to allocate new skb - dropping packet!\n");
1422                 fp->eth_q_stats.rx_skb_alloc_failed++;
1423         }
1424
1425         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1426 }
1427
1428 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1429                                         struct bnx2x_fastpath *fp,
1430                                         u16 bd_prod, u16 rx_comp_prod,
1431                                         u16 rx_sge_prod)
1432 {
1433         struct ustorm_eth_rx_producers rx_prods = {0};
1434         int i;
1435
1436         /* Update producers */
1437         rx_prods.bd_prod = bd_prod;
1438         rx_prods.cqe_prod = rx_comp_prod;
1439         rx_prods.sge_prod = rx_sge_prod;
1440
1441         /*
1442          * Make sure that the BD and SGE data is updated before updating the
1443          * producers since FW might read the BD/SGE right after the producer
1444          * is updated.
1445          * This is only applicable for weak-ordered memory model archs such
1446          * as IA-64. The following barrier is also mandatory since FW will
1447          * assume BDs have buffers.
1448          */
1449         wmb();
1450
1451         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1452                 REG_WR(bp, BAR_USTRORM_INTMEM +
1453                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1454                        ((u32 *)&rx_prods)[i]);
1455
1456         mmiowb(); /* keep prod updates ordered */
1457
1458         DP(NETIF_MSG_RX_STATUS,
1459            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1460            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1461 }
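
/*
 * Informal ordering sketch for the producer update above, derived from
 * the barriers already present in bnx2x_update_rx_prod():
 *
 *   1. CPU fills the BDs/SGEs in host memory.
 *   2. wmb()        - makes those stores visible before step 3.
 *   3. REG_WR loop  - copies rx_prods word by word into ustorm internal
 *                     memory, where the FW picks it up.
 *   4. mmiowb()     - keeps the MMIO writes ordered in case another CPU
 *                     updates the producers next.
 */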
1462
1463 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1464 {
1465         struct bnx2x *bp = fp->bp;
1466         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1467         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1468         int rx_pkt = 0;
1469
1470 #ifdef BNX2X_STOP_ON_ERROR
1471         if (unlikely(bp->panic))
1472                 return 0;
1473 #endif
1474
1475         /* The CQ "next element" is the same size as a regular element,
1476            so the consumer index can be handled uniformly here */
1477         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1478         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1479                 hw_comp_cons++;
1480
1481         bd_cons = fp->rx_bd_cons;
1482         bd_prod = fp->rx_bd_prod;
1483         bd_prod_fw = bd_prod;
1484         sw_comp_cons = fp->rx_comp_cons;
1485         sw_comp_prod = fp->rx_comp_prod;
1486
1487         /* Memory barrier necessary as speculative reads of the rx
1488          * buffer can be ahead of the index in the status block
1489          */
1490         rmb();
1491
1492         DP(NETIF_MSG_RX_STATUS,
1493            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1494            fp->index, hw_comp_cons, sw_comp_cons);
1495
1496         while (sw_comp_cons != hw_comp_cons) {
1497                 struct sw_rx_bd *rx_buf = NULL;
1498                 struct sk_buff *skb;
1499                 union eth_rx_cqe *cqe;
1500                 u8 cqe_fp_flags;
1501                 u16 len, pad;
1502
1503                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1504                 bd_prod = RX_BD(bd_prod);
1505                 bd_cons = RX_BD(bd_cons);
1506
1507                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1508                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1509
1510                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1511                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1512                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1513                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1514                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1515                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1516
1517                 /* is this a slowpath msg? */
1518                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1519                         bnx2x_sp_event(fp, cqe);
1520                         goto next_cqe;
1521
1522                 /* this is an rx packet */
1523                 } else {
1524                         rx_buf = &fp->rx_buf_ring[bd_cons];
1525                         skb = rx_buf->skb;
1526                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1527                         pad = cqe->fast_path_cqe.placement_offset;
1528
1529                         /* If CQE is marked both TPA_START and TPA_END
1530                            it is a non-TPA CQE */
1531                         if ((!fp->disable_tpa) &&
1532                             (TPA_TYPE(cqe_fp_flags) !=
1533                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1534                                 u16 queue = cqe->fast_path_cqe.queue_index;
1535
1536                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1537                                         DP(NETIF_MSG_RX_STATUS,
1538                                            "calling tpa_start on queue %d\n",
1539                                            queue);
1540
1541                                         bnx2x_tpa_start(fp, queue, skb,
1542                                                         bd_cons, bd_prod);
1543                                         goto next_rx;
1544                                 }
1545
1546                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1547                                         DP(NETIF_MSG_RX_STATUS,
1548                                            "calling tpa_stop on queue %d\n",
1549                                            queue);
1550
1551                                         if (!BNX2X_RX_SUM_FIX(cqe))
1552                                                 BNX2X_ERR("STOP on non-TCP "
1553                                                           "data\n");
1554
1555                                         /* This is the size of the linear
1556                                            data on this skb */
1557                                         len = le16_to_cpu(cqe->fast_path_cqe.
1558                                                                 len_on_bd);
1559                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1560                                                     len, cqe, comp_ring_cons);
1561 #ifdef BNX2X_STOP_ON_ERROR
1562                                         if (bp->panic)
1563                                                 return 0;
1564 #endif
1565
1566                                         bnx2x_update_sge_prod(fp,
1567                                                         &cqe->fast_path_cqe);
1568                                         goto next_cqe;
1569                                 }
1570                         }
1571
1572                         pci_dma_sync_single_for_device(bp->pdev,
1573                                         pci_unmap_addr(rx_buf, mapping),
1574                                                        pad + RX_COPY_THRESH,
1575                                                        PCI_DMA_FROMDEVICE);
1576                         prefetch(skb);
1577                         prefetch(((char *)(skb)) + 128);
1578
1579                         /* is this an error packet? */
1580                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1581                                 DP(NETIF_MSG_RX_ERR,
1582                                    "ERROR  flags %x  rx packet %u\n",
1583                                    cqe_fp_flags, sw_comp_cons);
1584                                 fp->eth_q_stats.rx_err_discard_pkt++;
1585                                 goto reuse_rx;
1586                         }
1587
1588                         /* Since we don't have a jumbo ring
1589                          * copy small packets if mtu > 1500
1590                          */
1591                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1592                             (len <= RX_COPY_THRESH)) {
1593                                 struct sk_buff *new_skb;
1594
1595                                 new_skb = netdev_alloc_skb(bp->dev,
1596                                                            len + pad);
1597                                 if (new_skb == NULL) {
1598                                         DP(NETIF_MSG_RX_ERR,
1599                                            "ERROR  packet dropped "
1600                                            "because of alloc failure\n");
1601                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1602                                         goto reuse_rx;
1603                                 }
1604
1605                                 /* aligned copy */
1606                                 skb_copy_from_linear_data_offset(skb, pad,
1607                                                     new_skb->data + pad, len);
1608                                 skb_reserve(new_skb, pad);
1609                                 skb_put(new_skb, len);
1610
1611                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1612
1613                                 skb = new_skb;
1614
1615                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1616                                 pci_unmap_single(bp->pdev,
1617                                         pci_unmap_addr(rx_buf, mapping),
1618                                                  bp->rx_buf_size,
1619                                                  PCI_DMA_FROMDEVICE);
1620                                 skb_reserve(skb, pad);
1621                                 skb_put(skb, len);
1622
1623                         } else {
1624                                 DP(NETIF_MSG_RX_ERR,
1625                                    "ERROR  packet dropped because "
1626                                    "of alloc failure\n");
1627                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1628 reuse_rx:
1629                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1630                                 goto next_rx;
1631                         }
1632
1633                         skb->protocol = eth_type_trans(skb, bp->dev);
1634
1635                         skb->ip_summed = CHECKSUM_NONE;
1636                         if (bp->rx_csum) {
1637                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1638                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1639                                 else
1640                                         fp->eth_q_stats.hw_csum_err++;
1641                         }
1642                 }
1643
1644                 skb_record_rx_queue(skb, fp->index);
1645 #ifdef BCM_VLAN
1646                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1647                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1648                      PARSING_FLAGS_VLAN))
1649                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1650                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1651                 else
1652 #endif
1653                         netif_receive_skb(skb);
1654
1655
1656 next_rx:
1657                 rx_buf->skb = NULL;
1658
1659                 bd_cons = NEXT_RX_IDX(bd_cons);
1660                 bd_prod = NEXT_RX_IDX(bd_prod);
1661                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1662                 rx_pkt++;
1663 next_cqe:
1664                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1665                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1666
1667                 if (rx_pkt == budget)
1668                         break;
1669         } /* while */
1670
1671         fp->rx_bd_cons = bd_cons;
1672         fp->rx_bd_prod = bd_prod_fw;
1673         fp->rx_comp_cons = sw_comp_cons;
1674         fp->rx_comp_prod = sw_comp_prod;
1675
1676         /* Update producers */
1677         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1678                              fp->rx_sge_prod);
1679
1680         fp->rx_pkt += rx_pkt;
1681         fp->rx_calls++;
1682
1683         return rx_pkt;
1684 }
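
/*
 * Note on the "& MAX_RCQ_DESC_CNT" test near the top of bnx2x_rx_int():
 * the last CQE slot of each RCQ page holds a next-page pointer rather
 * than a real completion, so the index must hop over it. Assuming, for
 * example, 128 CQE slots per page (MAX_RCQ_DESC_CNT == 127), a HW index
 * of 127 is bumped to 128 - the first real CQE of the next page. The
 * actual per-page count follows from BCM_PAGE_SIZE / sizeof(union
 * eth_rx_cqe).
 */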
1685
1686 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1687 {
1688         struct bnx2x_fastpath *fp = fp_cookie;
1689         struct bnx2x *bp = fp->bp;
1690         int index = fp->index;
1691
1692         /* Return here if interrupt is disabled */
1693         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1694                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1695                 return IRQ_HANDLED;
1696         }
1697
1698         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1699            index, fp->sb_id);
1700         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1701
1702 #ifdef BNX2X_STOP_ON_ERROR
1703         if (unlikely(bp->panic))
1704                 return IRQ_HANDLED;
1705 #endif
1706
1707         prefetch(fp->rx_cons_sb);
1708         prefetch(fp->tx_cons_sb);
1709         prefetch(&fp->status_blk->c_status_block.status_block_index);
1710         prefetch(&fp->status_blk->u_status_block.status_block_index);
1711
1712         napi_schedule(&bnx2x_fp(bp, index, napi));
1713
1714         return IRQ_HANDLED;
1715 }
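
/*
 * MSI-X fastpath flow in brief: the ack above leaves the status block
 * interrupt disabled (IGU_INT_DISABLE) and hands the ring to NAPI; the
 * poll routine is then responsible for re-enabling the IGU interrupt
 * once the ring is drained. The prefetches warm the lines the poll
 * loop touches first.
 */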
1716
1717 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1718 {
1719         struct bnx2x *bp = netdev_priv(dev_instance);
1720         u16 status = bnx2x_ack_int(bp);
1721         u16 mask;
1722
1723         /* Return here if interrupt is shared and it's not for us */
1724         if (unlikely(status == 0)) {
1725                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1726                 return IRQ_NONE;
1727         }
1728         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1729
1730         /* Return here if interrupt is disabled */
1731         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1732                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1733                 return IRQ_HANDLED;
1734         }
1735
1736 #ifdef BNX2X_STOP_ON_ERROR
1737         if (unlikely(bp->panic))
1738                 return IRQ_HANDLED;
1739 #endif
1740
1741         mask = 0x2 << bp->fp[0].sb_id;
1742         if (status & mask) {
1743                 struct bnx2x_fastpath *fp = &bp->fp[0];
1744
1745                 prefetch(fp->rx_cons_sb);
1746                 prefetch(fp->tx_cons_sb);
1747                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1748                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1749
1750                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1751
1752                 status &= ~mask;
1753         }
1754
1755
1756         if (unlikely(status & 0x1)) {
1757                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1758
1759                 status &= ~0x1;
1760                 if (!status)
1761                         return IRQ_HANDLED;
1762         }
1763
1764         if (status)
1765                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1766                    status);
1767
1768         return IRQ_HANDLED;
1769 }
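
/*
 * INTA status-word layout as used above (inferred from the code rather
 * than from a spec): bit 0 signals slowpath/default-SB work, and each
 * fastpath status block contributes the bit (0x2 << sb_id). With
 * sb_id == 0, for instance, a status of 0x3 means "fastpath 0 plus
 * slowpath".
 */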
1770
1771 /* end of fast path */
1772
1773 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1774
1775 /* Link */
1776
1777 /*
1778  * General service functions
1779  */
1780
1781 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1782 {
1783         u32 lock_status;
1784         u32 resource_bit = (1 << resource);
1785         int func = BP_FUNC(bp);
1786         u32 hw_lock_control_reg;
1787         int cnt;
1788
1789         /* Validating that the resource is within range */
1790         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1791                 DP(NETIF_MSG_HW,
1792                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1794                 return -EINVAL;
1795         }
1796
1797         if (func <= 5) {
1798                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1799         } else {
1800                 hw_lock_control_reg =
1801                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1802         }
1803
1804         /* Validating that the resource is not already taken */
1805         lock_status = REG_RD(bp, hw_lock_control_reg);
1806         if (lock_status & resource_bit) {
1807                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1808                    lock_status, resource_bit);
1809                 return -EEXIST;
1810         }
1811
1812         /* Try for 5 seconds, polling every 5ms */
1813         for (cnt = 0; cnt < 1000; cnt++) {
1814                 /* Try to acquire the lock */
1815                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1816                 lock_status = REG_RD(bp, hw_lock_control_reg);
1817                 if (lock_status & resource_bit)
1818                         return 0;
1819
1820                 msleep(5);
1821         }
1822         DP(NETIF_MSG_HW, "Timeout\n");
1823         return -EAGAIN;
1824 }
1825
1826 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1827 {
1828         u32 lock_status;
1829         u32 resource_bit = (1 << resource);
1830         int func = BP_FUNC(bp);
1831         u32 hw_lock_control_reg;
1832
1833         /* Validating that the resource is within range */
1834         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1835                 DP(NETIF_MSG_HW,
1836                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1837                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1838                 return -EINVAL;
1839         }
1840
1841         if (func <= 5) {
1842                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1843         } else {
1844                 hw_lock_control_reg =
1845                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1846         }
1847
1848         /* Validating that the resource is currently taken */
1849         lock_status = REG_RD(bp, hw_lock_control_reg);
1850         if (!(lock_status & resource_bit)) {
1851                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1852                    lock_status, resource_bit);
1853                 return -EFAULT;
1854         }
1855
1856         REG_WR(bp, hw_lock_control_reg, resource_bit);
1857         return 0;
1858 }
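
/*
 * The two helpers above are always used as a pair around accesses to
 * resources shared with the MCP or the other port. A minimal usage
 * sketch (mirroring the PHY-lock wrappers below):
 *
 *      if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO)) {
 *              ...touch the shared resource...
 *              bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 *      }
 */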
1859
1860 /* HW Lock for shared dual port PHYs */
1861 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1862 {
1863         mutex_lock(&bp->port.phy_mutex);
1864
1865         if (bp->port.need_hw_lock)
1866                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1867 }
1868
1869 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1870 {
1871         if (bp->port.need_hw_lock)
1872                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1873
1874         mutex_unlock(&bp->port.phy_mutex);
1875 }
1876
1877 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1878 {
1879         /* The GPIO should be swapped if swap register is set and active */
1880         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1881                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1882         int gpio_shift = gpio_num +
1883                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1884         u32 gpio_mask = (1 << gpio_shift);
1885         u32 gpio_reg;
1886         int value;
1887
1888         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1889                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1890                 return -EINVAL;
1891         }
1892
1893         /* read GPIO value */
1894         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1895
1896         /* get the requested pin value */
1897         if ((gpio_reg & gpio_mask) == gpio_mask)
1898                 value = 1;
1899         else
1900                 value = 0;
1901
1902         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1903
1904         return value;
1905 }
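
/*
 * Worked example of the shift math above, assuming
 * MISC_REGISTERS_GPIO_PORT_SHIFT == 4: reading GPIO 1 for port 0 on a
 * board where both the port-swap and strap-override registers are set
 * gives gpio_port == 1, hence gpio_shift == 1 + 4 == 5, and the pin
 * state is bit 5 of MISC_REG_GPIO.
 */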
1906
1907 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1908 {
1909         /* The GPIO should be swapped if swap register is set and active */
1910         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1911                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1912         int gpio_shift = gpio_num +
1913                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1914         u32 gpio_mask = (1 << gpio_shift);
1915         u32 gpio_reg;
1916
1917         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1918                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1919                 return -EINVAL;
1920         }
1921
1922         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1923         /* read GPIO and mask except the float bits */
1924         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1925
1926         switch (mode) {
1927         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1928                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1929                    gpio_num, gpio_shift);
1930                 /* clear FLOAT and set CLR */
1931                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1932                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1933                 break;
1934
1935         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1936                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1937                    gpio_num, gpio_shift);
1938                 /* clear FLOAT and set SET */
1939                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1940                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1941                 break;
1942
1943         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1944                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1945                    gpio_num, gpio_shift);
1946                 /* set FLOAT */
1947                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1948                 break;
1949
1950         default:
1951                 break;
1952         }
1953
1954         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1955         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1956
1957         return 0;
1958 }
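
/*
 * The read-modify-write in bnx2x_set_gpio() deliberately keeps only the
 * FLOAT bits on read (see the mask above): the SET and CLR positions
 * act as write strobes, so writing back stale SET/CLR bits could
 * re-drive other pins. This reading of the register semantics is
 * inferred from the masking, not from a datasheet.
 */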
1959
1960 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1961 {
1962         /* The GPIO should be swapped if swap register is set and active */
1963         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1964                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1965         int gpio_shift = gpio_num +
1966                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1967         u32 gpio_mask = (1 << gpio_shift);
1968         u32 gpio_reg;
1969
1970         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1971                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1972                 return -EINVAL;
1973         }
1974
1975         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1976         /* read GPIO int */
1977         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1978
1979         switch (mode) {
1980         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1981                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1982                                    "output low\n", gpio_num, gpio_shift);
1983                 /* clear SET and set CLR */
1984                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1985                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1986                 break;
1987
1988         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1989                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1990                                    "output high\n", gpio_num, gpio_shift);
1991                 /* clear CLR and set SET */
1992                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1993                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1994                 break;
1995
1996         default:
1997                 break;
1998         }
1999
2000         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2001         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2002
2003         return 0;
2004 }
2005
2006 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2007 {
2008         u32 spio_mask = (1 << spio_num);
2009         u32 spio_reg;
2010
2011         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2012             (spio_num > MISC_REGISTERS_SPIO_7)) {
2013                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2014                 return -EINVAL;
2015         }
2016
2017         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2018         /* read SPIO and mask except the float bits */
2019         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2020
2021         switch (mode) {
2022         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2023                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2024                 /* clear FLOAT and set CLR */
2025                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2026                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2027                 break;
2028
2029         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2030                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2031                 /* clear FLOAT and set SET */
2032                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2033                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2034                 break;
2035
2036         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2037                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2038                 /* set FLOAT */
2039                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2040                 break;
2041
2042         default:
2043                 break;
2044         }
2045
2046         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2047         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2048
2049         return 0;
2050 }
2051
2052 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2053 {
2054         switch (bp->link_vars.ieee_fc &
2055                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2056         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2057                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2058                                           ADVERTISED_Pause);
2059                 break;
2060
2061         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2062                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2063                                          ADVERTISED_Pause);
2064                 break;
2065
2066         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2067                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2068                 break;
2069
2070         default:
2071                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2072                                           ADVERTISED_Pause);
2073                 break;
2074         }
2075 }
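
/*
 * Summary of the mapping above, from the negotiated IEEE pause
 * advertisement to ethtool advertising bits:
 *
 *   PAUSE_NONE        -> clear Pause and Asym_Pause
 *   PAUSE_BOTH        -> set  Pause and Asym_Pause
 *   PAUSE_ASYMMETRIC  -> set  Asym_Pause only
 *   anything else     -> clear both (same as NONE)
 */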
2076
2077 static void bnx2x_link_report(struct bnx2x *bp)
2078 {
2079         if (bp->link_vars.link_up) {
2080                 if (bp->state == BNX2X_STATE_OPEN)
2081                         netif_carrier_on(bp->dev);
2082                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2083
2084                 printk("%d Mbps ", bp->link_vars.line_speed);
2085
2086                 if (bp->link_vars.duplex == DUPLEX_FULL)
2087                         printk("full duplex");
2088                 else
2089                         printk("half duplex");
2090
2091                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2092                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2093                                 printk(", receive ");
2094                                 if (bp->link_vars.flow_ctrl &
2095                                     BNX2X_FLOW_CTRL_TX)
2096                                         printk("& transmit ");
2097                         } else {
2098                                 printk(", transmit ");
2099                         }
2100                         printk("flow control ON");
2101                 }
2102                 printk("\n");
2103
2104         } else { /* link_down */
2105                 netif_carrier_off(bp->dev);
2106                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2107         }
2108 }
2109
2110 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2111 {
2112         if (!BP_NOMCP(bp)) {
2113                 u8 rc;
2114
2115                 /* Initialize link parameters structure variables */
2116                 /* It is recommended to turn off RX FC for jumbo frames
2117                    for better performance */
2118                 if (IS_E1HMF(bp))
2119                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2120                 else if (bp->dev->mtu > 5000)
2121                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2122                 else
2123                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2124
2125                 bnx2x_acquire_phy_lock(bp);
2126
2127                 if (load_mode == LOAD_DIAG)
2128                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2129
2130                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2131
2132                 bnx2x_release_phy_lock(bp);
2133
2134                 bnx2x_calc_fc_adv(bp);
2135
2136                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2137                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2138                         bnx2x_link_report(bp);
2139                 }
2140
2141                 return rc;
2142         }
2143         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2144         return -EINVAL;
2145 }
2146
2147 static void bnx2x_link_set(struct bnx2x *bp)
2148 {
2149         if (!BP_NOMCP(bp)) {
2150                 bnx2x_acquire_phy_lock(bp);
2151                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2152                 bnx2x_release_phy_lock(bp);
2153
2154                 bnx2x_calc_fc_adv(bp);
2155         } else
2156                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2157 }
2158
2159 static void bnx2x__link_reset(struct bnx2x *bp)
2160 {
2161         if (!BP_NOMCP(bp)) {
2162                 bnx2x_acquire_phy_lock(bp);
2163                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2164                 bnx2x_release_phy_lock(bp);
2165         } else
2166                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2167 }
2168
2169 static u8 bnx2x_link_test(struct bnx2x *bp)
2170 {
2171         u8 rc;
2172
2173         bnx2x_acquire_phy_lock(bp);
2174         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2175         bnx2x_release_phy_lock(bp);
2176
2177         return rc;
2178 }
2179
2180 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2181 {
2182         u32 r_param = bp->link_vars.line_speed / 8;
2183         u32 fair_periodic_timeout_usec;
2184         u32 t_fair;
2185
2186         memset(&(bp->cmng.rs_vars), 0,
2187                sizeof(struct rate_shaping_vars_per_port));
2188         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2189
2190         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2191         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2192
2193         /* this is the threshold below which no timer arming will occur.
2194            The 1.25 coefficient makes the threshold a little bigger
2195            than the real time, to compensate for timer inaccuracy */
2196         bp->cmng.rs_vars.rs_threshold =
2197                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2198
2199         /* resolution of fairness timer */
2200         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2201         /* For 10G it is 1000 usec; for 1G it is 10000 usec */
2202         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2203
2204         /* this is the threshold below which we won't arm the timer anymore */
2205         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2206
2207         /* we multiply by 1e3/8 to get bytes/msec.
2208            We don't want the accumulated credits to exceed
2209            t_fair*FAIR_MEM (the algorithm resolution) */
2210         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2211         /* since each tick is 4 usec */
2212         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2213 }
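
/*
 * Worked example for the above, assuming a 10G link (line_speed ==
 * 10000 Mbps) and RS_PERIODIC_TIMEOUT_USEC == 100:
 *
 *   r_param      = 10000 / 8          = 1250 bytes/usec
 *   rs_threshold = 100 * 1250 * 5 / 4 = 156250 bytes
 *   t_fair       = T_FAIR_COEF / 10000 (1000 usec, per the comment above)
 *
 * The figures follow from the comments in this function and are not
 * taken from the firmware spec.
 */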
2214
2215 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2216 {
2217         struct rate_shaping_vars_per_vn m_rs_vn;
2218         struct fairness_vars_per_vn m_fair_vn;
2219         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2220         u16 vn_min_rate, vn_max_rate;
2221         int i;
2222
2223         /* If function is hidden - set min and max to zeroes */
2224         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2225                 vn_min_rate = 0;
2226                 vn_max_rate = 0;
2227
2228         } else {
2229                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2230                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2231                 /* If fairness is enabled (not all min rates are zero) and
2232                    the current min rate is zero - set it to DEF_MIN_RATE.
2233                    This is a requirement of the algorithm. */
2234                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2235                         vn_min_rate = DEF_MIN_RATE;
2236                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2237                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2238         }
2239
2240         DP(NETIF_MSG_IFUP,
2241            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2242            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2243
2244         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2245         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2246
2247         /* global vn counter - maximal Mbps for this vn */
2248         m_rs_vn.vn_counter.rate = vn_max_rate;
2249
2250         /* quota - number of bytes transmitted in this period */
2251         m_rs_vn.vn_counter.quota =
2252                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2253
2254         if (bp->vn_weight_sum) {
2255                 /* credit for each period of the fairness algorithm:
2256                    number of bytes in T_FAIR (the vns share the port rate).
2257                    vn_weight_sum should not be larger than 10000, thus
2258                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2259                    than zero */
2260                 m_fair_vn.vn_credit_delta =
2261                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2262                                                  (8 * bp->vn_weight_sum))),
2263                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2264                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2265                    m_fair_vn.vn_credit_delta);
2266         }
2267
2268         /* Store it to internal memory */
2269         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2270                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2271                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2272                        ((u32 *)(&m_rs_vn))[i]);
2273
2274         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2275                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2276                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2277                        ((u32 *)(&m_fair_vn))[i]);
2278 }
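
/*
 * Example of the min/max extraction above: the MF config fields are in
 * units of 100 Mbps, so a min-BW field of 10 and a max-BW field of 50
 * become 1000 and 5000 Mbps. The resulting quota for the 5000 Mbps vn
 * over a 100 usec period is 5000 * 100 / 8 = 62500 bytes.
 */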
2279
2280
2281 /* This function is called upon link interrupt */
2282 static void bnx2x_link_attn(struct bnx2x *bp)
2283 {
2284         /* Make sure that we are synced with the current statistics */
2285         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2286
2287         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2288
2289         if (bp->link_vars.link_up) {
2290
2291                 /* dropless flow control */
2292                 if (CHIP_IS_E1H(bp)) {
2293                         int port = BP_PORT(bp);
2294                         u32 pause_enabled = 0;
2295
2296                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2297                                 pause_enabled = 1;
2298
2299                         REG_WR(bp, BAR_USTRORM_INTMEM +
2300                                USTORM_PAUSE_ENABLED_OFFSET(port),
2301                                pause_enabled);
2302                 }
2303
2304                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2305                         struct host_port_stats *pstats;
2306
2307                         pstats = bnx2x_sp(bp, port_stats);
2308                         /* reset old bmac stats */
2309                         memset(&(pstats->mac_stx[0]), 0,
2310                                sizeof(struct mac_stx));
2311                 }
2312                 if ((bp->state == BNX2X_STATE_OPEN) ||
2313                     (bp->state == BNX2X_STATE_DISABLED))
2314                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2315         }
2316
2317         /* indicate link status */
2318         bnx2x_link_report(bp);
2319
2320         if (IS_E1HMF(bp)) {
2321                 int port = BP_PORT(bp);
2322                 int func;
2323                 int vn;
2324
2325                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2326                         if (vn == BP_E1HVN(bp))
2327                                 continue;
2328
2329                         func = ((vn << 1) | port);
2330
2331                         /* Set the attention towards other drivers
2332                            on the same port */
2333                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2334                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2335                 }
2336
2337                 if (bp->link_vars.link_up) {
2338                         int i;
2339
2340                         /* Init rate shaping and fairness contexts */
2341                         bnx2x_init_port_minmax(bp);
2342
2343                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2344                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2345
2346                         /* Store it to internal memory */
2347                         for (i = 0;
2348                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2349                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2350                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2351                                        ((u32 *)(&bp->cmng))[i]);
2352                 }
2353         }
2354 }
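
/*
 * E1H function numbering as used in the loop above: func == (vn << 1) |
 * port, so vns 0..3 on port 1 map to functions 1, 3, 5 and 7. Poking a
 * general attention bit per sibling function makes the other drivers on
 * this port re-evaluate the link.
 */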
2355
2356 static void bnx2x__link_status_update(struct bnx2x *bp)
2357 {
2358         if (bp->state != BNX2X_STATE_OPEN)
2359                 return;
2360
2361         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2362
2363         if (bp->link_vars.link_up)
2364                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2365         else
2366                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2367
2368         /* indicate link status */
2369         bnx2x_link_report(bp);
2370 }
2371
2372 static void bnx2x_pmf_update(struct bnx2x *bp)
2373 {
2374         int port = BP_PORT(bp);
2375         u32 val;
2376
2377         bp->port.pmf = 1;
2378         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2379
2380         /* enable nig attention */
2381         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2382         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2383         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2384
2385         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2386 }
2387
2388 /* end of Link */
2389
2390 /* slow path */
2391
2392 /*
2393  * General service functions
2394  */
2395
2396 /* the slow path queue is odd since completions arrive on the fastpath ring */
2397 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2398                          u32 data_hi, u32 data_lo, int common)
2399 {
2400         int func = BP_FUNC(bp);
2401
2402         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2403            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2404            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2405            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2406            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2407
2408 #ifdef BNX2X_STOP_ON_ERROR
2409         if (unlikely(bp->panic))
2410                 return -EIO;
2411 #endif
2412
2413         spin_lock_bh(&bp->spq_lock);
2414
2415         if (!bp->spq_left) {
2416                 BNX2X_ERR("BUG! SPQ ring full!\n");
2417                 spin_unlock_bh(&bp->spq_lock);
2418                 bnx2x_panic();
2419                 return -EBUSY;
2420         }
2421
2422         /* CID needs the port number to be encoded in it */
2423         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2424                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2425                                      HW_CID(bp, cid)));
2426         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2427         if (common)
2428                 bp->spq_prod_bd->hdr.type |=
2429                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2430
2431         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2432         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2433
2434         bp->spq_left--;
2435
2436         if (bp->spq_prod_bd == bp->spq_last_bd) {
2437                 bp->spq_prod_bd = bp->spq;
2438                 bp->spq_prod_idx = 0;
2439                 DP(NETIF_MSG_TIMER, "end of spq\n");
2440
2441         } else {
2442                 bp->spq_prod_bd++;
2443                 bp->spq_prod_idx++;
2444         }
2445
2446         /* Make sure that BD data is updated before writing the producer */
2447         wmb();
2448
2449         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2450                bp->spq_prod_idx);
2451
2452         mmiowb();
2453
2454         spin_unlock_bh(&bp->spq_lock);
2455         return 0;
2456 }
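
/*
 * A caller hands bnx2x_sp_post() a ramrod command plus a 64-bit datum
 * split into data_hi/data_lo; "common" marks ramrods not bound to one
 * connection. Purely as an illustration (command and arguments are
 * hypothetical here), a post might look like:
 *
 *      bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, cid,
 *                    U64_HI(mapping), U64_LO(mapping), 0);
 */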
2457
2458 /* acquire split MCP access lock register */
2459 static int bnx2x_acquire_alr(struct bnx2x *bp)
2460 {
2461         u32 i, j, val;
2462         int rc = 0;
2463
2464         might_sleep();
2465         i = 100;
2466         for (j = 0; j < i*10; j++) {
2467                 val = (1UL << 31);
2468                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2469                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2470                 if (val & (1UL << 31))
2471                         break;
2472
2473                 msleep(5);
2474         }
2475         if (!(val & (1UL << 31))) {
2476                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2477                 rc = -EBUSY;
2478         }
2479
2480         return rc;
2481 }
2482
2483 /* release split MCP access lock register */
2484 static void bnx2x_release_alr(struct bnx2x *bp)
2485 {
2486         u32 val = 0;
2487
2488         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2489 }
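
/*
 * The ALR above is a simple test-and-set on bit 31 of the register at
 * GRCBASE_MCP + 0x9c: writing the bit requests the lock, reading it
 * back set means we own it, and writing 0 releases it. The acquire
 * path retries for roughly 100 * 10 * 5ms == 5 seconds before failing
 * with -EBUSY.
 */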
2490
2491 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2492 {
2493         struct host_def_status_block *def_sb = bp->def_status_blk;
2494         u16 rc = 0;
2495
2496         barrier(); /* status block is written to by the chip */
2497         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2498                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2499                 rc |= 1;
2500         }
2501         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2502                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2503                 rc |= 2;
2504         }
2505         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2506                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2507                 rc |= 4;
2508         }
2509         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2510                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2511                 rc |= 8;
2512         }
2513         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2514                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2515                 rc |= 16;
2516         }
2517         return rc;
2518 }
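
/*
 * The value returned above is a bitmap of which default-SB sections
 * advanced: bit 0 - attention bits, bit 1 - cstorm, bit 2 - ustorm,
 * bit 3 - xstorm, bit 4 - tstorm. A caller can test (rc & 1), for
 * instance, to decide whether the attention handler needs to run.
 */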
2519
2520 /*
2521  * slow path service functions
2522  */
2523
2524 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2525 {
2526         int port = BP_PORT(bp);
2527         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2528                        COMMAND_REG_ATTN_BITS_SET);
2529         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2530                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2531         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2532                                        NIG_REG_MASK_INTERRUPT_PORT0;
2533         u32 aeu_mask;
2534         u32 nig_mask = 0;
2535
2536         if (bp->attn_state & asserted)
2537                 BNX2X_ERR("IGU ERROR\n");
2538
2539         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2540         aeu_mask = REG_RD(bp, aeu_addr);
2541
2542         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2543            aeu_mask, asserted);
2544         aeu_mask &= ~(asserted & 0xff);
2545         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2546
2547         REG_WR(bp, aeu_addr, aeu_mask);
2548         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2549
2550         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2551         bp->attn_state |= asserted;
2552         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2553
2554         if (asserted & ATTN_HARD_WIRED_MASK) {
2555                 if (asserted & ATTN_NIG_FOR_FUNC) {
2556
2557                         bnx2x_acquire_phy_lock(bp);
2558
2559                         /* save nig interrupt mask */
2560                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2561                         REG_WR(bp, nig_int_mask_addr, 0);
2562
2563                         bnx2x_link_attn(bp);
2564
2565                         /* handle unicore attn? */
2566                 }
2567                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2568                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2569
2570                 if (asserted & GPIO_2_FUNC)
2571                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2572
2573                 if (asserted & GPIO_3_FUNC)
2574                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2575
2576                 if (asserted & GPIO_4_FUNC)
2577                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2578
2579                 if (port == 0) {
2580                         if (asserted & ATTN_GENERAL_ATTN_1) {
2581                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2582                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2583                         }
2584                         if (asserted & ATTN_GENERAL_ATTN_2) {
2585                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2586                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2587                         }
2588                         if (asserted & ATTN_GENERAL_ATTN_3) {
2589                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2590                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2591                         }
2592                 } else {
2593                         if (asserted & ATTN_GENERAL_ATTN_4) {
2594                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2595                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2596                         }
2597                         if (asserted & ATTN_GENERAL_ATTN_5) {
2598                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2599                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2600                         }
2601                         if (asserted & ATTN_GENERAL_ATTN_6) {
2602                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2603                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2604                         }
2605                 }
2606
2607         } /* if hardwired */
2608
2609         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2610            asserted, hc_addr);
2611         REG_WR(bp, hc_addr, asserted);
2612
2613         /* now set back the mask */
2614         if (asserted & ATTN_NIG_FOR_FUNC) {
2615                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2616                 bnx2x_release_phy_lock(bp);
2617         }
2618 }
2619
2620 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2621 {
2622         int port = BP_PORT(bp);
2623
2624         /* mark the failure */
2625         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2626         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2627         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2628                  bp->link_params.ext_phy_config);
2629
2630         /* log the failure */
2631         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2632                " the driver to shut down the card to prevent permanent"
2633                " damage.  Please contact Dell Support for assistance\n",
2634                bp->dev->name);
2635 }
2636 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2637 {
2638         int port = BP_PORT(bp);
2639         int reg_offset;
2640         u32 val, swap_val, swap_override;
2641
2642         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2643                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2644
2645         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2646
2647                 val = REG_RD(bp, reg_offset);
2648                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2649                 REG_WR(bp, reg_offset, val);
2650
2651                 BNX2X_ERR("SPIO5 hw attention\n");
2652
2653                 /* Fan failure attention */
2654                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2655                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2656                         /* Low power mode is controlled by GPIO 2 */
2657                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2658                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2659                         /* The PHY reset is controlled by GPIO 1 */
2660                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2661                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2662                         break;
2663
2664                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2665                         /* The PHY reset is controlled by GPIO 1 */
2666                         /* fake the port number to cancel the swap done in
2667                            set_gpio() */
2668                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2669                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2670                         port = (swap_val && swap_override) ^ 1;
2671                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2672                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2673                         break;
2674
2675                 default:
2676                         break;
2677                 }
2678                 bnx2x_fan_failure(bp);
2679         }
2680
2681         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2682                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2683                 bnx2x_acquire_phy_lock(bp);
2684                 bnx2x_handle_module_detect_int(&bp->link_params);
2685                 bnx2x_release_phy_lock(bp);
2686         }
2687
2688         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2689
2690                 val = REG_RD(bp, reg_offset);
2691                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2692                 REG_WR(bp, reg_offset, val);
2693
2694                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2695                           (attn & HW_INTERRUT_ASSERT_SET_0));
2696                 bnx2x_panic();
2697         }
2698 }
2699
2700 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2701 {
2702         u32 val;
2703
2704         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2705
2706                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2707                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2708                 /* DORQ discard attention */
2709                 if (val & 0x2)
2710                         BNX2X_ERR("FATAL error from DORQ\n");
2711         }
2712
2713         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2714
2715                 int port = BP_PORT(bp);
2716                 int reg_offset;
2717
2718                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2719                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2720
2721                 val = REG_RD(bp, reg_offset);
2722                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2723                 REG_WR(bp, reg_offset, val);
2724
2725                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2726                           (attn & HW_INTERRUT_ASSERT_SET_1));
2727                 bnx2x_panic();
2728         }
2729 }
2730
2731 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2732 {
2733         u32 val;
2734
2735         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2736
2737                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2738                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2739                 /* CFC error attention */
2740                 if (val & 0x2)
2741                         BNX2X_ERR("FATAL error from CFC\n");
2742         }
2743
2744         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2745
2746                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2747                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2748                 /* RQ_USDMDP_FIFO_OVERFLOW */
2749                 if (val & 0x18000)
2750                         BNX2X_ERR("FATAL error from PXP\n");
2751         }
2752
2753         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2754
2755                 int port = BP_PORT(bp);
2756                 int reg_offset;
2757
2758                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2759                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2760
2761                 val = REG_RD(bp, reg_offset);
2762                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2763                 REG_WR(bp, reg_offset, val);
2764
2765                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2766                           (attn & HW_INTERRUT_ASSERT_SET_2));
2767                 bnx2x_panic();
2768         }
2769 }
2770
2771 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2772 {
2773         u32 val;
2774
2775         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2776
2777                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2778                         int func = BP_FUNC(bp);
2779
2780                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2781                         bnx2x__link_status_update(bp);
2782                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2783                                                         DRV_STATUS_PMF)
2784                                 bnx2x_pmf_update(bp);
2785
2786                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2787
2788                         BNX2X_ERR("MC assert!\n");
2789                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2790                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2791                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2792                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2793                         bnx2x_panic();
2794
2795                 } else if (attn & BNX2X_MCP_ASSERT) {
2796
2797                         BNX2X_ERR("MCP assert!\n");
2798                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2799                         bnx2x_fw_dump(bp);
2800
2801                 } else
2802                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2803         }
2804
2805         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2806                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2807                 if (attn & BNX2X_GRC_TIMEOUT) {
2808                         val = CHIP_IS_E1H(bp) ?
2809                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2810                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2811                 }
2812                 if (attn & BNX2X_GRC_RSV) {
2813                         val = CHIP_IS_E1H(bp) ?
2814                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2815                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2816                 }
2817                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2818         }
2819 }
2820
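/* for every newly deasserted bit, run the per-group handlers above
   under the ALR lock, then unmask the bits again in the HC and in
   the AEU attention mask */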
2821 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2822 {
2823         struct attn_route attn;
2824         struct attn_route group_mask;
2825         int port = BP_PORT(bp);
2826         int index;
2827         u32 reg_addr;
2828         u32 val;
2829         u32 aeu_mask;
2830
2831         /* need to take HW lock because MCP or other port might also
2832            try to handle this event */
2833         bnx2x_acquire_alr(bp);
2834
2835         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2836         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2837         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2838         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2839         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2840            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2841
2842         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2843                 if (deasserted & (1 << index)) {
2844                         group_mask = bp->attn_group[index];
2845
2846                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2847                            index, group_mask.sig[0], group_mask.sig[1],
2848                            group_mask.sig[2], group_mask.sig[3]);
2849
2850                         bnx2x_attn_int_deasserted3(bp,
2851                                         attn.sig[3] & group_mask.sig[3]);
2852                         bnx2x_attn_int_deasserted1(bp,
2853                                         attn.sig[1] & group_mask.sig[1]);
2854                         bnx2x_attn_int_deasserted2(bp,
2855                                         attn.sig[2] & group_mask.sig[2]);
2856                         bnx2x_attn_int_deasserted0(bp,
2857                                         attn.sig[0] & group_mask.sig[0]);
2858
2859                         if ((attn.sig[0] & group_mask.sig[0] &
2860                                                 HW_PRTY_ASSERT_SET_0) ||
2861                             (attn.sig[1] & group_mask.sig[1] &
2862                                                 HW_PRTY_ASSERT_SET_1) ||
2863                             (attn.sig[2] & group_mask.sig[2] &
2864                                                 HW_PRTY_ASSERT_SET_2))
2865                                 BNX2X_ERR("FATAL HW block parity attention\n");
2866                 }
2867         }
2868
2869         bnx2x_release_alr(bp);
2870
2871         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2872
2873         val = ~deasserted;
2874         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2875            val, reg_addr);
2876         REG_WR(bp, reg_addr, val);
2877
2878         if (~bp->attn_state & deasserted)
2879                 BNX2X_ERR("IGU ERROR\n");
2880
2881         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2882                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2883
2884         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2885         aeu_mask = REG_RD(bp, reg_addr);
2886
2887         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2888            aeu_mask, deasserted);
2889         aeu_mask |= (deasserted & 0xff);
2890         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2891
2892         REG_WR(bp, reg_addr, aeu_mask);
2893         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2894
2895         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2896         bp->attn_state &= ~deasserted;
2897         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2898 }
2899
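/* compare the attention bits in the default status block with the
   acked bits and the driver state to find which attentions were
   newly asserted or deasserted, and handle each set */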
2900 static void bnx2x_attn_int(struct bnx2x *bp)
2901 {
2902         /* read local copy of bits */
2903         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2904                                                                 attn_bits);
2905         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2906                                                                 attn_bits_ack);
2907         u32 attn_state = bp->attn_state;
2908
2909         /* look for changed bits */
2910         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2911         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2912
2913         DP(NETIF_MSG_HW,
2914            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2915            attn_bits, attn_ack, asserted, deasserted);
2916
2917         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2918                 BNX2X_ERR("BAD attention state\n");
2919
2920         /* handle bits that were raised */
2921         if (asserted)
2922                 bnx2x_attn_int_asserted(bp, asserted);
2923
2924         if (deasserted)
2925                 bnx2x_attn_int_deasserted(bp, deasserted);
2926 }
2927
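/* slowpath work handler: processes default status block updates
   (including HW attentions) and acks all storm indices, re-enabling
   the slowpath interrupt on the last ack */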
2928 static void bnx2x_sp_task(struct work_struct *work)
2929 {
2930         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2931         u16 status;
2932
2933
2934         /* Return here if interrupt is disabled */
2935         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2936                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2937                 return;
2938         }
2939
2940         status = bnx2x_update_dsb_idx(bp);
2941 /*      if (status == 0)                                     */
2942 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2943
2944         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2945
2946         /* HW attentions */
2947         if (status & 0x1)
2948                 bnx2x_attn_int(bp);
2949
2950         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2951                      IGU_INT_NOP, 1);
2952         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2953                      IGU_INT_NOP, 1);
2954         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2955                      IGU_INT_NOP, 1);
2956         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2957                      IGU_INT_NOP, 1);
2958         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2959                      IGU_INT_ENABLE, 1);
2960
2961 }
2962
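/* MSI-X slowpath ISR: disables further slowpath interrupts and
   defers the actual work to bnx2x_sp_task() on the bnx2x workqueue */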
2963 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2964 {
2965         struct net_device *dev = dev_instance;
2966         struct bnx2x *bp = netdev_priv(dev);
2967
2968         /* Return here if interrupt is disabled */
2969         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2970                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2971                 return IRQ_HANDLED;
2972         }
2973
2974         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2975
2976 #ifdef BNX2X_STOP_ON_ERROR
2977         if (unlikely(bp->panic))
2978                 return IRQ_HANDLED;
2979 #endif
2980
2981         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2982
2983         return IRQ_HANDLED;
2984 }
2985
2986 /* end of slow path */
2987
2988 /* Statistics */
2989
2990 /****************************************************************************
2991 * Macros
2992 ****************************************************************************/
2993
2994 /* sum[hi:lo] += add[hi:lo] */
2995 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2996         do { \
2997                 s_lo += a_lo; \
2998                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2999         } while (0)
3000
3001 /* difference = minuend - subtrahend */
3002 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3003         do { \
3004                 if (m_lo < s_lo) { \
3005                         /* underflow */ \
3006                         d_hi = m_hi - s_hi; \
3007                         if (d_hi > 0) { \
3008                                 /* we can 'loan' 1 */ \
3009                                 d_hi--; \
3010                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3011                         } else { \
3012                                 /* m_hi <= s_hi */ \
3013                                 d_hi = 0; \
3014                                 d_lo = 0; \
3015                         } \
3016                 } else { \
3017                         /* m_lo >= s_lo */ \
3018                         if (m_hi < s_hi) { \
3019                                 d_hi = 0; \
3020                                 d_lo = 0; \
3021                         } else { \
3022                                 /* m_hi >= s_hi */ \
3023                                 d_hi = m_hi - s_hi; \
3024                                 d_lo = m_lo - s_lo; \
3025                         } \
3026                 } \
3027         } while (0)
3028
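/* update mac_stx[0] with the latest HW value of counter s and
   accumulate the delta into the cumulative counter t in mac_stx[1] */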
3029 #define UPDATE_STAT64(s, t) \
3030         do { \
3031                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3032                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3033                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3034                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3035                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3036                        pstats->mac_stx[1].t##_lo, diff.lo); \
3037         } while (0)
3038
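/* accumulate the delta of NIG counter s (vs. its saved old value)
   into the 64-bit driver counter t */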
3039 #define UPDATE_STAT64_NIG(s, t) \
3040         do { \
3041                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3042                         diff.lo, new->s##_lo, old->s##_lo); \
3043                 ADD_64(estats->t##_hi, diff.hi, \
3044                        estats->t##_lo, diff.lo); \
3045         } while (0)
3046
3047 /* sum[hi:lo] += add */
3048 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3049         do { \
3050                 s_lo += a; \
3051                 s_hi += (s_lo < a) ? 1 : 0; \
3052         } while (0)
3053
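/* extend the 32-bit MAC counter s into its 64-bit cumulative value
   in mac_stx[1] */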
3054 #define UPDATE_EXTEND_STAT(s) \
3055         do { \
3056                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3057                               pstats->mac_stx[1].s##_lo, \
3058                               new->s); \
3059         } while (0)
3060
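/* extend a 32-bit tstorm per-client counter s into the 64-bit queue
   stat t; the USTAT and XSTAT variants below do the same for the
   ustorm and xstorm counters */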
3061 #define UPDATE_EXTEND_TSTAT(s, t) \
3062         do { \
3063                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3064                 old_tclient->s = tclient->s; \
3065                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3066         } while (0)
3067
3068 #define UPDATE_EXTEND_USTAT(s, t) \
3069         do { \
3070                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3071                 old_uclient->s = uclient->s; \
3072                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3073         } while (0)
3074
3075 #define UPDATE_EXTEND_XSTAT(s, t) \
3076         do { \
3077                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3078                 old_xclient->s = xclient->s; \
3079                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3080         } while (0)
3081
3082 /* minuend -= subtrahend */
3083 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3084         do { \
3085                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3086         } while (0)
3087
3088 /* minuend[hi:lo] -= subtrahend */
3089 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3090         do { \
3091                 SUB_64(m_hi, 0, m_lo, s); \
3092         } while (0)
3093
3094 #define SUB_EXTEND_USTAT(s, t) \
3095         do { \
3096                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3097                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3098         } while (0)
3099
3100 /*
3101  * General service functions
3102  */
3103
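/* return a {hi, lo} statistics pair as long: the full 64-bit value
   on 64-bit hosts, only the low 32 bits on 32-bit hosts */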
3104 static inline long bnx2x_hilo(u32 *hiref)
3105 {
3106         u32 lo = *(hiref + 1);
3107 #if (BITS_PER_LONG == 64)
3108         u32 hi = *hiref;
3109
3110         return HILO_U64(hi, lo);
3111 #else
3112         return lo;
3113 #endif
3114 }
3115
3116 /*
3117  * Init service functions
3118  */
3119
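/* post a statistics query ramrod so the storms update the per-client
   counters of all active queues */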
3120 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3121 {
3122         if (!bp->stats_pending) {
3123                 struct eth_query_ramrod_data ramrod_data = {0};
3124                 int i, rc;
3125
3126                 ramrod_data.drv_counter = bp->stats_counter++;
3127                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3128                 for_each_queue(bp, i)
3129                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3130
3131                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3132                                    ((u32 *)&ramrod_data)[1],
3133                                    ((u32 *)&ramrod_data)[0], 0);
3134                 if (rc == 0) {
3135                         /* stats ramrod has its own slot on the spq */
3136                         bp->spq_left++;
3137                         bp->stats_pending = 1;
3138                 }
3139         }
3140 }
3141
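/* reset the statistics state: latch the port stats address from the
   shmem, snapshot the NIG counters and clear all per-queue stats */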
3142 static void bnx2x_stats_init(struct bnx2x *bp)
3143 {
3144         int port = BP_PORT(bp);
3145         int i;
3146
3147         bp->stats_pending = 0;
3148         bp->executer_idx = 0;
3149         bp->stats_counter = 0;
3150
3151         /* port stats */
3152         if (!BP_NOMCP(bp))
3153                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3154         else
3155                 bp->port.port_stx = 0;
3156         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3157
3158         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3159         bp->port.old_nig_stats.brb_discard =
3160                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3161         bp->port.old_nig_stats.brb_truncate =
3162                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3163         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3164                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3165         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3166                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3167
3168         /* function stats */
3169         for_each_queue(bp, i) {
3170                 struct bnx2x_fastpath *fp = &bp->fp[i];
3171
3172                 memset(&fp->old_tclient, 0,
3173                        sizeof(struct tstorm_per_client_stats));
3174                 memset(&fp->old_uclient, 0,
3175                        sizeof(struct ustorm_per_client_stats));
3176                 memset(&fp->old_xclient, 0,
3177                        sizeof(struct xstorm_per_client_stats));
3178                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3179         }
3180
3181         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3182         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3183
3184         bp->stats_state = STATS_STATE_DISABLED;
3185         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3186                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3187 }
3188
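/* start the DMAE transfers prepared in the executer area: through
   the loader command chain if one was built, otherwise as a single
   function statistics command */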
3189 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3190 {
3191         struct dmae_command *dmae = &bp->stats_dmae;
3192         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3193
3194         *stats_comp = DMAE_COMP_VAL;
3195         if (CHIP_REV_IS_SLOW(bp))
3196                 return;
3197
3198         /* loader */
3199         if (bp->executer_idx) {
3200                 int loader_idx = PMF_DMAE_C(bp);
3201
3202                 memset(dmae, 0, sizeof(struct dmae_command));
3203
3204                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3205                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3206                                 DMAE_CMD_DST_RESET |
3207 #ifdef __BIG_ENDIAN
3208                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3209 #else
3210                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3211 #endif
3212                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3213                                                DMAE_CMD_PORT_0) |
3214                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3215                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3216                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3217                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3218                                      sizeof(struct dmae_command) *
3219                                      (loader_idx + 1)) >> 2;
3220                 dmae->dst_addr_hi = 0;
3221                 dmae->len = sizeof(struct dmae_command) >> 2;
3222                 if (CHIP_IS_E1(bp))
3223                         dmae->len--;
3224                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3225                 dmae->comp_addr_hi = 0;
3226                 dmae->comp_val = 1;
3227
3228                 *stats_comp = 0;
3229                 bnx2x_post_dmae(bp, dmae, loader_idx);
3230
3231         } else if (bp->func_stx) {
3232                 *stats_comp = 0;
3233                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3234         }
3235 }
3236
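/* wait (sleeping) for the DMAE completion value, up to roughly 10ms */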
3237 static int bnx2x_stats_comp(struct bnx2x *bp)
3238 {
3239         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3240         int cnt = 10;
3241
3242         might_sleep();
3243         while (*stats_comp != DMAE_COMP_VAL) {
3244                 if (!cnt) {
3245                         BNX2X_ERR("timeout waiting for stats to finish\n");
3246                         break;
3247                 }
3248                 cnt--;
3249                 msleep(1);
3250         }
3251         return 1;
3252 }
3253
3254 /*
3255  * Statistics service functions
3256  */
3257
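/* on becoming the PMF, read the current port statistics back from
   the shmem in two DMAE transfers (bounded by DMAE_LEN32_RD_MAX) so
   accumulation continues from the previous PMF's values */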
3258 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3259 {
3260         struct dmae_command *dmae;
3261         u32 opcode;
3262         int loader_idx = PMF_DMAE_C(bp);
3263         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3264
3265         /* sanity */
3266         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3267                 BNX2X_ERR("BUG!\n");
3268                 return;
3269         }
3270
3271         bp->executer_idx = 0;
3272
3273         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3274                   DMAE_CMD_C_ENABLE |
3275                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3276 #ifdef __BIG_ENDIAN
3277                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3278 #else
3279                   DMAE_CMD_ENDIANITY_DW_SWAP |
3280 #endif
3281                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3282                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3283
3284         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3286         dmae->src_addr_lo = bp->port.port_stx >> 2;
3287         dmae->src_addr_hi = 0;
3288         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3289         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3290         dmae->len = DMAE_LEN32_RD_MAX;
3291         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3292         dmae->comp_addr_hi = 0;
3293         dmae->comp_val = 1;
3294
3295         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3296         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3297         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3298         dmae->src_addr_hi = 0;
3299         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3300                                    DMAE_LEN32_RD_MAX * 4);
3301         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3302                                    DMAE_LEN32_RD_MAX * 4);
3303         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3304         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3305         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3306         dmae->comp_val = DMAE_COMP_VAL;
3307
3308         *stats_comp = 0;
3309         bnx2x_hw_stats_post(bp);
3310         bnx2x_stats_comp(bp);
3311 }
3312
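/* build the PMF DMAE command chain: write the host port and function
   statistics to the shmem and read the MAC (BMAC or EMAC) and NIG HW
   counters back; only the last command signals completion */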
3313 static void bnx2x_port_stats_init(struct bnx2x *bp)
3314 {
3315         struct dmae_command *dmae;
3316         int port = BP_PORT(bp);
3317         int vn = BP_E1HVN(bp);
3318         u32 opcode;
3319         int loader_idx = PMF_DMAE_C(bp);
3320         u32 mac_addr;
3321         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3322
3323         /* sanity */
3324         if (!bp->link_vars.link_up || !bp->port.pmf) {
3325                 BNX2X_ERR("BUG!\n");
3326                 return;
3327         }
3328
3329         bp->executer_idx = 0;
3330
3331         /* MCP */
3332         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3333                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3334                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3335 #ifdef __BIG_ENDIAN
3336                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3337 #else
3338                   DMAE_CMD_ENDIANITY_DW_SWAP |
3339 #endif
3340                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3341                   (vn << DMAE_CMD_E1HVN_SHIFT));
3342
3343         if (bp->port.port_stx) {
3344
3345                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3346                 dmae->opcode = opcode;
3347                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3348                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3349                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3350                 dmae->dst_addr_hi = 0;
3351                 dmae->len = sizeof(struct host_port_stats) >> 2;
3352                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3353                 dmae->comp_addr_hi = 0;
3354                 dmae->comp_val = 1;
3355         }
3356
3357         if (bp->func_stx) {
3358
3359                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3360                 dmae->opcode = opcode;
3361                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3362                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3363                 dmae->dst_addr_lo = bp->func_stx >> 2;
3364                 dmae->dst_addr_hi = 0;
3365                 dmae->len = sizeof(struct host_func_stats) >> 2;
3366                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3367                 dmae->comp_addr_hi = 0;
3368                 dmae->comp_val = 1;
3369         }
3370
3371         /* MAC */
3372         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3373                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3374                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3375 #ifdef __BIG_ENDIAN
3376                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3377 #else
3378                   DMAE_CMD_ENDIANITY_DW_SWAP |
3379 #endif
3380                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3381                   (vn << DMAE_CMD_E1HVN_SHIFT));
3382
3383         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3384
3385                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3386                                    NIG_REG_INGRESS_BMAC0_MEM);
3387
3388                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3389                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3390                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3391                 dmae->opcode = opcode;
3392                 dmae->src_addr_lo = (mac_addr +
3393                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3394                 dmae->src_addr_hi = 0;
3395                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3396                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3397                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3398                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3399                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3400                 dmae->comp_addr_hi = 0;
3401                 dmae->comp_val = 1;
3402
3403                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3404                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3405                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3406                 dmae->opcode = opcode;
3407                 dmae->src_addr_lo = (mac_addr +
3408                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3409                 dmae->src_addr_hi = 0;
3410                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3411                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3412                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3413                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3414                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3415                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3416                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3417                 dmae->comp_addr_hi = 0;
3418                 dmae->comp_val = 1;
3419
3420         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3421
3422                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3423
3424                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT) */
3425                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3426                 dmae->opcode = opcode;
3427                 dmae->src_addr_lo = (mac_addr +
3428                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3429                 dmae->src_addr_hi = 0;
3430                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3431                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3432                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3433                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3434                 dmae->comp_addr_hi = 0;
3435                 dmae->comp_val = 1;
3436
3437                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3438                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3439                 dmae->opcode = opcode;
3440                 dmae->src_addr_lo = (mac_addr +
3441                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3442                 dmae->src_addr_hi = 0;
3443                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3444                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3445                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3446                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3447                 dmae->len = 1;
3448                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3449                 dmae->comp_addr_hi = 0;
3450                 dmae->comp_val = 1;
3451
3452                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT) */
3453                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3454                 dmae->opcode = opcode;
3455                 dmae->src_addr_lo = (mac_addr +
3456                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3457                 dmae->src_addr_hi = 0;
3458                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3459                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3460                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3461                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3462                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3463                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3464                 dmae->comp_addr_hi = 0;
3465                 dmae->comp_val = 1;
3466         }
3467
3468         /* NIG */
3469         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3470         dmae->opcode = opcode;
3471         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3472                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3473         dmae->src_addr_hi = 0;
3474         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3475         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3476         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3477         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3478         dmae->comp_addr_hi = 0;
3479         dmae->comp_val = 1;
3480
3481         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3482         dmae->opcode = opcode;
3483         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3484                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3485         dmae->src_addr_hi = 0;
3486         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3487                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3488         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3489                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3490         dmae->len = (2*sizeof(u32)) >> 2;
3491         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3492         dmae->comp_addr_hi = 0;
3493         dmae->comp_val = 1;
3494
3495         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3496         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3497                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3498                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3499 #ifdef __BIG_ENDIAN
3500                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3501 #else
3502                         DMAE_CMD_ENDIANITY_DW_SWAP |
3503 #endif
3504                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3505                         (vn << DMAE_CMD_E1HVN_SHIFT));
3506         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3507                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3508         dmae->src_addr_hi = 0;
3509         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3510                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3511         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3512                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3513         dmae->len = (2*sizeof(u32)) >> 2;
3514         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3515         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3516         dmae->comp_val = DMAE_COMP_VAL;
3517
3518         *stats_comp = 0;
3519 }
3520
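/* single DMAE command writing the host function statistics to the
   shmem address supplied by the MCP */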
3521 static void bnx2x_func_stats_init(struct bnx2x *bp)
3522 {
3523         struct dmae_command *dmae = &bp->stats_dmae;
3524         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3525
3526         /* sanity */
3527         if (!bp->func_stx) {
3528                 BNX2X_ERR("BUG!\n");
3529                 return;
3530         }
3531
3532         bp->executer_idx = 0;
3533         memset(dmae, 0, sizeof(struct dmae_command));
3534
3535         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3536                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3537                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3538 #ifdef __BIG_ENDIAN
3539                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3540 #else
3541                         DMAE_CMD_ENDIANITY_DW_SWAP |
3542 #endif
3543                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3544                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3545         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3546         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3547         dmae->dst_addr_lo = bp->func_stx >> 2;
3548         dmae->dst_addr_hi = 0;
3549         dmae->len = sizeof(struct host_func_stats) >> 2;
3550         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3551         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3552         dmae->comp_val = DMAE_COMP_VAL;
3553
3554         *stats_comp = 0;
3555 }
3556
3557 static void bnx2x_stats_start(struct bnx2x *bp)
3558 {
3559         if (bp->port.pmf)
3560                 bnx2x_port_stats_init(bp);
3561
3562         else if (bp->func_stx)
3563                 bnx2x_func_stats_init(bp);
3564
3565         bnx2x_hw_stats_post(bp);
3566         bnx2x_storm_stats_post(bp);
3567 }
3568
3569 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3570 {
3571         bnx2x_stats_comp(bp);
3572         bnx2x_stats_pmf_update(bp);
3573         bnx2x_stats_start(bp);
3574 }
3575
3576 static void bnx2x_stats_restart(struct bnx2x *bp)
3577 {
3578         bnx2x_stats_comp(bp);
3579         bnx2x_stats_start(bp);
3580 }
3581
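/* fold the freshly DMAE'd BMAC counters into the cumulative port
   statistics and derive the pause frame counters */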
3582 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3583 {
3584         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3585         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3586         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3587         struct {
3588                 u32 lo;
3589                 u32 hi;
3590         } diff;
3591
3592         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3593         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3594         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3595         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3596         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3597         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3598         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3599         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3600         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3601         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3602         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3603         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3604         UPDATE_STAT64(tx_stat_gt127,
3605                                 tx_stat_etherstatspkts65octetsto127octets);
3606         UPDATE_STAT64(tx_stat_gt255,
3607                                 tx_stat_etherstatspkts128octetsto255octets);
3608         UPDATE_STAT64(tx_stat_gt511,
3609                                 tx_stat_etherstatspkts256octetsto511octets);
3610         UPDATE_STAT64(tx_stat_gt1023,
3611                                 tx_stat_etherstatspkts512octetsto1023octets);
3612         UPDATE_STAT64(tx_stat_gt1518,
3613                                 tx_stat_etherstatspkts1024octetsto1522octets);
3614         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3615         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3616         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3617         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3618         UPDATE_STAT64(tx_stat_gterr,
3619                                 tx_stat_dot3statsinternalmactransmiterrors);
3620         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3621
3622         estats->pause_frames_received_hi =
3623                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3624         estats->pause_frames_received_lo =
3625                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3626
3627         estats->pause_frames_sent_hi =
3628                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3629         estats->pause_frames_sent_lo =
3630                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3631 }
3632
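/* fold the freshly DMAE'd EMAC counters into the cumulative port
   statistics; pause counters are the sum of XON and XOFF events */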
3633 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3634 {
3635         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3636         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3637         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3638
3639         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3640         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3641         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3642         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3643         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3644         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3645         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3646         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3647         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3648         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3649         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3650         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3651         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3652         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3653         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3654         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3655         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3656         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3657         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3658         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3659         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3660         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3661         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3662         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3663         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3664         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3665         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3666         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3667         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3668         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3669         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3670
3671         estats->pause_frames_received_hi =
3672                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3673         estats->pause_frames_received_lo =
3674                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3675         ADD_64(estats->pause_frames_received_hi,
3676                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3677                estats->pause_frames_received_lo,
3678                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3679
3680         estats->pause_frames_sent_hi =
3681                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3682         estats->pause_frames_sent_lo =
3683                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3684         ADD_64(estats->pause_frames_sent_hi,
3685                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3686                estats->pause_frames_sent_lo,
3687                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3688 }
3689
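/* update the MAC and NIG HW statistics after a DMAE completion;
   returns non-zero if no MAC was active */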
3690 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3691 {
3692         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3693         struct nig_stats *old = &(bp->port.old_nig_stats);
3694         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3695         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3696         struct {
3697                 u32 lo;
3698                 u32 hi;
3699         } diff;
3700         u32 nig_timer_max;
3701
3702         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3703                 bnx2x_bmac_stats_update(bp);
3704
3705         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3706                 bnx2x_emac_stats_update(bp);
3707
3708         else { /* unreached */
3709                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3710                 return -1;
3711         }
3712
3713         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3714                       new->brb_discard - old->brb_discard);
3715         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3716                       new->brb_truncate - old->brb_truncate);
3717
3718         UPDATE_STAT64_NIG(egress_mac_pkt0,
3719                                         etherstatspkts1024octetsto1522octets);
3720         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3721
3722         memcpy(old, new, sizeof(struct nig_stats));
3723
3724         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3725                sizeof(struct mac_stx));
3726         estats->brb_drop_hi = pstats->brb_drop_hi;
3727         estats->brb_drop_lo = pstats->brb_drop_lo;
3728
3729         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3730
3731         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3732         if (nig_timer_max != estats->nig_timer_max) {
3733                 estats->nig_timer_max = nig_timer_max;
3734                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3735         }
3736
3737         return 0;
3738 }
3739
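/* fold the per-client storm counters into the queue, function and
   driver statistics; bails out if any storm has not yet caught up
   with the current stats_counter */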
3740 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3741 {
3742         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3743         struct tstorm_per_port_stats *tport =
3744                                         &stats->tstorm_common.port_statistics;
3745         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3746         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3747         int i;
3748
3749         memset(&(fstats->total_bytes_received_hi), 0,
3750                sizeof(struct host_func_stats) - 2*sizeof(u32));
3751         estats->error_bytes_received_hi = 0;
3752         estats->error_bytes_received_lo = 0;
3753         estats->etherstatsoverrsizepkts_hi = 0;
3754         estats->etherstatsoverrsizepkts_lo = 0;
3755         estats->no_buff_discard_hi = 0;
3756         estats->no_buff_discard_lo = 0;
3757
3758         for_each_queue(bp, i) {
3759                 struct bnx2x_fastpath *fp = &bp->fp[i];
3760                 int cl_id = fp->cl_id;
3761                 struct tstorm_per_client_stats *tclient =
3762                                 &stats->tstorm_common.client_statistics[cl_id];
3763                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3764                 struct ustorm_per_client_stats *uclient =
3765                                 &stats->ustorm_common.client_statistics[cl_id];
3766                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3767                 struct xstorm_per_client_stats *xclient =
3768                                 &stats->xstorm_common.client_statistics[cl_id];
3769                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3770                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3771                 u32 diff;
3772
3773                 /* are storm stats valid? */
3774                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3775                                                         bp->stats_counter) {
3776                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3777                            "  xstorm counter (%d) != stats_counter (%d)\n",
3778                            i, le16_to_cpu(xclient->stats_counter), bp->stats_counter);
3779                         return -1;
3780                 }
3781                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3782                                                         bp->stats_counter) {
3783                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3784                            "  tstorm counter (%d) != stats_counter (%d)\n",
3785                            i, le16_to_cpu(tclient->stats_counter), bp->stats_counter);
3786                         return -2;
3787                 }
3788                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3789                                                         bp->stats_counter) {
3790                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3791                            "  ustorm counter (%d) != stats_counter (%d)\n",
3792                            i, le16_to_cpu(uclient->stats_counter), bp->stats_counter);
3793                         return -4;
3794                 }
3795
3796                 qstats->total_bytes_received_hi =
3797                 qstats->valid_bytes_received_hi =
3798                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3799                 qstats->total_bytes_received_lo =
3800                 qstats->valid_bytes_received_lo =
3801                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3802
3803                 qstats->error_bytes_received_hi =
3804                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3805                 qstats->error_bytes_received_lo =
3806                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3807
3808                 ADD_64(qstats->total_bytes_received_hi,
3809                        qstats->error_bytes_received_hi,
3810                        qstats->total_bytes_received_lo,
3811                        qstats->error_bytes_received_lo);
3812
3813                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3814                                         total_unicast_packets_received);
3815                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3816                                         total_multicast_packets_received);
3817                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3818                                         total_broadcast_packets_received);
3819                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3820                                         etherstatsoverrsizepkts);
3821                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3822
3823                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3824                                         total_unicast_packets_received);
3825                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3826                                         total_multicast_packets_received);
3827                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3828                                         total_broadcast_packets_received);
3829                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3830                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3831                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3832
3833                 qstats->total_bytes_transmitted_hi =
3834                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3835                 qstats->total_bytes_transmitted_lo =
3836                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3837
3838                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3839                                         total_unicast_packets_transmitted);
3840                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3841                                         total_multicast_packets_transmitted);
3842                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3843                                         total_broadcast_packets_transmitted);
3844
3845                 old_tclient->checksum_discard = tclient->checksum_discard;
3846                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3847
3848                 ADD_64(fstats->total_bytes_received_hi,
3849                        qstats->total_bytes_received_hi,
3850                        fstats->total_bytes_received_lo,
3851                        qstats->total_bytes_received_lo);
3852                 ADD_64(fstats->total_bytes_transmitted_hi,
3853                        qstats->total_bytes_transmitted_hi,
3854                        fstats->total_bytes_transmitted_lo,
3855                        qstats->total_bytes_transmitted_lo);
3856                 ADD_64(fstats->total_unicast_packets_received_hi,
3857                        qstats->total_unicast_packets_received_hi,
3858                        fstats->total_unicast_packets_received_lo,
3859                        qstats->total_unicast_packets_received_lo);
3860                 ADD_64(fstats->total_multicast_packets_received_hi,
3861                        qstats->total_multicast_packets_received_hi,
3862                        fstats->total_multicast_packets_received_lo,
3863                        qstats->total_multicast_packets_received_lo);
3864                 ADD_64(fstats->total_broadcast_packets_received_hi,
3865                        qstats->total_broadcast_packets_received_hi,
3866                        fstats->total_broadcast_packets_received_lo,
3867                        qstats->total_broadcast_packets_received_lo);
3868                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3869                        qstats->total_unicast_packets_transmitted_hi,
3870                        fstats->total_unicast_packets_transmitted_lo,
3871                        qstats->total_unicast_packets_transmitted_lo);
3872                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3873                        qstats->total_multicast_packets_transmitted_hi,
3874                        fstats->total_multicast_packets_transmitted_lo,
3875                        qstats->total_multicast_packets_transmitted_lo);
3876                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3877                        qstats->total_broadcast_packets_transmitted_hi,
3878                        fstats->total_broadcast_packets_transmitted_lo,
3879                        qstats->total_broadcast_packets_transmitted_lo);
3880                 ADD_64(fstats->valid_bytes_received_hi,
3881                        qstats->valid_bytes_received_hi,
3882                        fstats->valid_bytes_received_lo,
3883                        qstats->valid_bytes_received_lo);
3884
3885                 ADD_64(estats->error_bytes_received_hi,
3886                        qstats->error_bytes_received_hi,
3887                        estats->error_bytes_received_lo,
3888                        qstats->error_bytes_received_lo);
3889                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3890                        qstats->etherstatsoverrsizepkts_hi,
3891                        estats->etherstatsoverrsizepkts_lo,
3892                        qstats->etherstatsoverrsizepkts_lo);
3893                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3894                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3895         }
3896
3897         ADD_64(fstats->total_bytes_received_hi,
3898                estats->rx_stat_ifhcinbadoctets_hi,
3899                fstats->total_bytes_received_lo,
3900                estats->rx_stat_ifhcinbadoctets_lo);
3901
3902         memcpy(estats, &(fstats->total_bytes_received_hi),
3903                sizeof(struct host_func_stats) - 2*sizeof(u32));
3904
3905         ADD_64(estats->etherstatsoverrsizepkts_hi,
3906                estats->rx_stat_dot3statsframestoolong_hi,
3907                estats->etherstatsoverrsizepkts_lo,
3908                estats->rx_stat_dot3statsframestoolong_lo);
3909         ADD_64(estats->error_bytes_received_hi,
3910                estats->rx_stat_ifhcinbadoctets_hi,
3911                estats->error_bytes_received_lo,
3912                estats->rx_stat_ifhcinbadoctets_lo);
3913
3914         if (bp->port.pmf) {
3915                 estats->mac_filter_discard =
3916                                 le32_to_cpu(tport->mac_filter_discard);
3917                 estats->xxoverflow_discard =
3918                                 le32_to_cpu(tport->xxoverflow_discard);
3919                 estats->brb_truncate_discard =
3920                                 le32_to_cpu(tport->brb_truncate_discard);
3921                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3922         }
3923
3924         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3925
3926         bp->stats_pending = 0;
3927
3928         return 0;
3929 }
3930
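/* derive the standard net_device_stats from the driver statistics */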
3931 static void bnx2x_net_stats_update(struct bnx2x *bp)
3932 {
3933         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3934         struct net_device_stats *nstats = &bp->dev->stats;
3935         int i;
3936
3937         nstats->rx_packets =
3938                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3939                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3940                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3941
3942         nstats->tx_packets =
3943                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3944                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3945                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3946
3947         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3948
3949         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3950
3951         nstats->rx_dropped = estats->mac_discard;
3952         for_each_queue(bp, i)
3953                 nstats->rx_dropped +=
3954                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3955
3956         nstats->tx_dropped = 0;
3957
3958         nstats->multicast =
3959                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3960
3961         nstats->collisions =
3962                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3963
3964         nstats->rx_length_errors =
3965                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3966                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3967         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3968                                  bnx2x_hilo(&estats->brb_truncate_hi);
3969         nstats->rx_crc_errors =
3970                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3971         nstats->rx_frame_errors =
3972                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3973         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3974         nstats->rx_missed_errors = estats->xxoverflow_discard;
3975
3976         nstats->rx_errors = nstats->rx_length_errors +
3977                             nstats->rx_over_errors +
3978                             nstats->rx_crc_errors +
3979                             nstats->rx_frame_errors +
3980                             nstats->rx_fifo_errors +
3981                             nstats->rx_missed_errors;
3982
3983         nstats->tx_aborted_errors =
3984                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3985                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3986         nstats->tx_carrier_errors =
3987                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3988         nstats->tx_fifo_errors = 0;
3989         nstats->tx_heartbeat_errors = 0;
3990         nstats->tx_window_errors = 0;
3991
3992         nstats->tx_errors = nstats->tx_aborted_errors +
3993                             nstats->tx_carrier_errors +
3994             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3995 }
3996
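/* sum the driver-internal per-queue counters (Xoff events, discards,
   allocation failures, checksum errors) into the port totals */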
3997 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3998 {
3999         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4000         int i;
4001
4002         estats->driver_xoff = 0;
4003         estats->rx_err_discard_pkt = 0;
4004         estats->rx_skb_alloc_failed = 0;
4005         estats->hw_csum_err = 0;
4006         for_each_queue(bp, i) {
4007                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4008
4009                 estats->driver_xoff += qstats->driver_xoff;
4010                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4011                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4012                 estats->hw_csum_err += qstats->hw_csum_err;
4013         }
4014 }
4015
4016 static void bnx2x_stats_update(struct bnx2x *bp)
4017 {
4018         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4019
4020         if (*stats_comp != DMAE_COMP_VAL)
4021                 return;
4022
4023         if (bp->port.pmf)
4024                 bnx2x_hw_stats_update(bp);
4025
4026         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4027                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4028                 bnx2x_panic();
4029                 return;
4030         }
4031
4032         bnx2x_net_stats_update(bp);
4033         bnx2x_drv_stats_update(bp);
4034
4035         if (bp->msglevel & NETIF_MSG_TIMER) {
4036                 struct tstorm_per_client_stats *old_tclient =
4037                                                         &bp->fp->old_tclient;
4038                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4039                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4040                 struct net_device_stats *nstats = &bp->dev->stats;
4041                 int i;
4042
4043                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4044                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4045                                   "  tx pkt (%lx)\n",
4046                        bnx2x_tx_avail(bp->fp),
4047                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4048                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4049                                   "  rx pkt (%lx)\n",
4050                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4051                              bp->fp->rx_comp_cons),
4052                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4053                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4054                                   "brb truncate %u\n",
4055                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4056                        qstats->driver_xoff,
4057                        estats->brb_drop_lo, estats->brb_truncate_lo);
4058                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4059                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4060                         "mac_discard %u  mac_filter_discard %u  "
4061                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4062                         "ttl0_discard %u\n",
4063                        le32_to_cpu(old_tclient->checksum_discard),
4064                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4065                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4066                        estats->mac_discard, estats->mac_filter_discard,
4067                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4068                        le32_to_cpu(old_tclient->ttl0_discard));
4069
4070                 for_each_queue(bp, i) {
4071                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4072                                bnx2x_fp(bp, i, tx_pkt),
4073                                bnx2x_fp(bp, i, rx_pkt),
4074                                bnx2x_fp(bp, i, rx_calls));
4075                 }
4076         }
4077
4078         bnx2x_hw_stats_post(bp);
4079         bnx2x_storm_stats_post(bp);
4080 }
4081
4082 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4083 {
4084         struct dmae_command *dmae;
4085         u32 opcode;
4086         int loader_idx = PMF_DMAE_C(bp);
4087         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4088
4089         bp->executer_idx = 0;
4090
4091         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4092                   DMAE_CMD_C_ENABLE |
4093                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4094 #ifdef __BIG_ENDIAN
4095                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4096 #else
4097                   DMAE_CMD_ENDIANITY_DW_SWAP |
4098 #endif
4099                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4100                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4101
4102         if (bp->port.port_stx) {
4103
4104                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4105                 if (bp->func_stx)
4106                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4107                 else
4108                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4109                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4110                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4111                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4112                 dmae->dst_addr_hi = 0;
4113                 dmae->len = sizeof(struct host_port_stats) >> 2;
4114                 if (bp->func_stx) {
4115                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4116                         dmae->comp_addr_hi = 0;
4117                         dmae->comp_val = 1;
4118                 } else {
4119                         dmae->comp_addr_lo =
4120                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4121                         dmae->comp_addr_hi =
4122                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4123                         dmae->comp_val = DMAE_COMP_VAL;
4124
4125                         *stats_comp = 0;
4126                 }
4127         }
4128
4129         if (bp->func_stx) {
4130
4131                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4132                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4133                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4134                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4135                 dmae->dst_addr_lo = bp->func_stx >> 2;
4136                 dmae->dst_addr_hi = 0;
4137                 dmae->len = sizeof(struct host_func_stats) >> 2;
4138                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4139                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4140                 dmae->comp_val = DMAE_COMP_VAL;
4141
4142                 *stats_comp = 0;
4143         }
4144 }
4145
4146 static void bnx2x_stats_stop(struct bnx2x *bp)
4147 {
4148         int update = 0;
4149
4150         bnx2x_stats_comp(bp);
4151
4152         if (bp->port.pmf)
4153                 update = (bnx2x_hw_stats_update(bp) == 0);
4154
4155         update |= (bnx2x_storm_stats_update(bp) == 0);
4156
4157         if (update) {
4158                 bnx2x_net_stats_update(bp);
4159
4160                 if (bp->port.pmf)
4161                         bnx2x_port_stats_stop(bp);
4162
4163                 bnx2x_hw_stats_post(bp);
4164                 bnx2x_stats_comp(bp);
4165         }
4166 }
4167
4168 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4169 {
4170 }
4171
4172 static const struct {
4173         void (*action)(struct bnx2x *bp);
4174         enum bnx2x_stats_state next_state;
4175 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4176 /* state        event   */
4177 {
4178 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4179 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4180 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4181 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4182 },
4183 {
4184 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4185 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4186 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4187 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4188 }
4189 };
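/* The table above is a small dispatch matrix keyed by (state, event).
 * For example, STATS_EVENT_UPDATE while in STATS_STATE_ENABLED runs
 * bnx2x_stats_update() and stays in ENABLED, while STATS_EVENT_STOP runs
 * bnx2x_stats_stop() and drops back to STATS_STATE_DISABLED;
 * bnx2x_stats_handle() below performs the lookup and the state change.
 */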
4190
4191 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4192 {
4193         enum bnx2x_stats_state state = bp->stats_state;
4194
4195         bnx2x_stats_stm[state][event].action(bp);
4196         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4197
4198         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4199                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4200                    state, event, bp->stats_state);
4201 }
4202
4203 static void bnx2x_timer(unsigned long data)
4204 {
4205         struct bnx2x *bp = (struct bnx2x *) data;
4206
4207         if (!netif_running(bp->dev))
4208                 return;
4209
4210         if (atomic_read(&bp->intr_sem) != 0)
4211                 goto timer_restart;
4212
4213         if (poll) {
4214                 struct bnx2x_fastpath *fp = &bp->fp[0];
4215
4216                 bnx2x_tx_int(fp);
4217                 bnx2x_rx_int(fp, 1000);
4219         }
4220
4221         if (!BP_NOMCP(bp)) {
4222                 int func = BP_FUNC(bp);
4223                 u32 drv_pulse;
4224                 u32 mcp_pulse;
4225
4226                 ++bp->fw_drv_pulse_wr_seq;
4227                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4228                 /* TBD - add SYSTEM_TIME */
4229                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4230                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4231
4232                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4233                              MCP_PULSE_SEQ_MASK);
4234                 /* The delta between driver pulse and mcp response
4235                  * should be 1 (before mcp response) or 0 (after mcp response)
4236                  */
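                /* e.g. drv_pulse 0x12 is healthy against an mcp_pulse of
                 * 0x12 (MCP already answered) or 0x11 (answer pending);
                 * anything else means a heartbeat was lost */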
4237                 if ((drv_pulse != mcp_pulse) &&
4238                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4239                         /* someone lost a heartbeat... */
4240                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4241                                   drv_pulse, mcp_pulse);
4242                 }
4243         }
4244
4245         if ((bp->state == BNX2X_STATE_OPEN) ||
4246             (bp->state == BNX2X_STATE_DISABLED))
4247                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4248
4249 timer_restart:
4250         mod_timer(&bp->timer, jiffies + bp->current_interval);
4251 }
4252
4253 /* end of Statistics */
4254
4255 /* nic init */
4256
4257 /*
4258  * nic init service functions
4259  */
4260
4261 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4262 {
4263         int port = BP_PORT(bp);
4264
4265         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4266                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4267                         sizeof(struct ustorm_status_block)/4);
4268         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4269                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4270                         sizeof(struct cstorm_status_block)/4);
4271 }
4272
4273 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4274                           dma_addr_t mapping, int sb_id)
4275 {
4276         int port = BP_PORT(bp);
4277         int func = BP_FUNC(bp);
4278         int index;
4279         u64 section;
4280
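        /* Per storm section: publish the host DMA address of the status
         * block, record the owning function, disable all host-coalescing
         * indices, and finally ack with IGU_INT_ENABLE (at the bottom)
         * so the IGU may raise interrupts for this block. */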
4281         /* USTORM */
4282         section = ((u64)mapping) + offsetof(struct host_status_block,
4283                                             u_status_block);
4284         sb->u_status_block.status_block_id = sb_id;
4285
4286         REG_WR(bp, BAR_USTRORM_INTMEM +
4287                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4288         REG_WR(bp, BAR_USTRORM_INTMEM +
4289                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4290                U64_HI(section));
4291         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4292                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4293
4294         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4295                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4296                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4297
4298         /* CSTORM */
4299         section = ((u64)mapping) + offsetof(struct host_status_block,
4300                                             c_status_block);
4301         sb->c_status_block.status_block_id = sb_id;
4302
4303         REG_WR(bp, BAR_CSTRORM_INTMEM +
4304                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4305         REG_WR(bp, BAR_CSTRORM_INTMEM +
4306                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4307                U64_HI(section));
4308         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4309                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4310
4311         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4312                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4313                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4314
4315         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4316 }
4317
4318 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4319 {
4320         int func = BP_FUNC(bp);
4321
4322         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4323                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4324                         sizeof(struct tstorm_def_status_block)/4);
4325         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4326                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4327                         sizeof(struct ustorm_def_status_block)/4);
4328         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4329                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4330                         sizeof(struct cstorm_def_status_block)/4);
4331         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4332                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4333                         sizeof(struct xstorm_def_status_block)/4);
4334 }
4335
4336 static void bnx2x_init_def_sb(struct bnx2x *bp,
4337                               struct host_def_status_block *def_sb,
4338                               dma_addr_t mapping, int sb_id)
4339 {
4340         int port = BP_PORT(bp);
4341         int func = BP_FUNC(bp);
4342         int index, val, reg_offset;
4343         u64 section;
4344
4345         /* ATTN */
4346         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4347                                             atten_status_block);
4348         def_sb->atten_status_block.status_block_id = sb_id;
4349
4350         bp->attn_state = 0;
4351
4352         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4353                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4354
4355         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4356                 bp->attn_group[index].sig[0] = REG_RD(bp,
4357                                                      reg_offset + 0x10*index);
4358                 bp->attn_group[index].sig[1] = REG_RD(bp,
4359                                                reg_offset + 0x4 + 0x10*index);
4360                 bp->attn_group[index].sig[2] = REG_RD(bp,
4361                                                reg_offset + 0x8 + 0x10*index);
4362                 bp->attn_group[index].sig[3] = REG_RD(bp,
4363                                                reg_offset + 0xc + 0x10*index);
4364         }
4365
4366         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4367                              HC_REG_ATTN_MSG0_ADDR_L);
4368
4369         REG_WR(bp, reg_offset, U64_LO(section));
4370         REG_WR(bp, reg_offset + 4, U64_HI(section));
4371
4372         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4373
4374         val = REG_RD(bp, reg_offset);
4375         val |= sb_id;
4376         REG_WR(bp, reg_offset, val);
4377
4378         /* USTORM */
4379         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4380                                             u_def_status_block);
4381         def_sb->u_def_status_block.status_block_id = sb_id;
4382
4383         REG_WR(bp, BAR_USTRORM_INTMEM +
4384                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4385         REG_WR(bp, BAR_USTRORM_INTMEM +
4386                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4387                U64_HI(section));
4388         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4389                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4390
4391         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4392                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4393                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4394
4395         /* CSTORM */
4396         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4397                                             c_def_status_block);
4398         def_sb->c_def_status_block.status_block_id = sb_id;
4399
4400         REG_WR(bp, BAR_CSTRORM_INTMEM +
4401                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4402         REG_WR(bp, BAR_CSTRORM_INTMEM +
4403                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4404                U64_HI(section));
4405         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4406                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4407
4408         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4409                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4410                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4411
4412         /* TSTORM */
4413         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4414                                             t_def_status_block);
4415         def_sb->t_def_status_block.status_block_id = sb_id;
4416
4417         REG_WR(bp, BAR_TSTRORM_INTMEM +
4418                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4419         REG_WR(bp, BAR_TSTRORM_INTMEM +
4420                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4421                U64_HI(section));
4422         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4423                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4424
4425         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4426                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4427                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4428
4429         /* XSTORM */
4430         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4431                                             x_def_status_block);
4432         def_sb->x_def_status_block.status_block_id = sb_id;
4433
4434         REG_WR(bp, BAR_XSTRORM_INTMEM +
4435                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4436         REG_WR(bp, BAR_XSTRORM_INTMEM +
4437                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4438                U64_HI(section));
4439         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4440                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4441
4442         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4443                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4444                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4445
4446         bp->stats_pending = 0;
4447         bp->set_mac_pending = 0;
4448
4449         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4450 }
4451
4452 static void bnx2x_update_coalesce(struct bnx2x *bp)
4453 {
4454         int port = BP_PORT(bp);
4455         int i;
4456
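        /* rx_ticks/tx_ticks are interrupt-coalescing intervals in usec;
         * the /12 below suggests the HC timeout fields are programmed in
         * 12us units (inferred from this code, not a documented constant
         * here).  A zero timeout also sets the HC_DISABLE flag for that
         * index, turning coalescing off entirely. */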
4457         for_each_queue(bp, i) {
4458                 int sb_id = bp->fp[i].sb_id;
4459
4460                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4461                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4462                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4463                                                     U_SB_ETH_RX_CQ_INDEX),
4464                         bp->rx_ticks/12);
4465                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4466                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4467                                                      U_SB_ETH_RX_CQ_INDEX),
4468                          (bp->rx_ticks/12) ? 0 : 1);
4469
4470                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4471                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4472                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4473                                                     C_SB_ETH_TX_CQ_INDEX),
4474                         bp->tx_ticks/12);
4475                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4476                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4477                                                      C_SB_ETH_TX_CQ_INDEX),
4478                          (bp->tx_ticks/12) ? 0 : 1);
4479         }
4480 }
4481
4482 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4483                                        struct bnx2x_fastpath *fp, int last)
4484 {
4485         int i;
4486
4487         for (i = 0; i < last; i++) {
4488                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4489                 struct sk_buff *skb = rx_buf->skb;
4490
4491                 if (skb == NULL) {
4492                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4493                         continue;
4494                 }
4495
4496                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4497                         pci_unmap_single(bp->pdev,
4498                                          pci_unmap_addr(rx_buf, mapping),
4499                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4500
4501                 dev_kfree_skb(skb);
4502                 rx_buf->skb = NULL;
4503         }
4504 }
4505
4506 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4507 {
4508         int func = BP_FUNC(bp);
4509         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4510                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4511         u16 ring_prod, cqe_ring_prod;
4512         int i, j;
4513
4514         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4515         DP(NETIF_MSG_IFUP,
4516            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4517
4518         if (bp->flags & TPA_ENABLE_FLAG) {
4519
4520                 for_each_rx_queue(bp, j) {
4521                         struct bnx2x_fastpath *fp = &bp->fp[j];
4522
4523                         for (i = 0; i < max_agg_queues; i++) {
4524                                 fp->tpa_pool[i].skb =
4525                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4526                                 if (!fp->tpa_pool[i].skb) {
4527                                         BNX2X_ERR("Failed to allocate TPA "
4528                                                   "skb pool for queue[%d] - "
4529                                                   "disabling TPA on this "
4530                                                   "queue!\n", j);
4531                                         bnx2x_free_tpa_pool(bp, fp, i);
4532                                         fp->disable_tpa = 1;
4533                                         break;
4534                                 }
4535                                 pci_unmap_addr_set((struct sw_rx_bd *)
4536                                                         &fp->tpa_pool[i],
4537                                                    mapping, 0);
4538                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4539                         }
4540                 }
4541         }
4542
4543         for_each_rx_queue(bp, j) {
4544                 struct bnx2x_fastpath *fp = &bp->fp[j];
4545
4546                 fp->rx_bd_cons = 0;
4547                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4548                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4549
4550                 /* "next page" elements initialization */
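                /* The tail slots of every ring page are reserved for link
                 * elements holding the DMA address of the next page (the
                 * rings are circular chains of pages) - hence the
                 * "CNT * i - 2" indexing for the 8-byte SGE/BD slots and
                 * "CNT * i - 1" for the larger RCQ element below. */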
4551                 /* SGE ring */
4552                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4553                         struct eth_rx_sge *sge;
4554
4555                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4556                         sge->addr_hi =
4557                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4558                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4559                         sge->addr_lo =
4560                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4561                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4562                 }
4563
4564                 bnx2x_init_sge_ring_bit_mask(fp);
4565
4566                 /* RX BD ring */
4567                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4568                         struct eth_rx_bd *rx_bd;
4569
4570                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4571                         rx_bd->addr_hi =
4572                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4573                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4574                         rx_bd->addr_lo =
4575                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4576                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4577                 }
4578
4579                 /* CQ ring */
4580                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4581                         struct eth_rx_cqe_next_page *nextpg;
4582
4583                         nextpg = (struct eth_rx_cqe_next_page *)
4584                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4585                         nextpg->addr_hi =
4586                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4587                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4588                         nextpg->addr_lo =
4589                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4590                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4591                 }
4592
4593                 /* Allocate SGEs and initialize the ring elements */
4594                 for (i = 0, ring_prod = 0;
4595                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4596
4597                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4598                                 BNX2X_ERR("was only able to allocate "
4599                                           "%d rx sges\n", i);
4600                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4601                                 /* Cleanup already allocated elements */
4602                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4603                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4604                                 fp->disable_tpa = 1;
4605                                 ring_prod = 0;
4606                                 break;
4607                         }
4608                         ring_prod = NEXT_SGE_IDX(ring_prod);
4609                 }
4610                 fp->rx_sge_prod = ring_prod;
4611
4612                 /* Allocate BDs and initialize BD ring */
4613                 fp->rx_comp_cons = 0;
4614                 cqe_ring_prod = ring_prod = 0;
4615                 for (i = 0; i < bp->rx_ring_size; i++) {
4616                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4617                                 BNX2X_ERR("was only able to allocate "
4618                                           "%d rx skbs on queue[%d]\n", i, j);
4619                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4620                                 break;
4621                         }
4622                         ring_prod = NEXT_RX_IDX(ring_prod);
4623                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4624                         WARN_ON(ring_prod <= i);
4625                 }
4626
4627                 fp->rx_bd_prod = ring_prod;
4628                 /* must not have more available CQEs than BDs */
4629                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4630                                        cqe_ring_prod);
4631                 fp->rx_pkt = fp->rx_calls = 0;
4632
4633                 /* Warning!
4634                  * This will generate an interrupt (to the TSTORM),
4635                  * so it must only be done after the chip is initialized.
4636                  */
4637                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4638                                      fp->rx_sge_prod);
4639                 if (j != 0)
4640                         continue;
4641
4642                 REG_WR(bp, BAR_USTRORM_INTMEM +
4643                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4644                        U64_LO(fp->rx_comp_mapping));
4645                 REG_WR(bp, BAR_USTRORM_INTMEM +
4646                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4647                        U64_HI(fp->rx_comp_mapping));
4648         }
4649 }
4650
4651 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4652 {
4653         int i, j;
4654
4655         for_each_tx_queue(bp, j) {
4656                 struct bnx2x_fastpath *fp = &bp->fp[j];
4657
4658                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4659                         struct eth_tx_bd *tx_bd =
4660                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4661
4662                         tx_bd->addr_hi =
4663                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4664                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4665                         tx_bd->addr_lo =
4666                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4667                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4668                 }
4669
4670                 fp->tx_pkt_prod = 0;
4671                 fp->tx_pkt_cons = 0;
4672                 fp->tx_bd_prod = 0;
4673                 fp->tx_bd_cons = 0;
4674                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4675                 fp->tx_pkt = 0;
4676         }
4677 }
4678
4679 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4680 {
4681         int func = BP_FUNC(bp);
4682
4683         spin_lock_init(&bp->spq_lock);
4684
4685         bp->spq_left = MAX_SPQ_PENDING;
4686         bp->spq_prod_idx = 0;
4687         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4688         bp->spq_prod_bd = bp->spq;
4689         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4690
4691         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4692                U64_LO(bp->spq_mapping));
4693         REG_WR(bp,
4694                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4695                U64_HI(bp->spq_mapping));
4696
4697         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4698                bp->spq_prod_idx);
4699 }
4700
4701 static void bnx2x_init_context(struct bnx2x *bp)
4702 {
4703         int i;
4704
4705         for_each_queue(bp, i) {
4706                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4707                 struct bnx2x_fastpath *fp = &bp->fp[i];
4708                 u8 cl_id = fp->cl_id;
4709                 u8 sb_id = fp->sb_id;
4710
4711                 context->ustorm_st_context.common.sb_index_numbers =
4712                                                 BNX2X_RX_SB_INDEX_NUM;
4713                 context->ustorm_st_context.common.clientId = cl_id;
4714                 context->ustorm_st_context.common.status_block_id = sb_id;
4715                 context->ustorm_st_context.common.flags =
4716                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4717                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4718                 context->ustorm_st_context.common.statistics_counter_id =
4719                                                 cl_id;
4720                 context->ustorm_st_context.common.mc_alignment_log_size =
4721                                                 BNX2X_RX_ALIGN_SHIFT;
4722                 context->ustorm_st_context.common.bd_buff_size =
4723                                                 bp->rx_buf_size;
4724                 context->ustorm_st_context.common.bd_page_base_hi =
4725                                                 U64_HI(fp->rx_desc_mapping);
4726                 context->ustorm_st_context.common.bd_page_base_lo =
4727                                                 U64_LO(fp->rx_desc_mapping);
4728                 if (!fp->disable_tpa) {
4729                         context->ustorm_st_context.common.flags |=
4730                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4731                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4732                         context->ustorm_st_context.common.sge_buff_size =
4733                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4734                                          (u32)0xffff);
4735                         context->ustorm_st_context.common.sge_page_base_hi =
4736                                                 U64_HI(fp->rx_sge_mapping);
4737                         context->ustorm_st_context.common.sge_page_base_lo =
4738                                                 U64_LO(fp->rx_sge_mapping);
4739                 }
4740
4741                 context->ustorm_ag_context.cdu_usage =
4742                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4743                                                CDU_REGION_NUMBER_UCM_AG,
4744                                                ETH_CONNECTION_TYPE);
4745
4746                 context->xstorm_st_context.tx_bd_page_base_hi =
4747                                                 U64_HI(fp->tx_desc_mapping);
4748                 context->xstorm_st_context.tx_bd_page_base_lo =
4749                                                 U64_LO(fp->tx_desc_mapping);
4750                 context->xstorm_st_context.db_data_addr_hi =
4751                                                 U64_HI(fp->tx_prods_mapping);
4752                 context->xstorm_st_context.db_data_addr_lo =
4753                                                 U64_LO(fp->tx_prods_mapping);
4754                 context->xstorm_st_context.statistics_data = (cl_id |
4755                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4756                 context->cstorm_st_context.sb_index_number =
4757                                                 C_SB_ETH_TX_CQ_INDEX;
4758                 context->cstorm_st_context.status_block_id = sb_id;
4759
4760                 context->xstorm_ag_context.cdu_reserved =
4761                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4762                                                CDU_REGION_NUMBER_XCM_AG,
4763                                                ETH_CONNECTION_TYPE);
4764         }
4765 }
4766
4767 static void bnx2x_init_ind_table(struct bnx2x *bp)
4768 {
4769         int func = BP_FUNC(bp);
4770         int i;
4771
4772         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4773                 return;
4774
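        /* The table is filled round-robin over the rx queues, e.g. with
         * num_rx_queues == 4 and a leading cl_id of 0 the entries are
         * 0,1,2,3,0,1,... so RSS hash results spread flows evenly. */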
4775         DP(NETIF_MSG_IFUP,
4776            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4777         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4778                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4779                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4780                         bp->fp->cl_id + (i % bp->num_rx_queues));
4781 }
4782
4783 static void bnx2x_set_client_config(struct bnx2x *bp)
4784 {
4785         struct tstorm_eth_client_config tstorm_client = {0};
4786         int port = BP_PORT(bp);
4787         int i;
4788
4789         tstorm_client.mtu = bp->dev->mtu;
4790         tstorm_client.config_flags =
4791                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4792                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4793 #ifdef BCM_VLAN
4794         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4795                 tstorm_client.config_flags |=
4796                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4797                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4798         }
4799 #endif
4800
4801         if (bp->flags & TPA_ENABLE_FLAG) {
4802                 tstorm_client.max_sges_for_packet =
4803                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4804                 tstorm_client.max_sges_for_packet =
4805                         ((tstorm_client.max_sges_for_packet +
4806                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4807                         PAGES_PER_SGE_SHIFT;
4808
4809                 tstorm_client.config_flags |=
4810                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4811         }
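        /* Worked example for max_sges_for_packet (assuming 4K SGE pages
         * and PAGES_PER_SGE == 2, i.e. 8K per SGE - both are assumptions
         * here): an MTU of 9000 page-aligns to 12K = 3 pages, which the
         * round-up above turns into 4 pages, i.e. 2 SGEs per packet. */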
4812
4813         for_each_queue(bp, i) {
4814                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4815
4816                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4817                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4818                        ((u32 *)&tstorm_client)[0]);
4819                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4820                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4821                        ((u32 *)&tstorm_client)[1]);
4822         }
4823
4824         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4825            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4826 }
4827
4828 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4829 {
4830         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4831         int mode = bp->rx_mode;
4832         int mask = (1 << BP_L_ID(bp));
4833         int func = BP_FUNC(bp);
4834         int i;
4835
4836         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4837
4838         switch (mode) {
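        /* Each drop_all/accept_all field below is a bitmask with one bit
         * per client, so masking with (1 << BP_L_ID(bp)) confines the
         * setting to this function's leading client and lets functions
         * sharing the port hold different rx modes. */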
4839         case BNX2X_RX_MODE_NONE: /* no Rx */
4840                 tstorm_mac_filter.ucast_drop_all = mask;
4841                 tstorm_mac_filter.mcast_drop_all = mask;
4842                 tstorm_mac_filter.bcast_drop_all = mask;
4843                 break;
4844
4845         case BNX2X_RX_MODE_NORMAL:
4846                 tstorm_mac_filter.bcast_accept_all = mask;
4847                 break;
4848
4849         case BNX2X_RX_MODE_ALLMULTI:
4850                 tstorm_mac_filter.mcast_accept_all = mask;
4851                 tstorm_mac_filter.bcast_accept_all = mask;
4852                 break;
4853
4854         case BNX2X_RX_MODE_PROMISC:
4855                 tstorm_mac_filter.ucast_accept_all = mask;
4856                 tstorm_mac_filter.mcast_accept_all = mask;
4857                 tstorm_mac_filter.bcast_accept_all = mask;
4858                 break;
4859
4860         default:
4861                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4862                 break;
4863         }
4864
4865         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4866                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4867                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4868                        ((u32 *)&tstorm_mac_filter)[i]);
4869
4870 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4871                    ((u32 *)&tstorm_mac_filter)[i]); */
4872         }
4873
4874         if (mode != BNX2X_RX_MODE_NONE)
4875                 bnx2x_set_client_config(bp);
4876 }
4877
4878 static void bnx2x_init_internal_common(struct bnx2x *bp)
4879 {
4880         int i;
4881
4882         if (bp->flags & TPA_ENABLE_FLAG) {
4883                 struct tstorm_eth_tpa_exist tpa = {0};
4884
4885                 tpa.tpa_exist = 1;
4886
4887                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4888                        ((u32 *)&tpa)[0]);
4889                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4890                        ((u32 *)&tpa)[1]);
4891         }
4892
4893         /* Zero this manually as its initialization is
4894            currently missing in the initTool */
4895         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4896                 REG_WR(bp, BAR_USTRORM_INTMEM +
4897                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4898 }
4899
4900 static void bnx2x_init_internal_port(struct bnx2x *bp)
4901 {
4902         int port = BP_PORT(bp);
4903
4904         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4905         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4906         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4907         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4908 }
4909
4910 /* Calculates the sum of vn_min_rates.
4911    It's needed for further normalizing of the min_rates.
4912    Returns:
4913      sum of vn_min_rates.
4914        or
4915      0 - if all the min_rates are 0.
4916      In the latter case the fairness algorithm should be deactivated.
4917      If not all min_rates are zero, those that are zero are set to DEF_MIN_RATE.
4918  */
4919 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4920 {
4921         int all_zero = 1;
4922         int port = BP_PORT(bp);
4923         int vn;
4924
4925         bp->vn_weight_sum = 0;
4926         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4927                 int func = 2*vn + port;
4928                 u32 vn_cfg =
4929                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4930                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4931                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4932
4933                 /* Skip hidden vns */
4934                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4935                         continue;
4936
4937                 /* If min rate is zero - set it to DEF_MIN_RATE */
4938                 if (!vn_min_rate)
4939                         vn_min_rate = DEF_MIN_RATE;
4940                 else
4941                         all_zero = 0;
4942
4943                 bp->vn_weight_sum += vn_min_rate;
4944         }
4945
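        /* Illustrative numbers: two active VNs with min BW fields of 10
         * and 30 contribute 1000 + 3000 to vn_weight_sum; a VN configured
         * to 0 is counted as DEF_MIN_RATE instead so it is not starved.
         * Only when every VN is configured to 0 is the sum forced back to
         * 0 below, deactivating fairness altogether. */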
4946         /* ... only if all min rates are zeros - disable fairness */
4947         if (all_zero)
4948                 bp->vn_weight_sum = 0;
4949 }
4950
4951 static void bnx2x_init_internal_func(struct bnx2x *bp)
4952 {
4953         struct tstorm_eth_function_common_config tstorm_config = {0};
4954         struct stats_indication_flags stats_flags = {0};
4955         int port = BP_PORT(bp);
4956         int func = BP_FUNC(bp);
4957         int i, j;
4958         u32 offset;
4959         u16 max_agg_size;
4960
4961         if (is_multi(bp)) {
4962                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4963                 tstorm_config.rss_result_mask = MULTI_MASK;
4964         }
4965         if (IS_E1HMF(bp))
4966                 tstorm_config.config_flags |=
4967                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4968
4969         tstorm_config.leading_client_id = BP_L_ID(bp);
4970
4971         REG_WR(bp, BAR_TSTRORM_INTMEM +
4972                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4973                (*(u32 *)&tstorm_config));
4974
4975         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4976         bnx2x_set_storm_rx_mode(bp);
4977
4978         for_each_queue(bp, i) {
4979                 u8 cl_id = bp->fp[i].cl_id;
4980
4981                 /* reset xstorm per client statistics */
4982                 offset = BAR_XSTRORM_INTMEM +
4983                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4984                 for (j = 0;
4985                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4986                         REG_WR(bp, offset + j*4, 0);
4987
4988                 /* reset tstorm per client statistics */
4989                 offset = BAR_TSTRORM_INTMEM +
4990                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4991                 for (j = 0;
4992                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4993                         REG_WR(bp, offset + j*4, 0);
4994
4995                 /* reset ustorm per client statistics */
4996                 offset = BAR_USTRORM_INTMEM +
4997                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4998                 for (j = 0;
4999                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5000                         REG_WR(bp, offset + j*4, 0);
5001         }
5002
5003         /* Init statistics related context */
5004         stats_flags.collect_eth = 1;
5005
5006         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5007                ((u32 *)&stats_flags)[0]);
5008         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5009                ((u32 *)&stats_flags)[1]);
5010
5011         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5012                ((u32 *)&stats_flags)[0]);
5013         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5014                ((u32 *)&stats_flags)[1]);
5015
5016         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5017                ((u32 *)&stats_flags)[0]);
5018         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5019                ((u32 *)&stats_flags)[1]);
5020
5021         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5022                ((u32 *)&stats_flags)[0]);
5023         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5024                ((u32 *)&stats_flags)[1]);
5025
5026         REG_WR(bp, BAR_XSTRORM_INTMEM +
5027                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5028                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5029         REG_WR(bp, BAR_XSTRORM_INTMEM +
5030                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5031                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5032
5033         REG_WR(bp, BAR_TSTRORM_INTMEM +
5034                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5035                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5036         REG_WR(bp, BAR_TSTRORM_INTMEM +
5037                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5038                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5039
5040         REG_WR(bp, BAR_USTRORM_INTMEM +
5041                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5042                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5043         REG_WR(bp, BAR_USTRORM_INTMEM +
5044                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5045                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5046
5047         if (CHIP_IS_E1H(bp)) {
5048                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5049                         IS_E1HMF(bp));
5050                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5051                         IS_E1HMF(bp));
5052                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5053                         IS_E1HMF(bp));
5054                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5055                         IS_E1HMF(bp));
5056
5057                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5058                          bp->e1hov);
5059         }
5060
5061         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5062         max_agg_size =
5063                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5064                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5065                     (u32)0xffff);
5066         for_each_rx_queue(bp, i) {
5067                 struct bnx2x_fastpath *fp = &bp->fp[i];
5068
5069                 REG_WR(bp, BAR_USTRORM_INTMEM +
5070                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5071                        U64_LO(fp->rx_comp_mapping));
5072                 REG_WR(bp, BAR_USTRORM_INTMEM +
5073                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5074                        U64_HI(fp->rx_comp_mapping));
5075
5076                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5077                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5078                          max_agg_size);
5079         }
5080
5081         /* dropless flow control */
5082         if (CHIP_IS_E1H(bp)) {
5083                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5084
5085                 rx_pause.bd_thr_low = 250;
5086                 rx_pause.cqe_thr_low = 250;
5087                 rx_pause.cos = 1;
5088                 rx_pause.sge_thr_low = 0;
5089                 rx_pause.bd_thr_high = 350;
5090                 rx_pause.cqe_thr_high = 350;
5091                 rx_pause.sge_thr_high = 0;
5092
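                /* The low/high pairs presumably form a hysteresis: assert
                 * pause (XOFF) when free BDs/CQEs fall below *_thr_low and
                 * release it (XON) once they climb back above *_thr_high;
                 * queues with TPA active additionally get non-zero SGE
                 * thresholds below. */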
5093                 for_each_rx_queue(bp, i) {
5094                         struct bnx2x_fastpath *fp = &bp->fp[i];
5095
5096                         if (!fp->disable_tpa) {
5097                                 rx_pause.sge_thr_low = 150;
5098                                 rx_pause.sge_thr_high = 250;
5099                         }
5100
5102                         offset = BAR_USTRORM_INTMEM +
5103                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5104                                                                    fp->cl_id);
5105                         for (j = 0;
5106                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5107                              j++)
5108                                 REG_WR(bp, offset + j*4,
5109                                        ((u32 *)&rx_pause)[j]);
5110                 }
5111         }
5112
5113         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5114
5115         /* Init rate shaping and fairness contexts */
5116         if (IS_E1HMF(bp)) {
5117                 int vn;
5118
5119                 /* During init there is no active link.
5120                    Until link is up, set the link rate to 10Gbps */
5121                 bp->link_vars.line_speed = SPEED_10000;
5122                 bnx2x_init_port_minmax(bp);
5123
5124                 bnx2x_calc_vn_weight_sum(bp);
5125
5126                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5127                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5128
5129                 /* Enable rate shaping and fairness */
5130                 bp->cmng.flags.cmng_enables =
5131                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5132                 if (bp->vn_weight_sum)
5133                         bp->cmng.flags.cmng_enables |=
5134                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5135                 else
5136                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5137                            " fairness will be disabled\n");
5138         } else {
5139                 /* rate shaping and fairness are disabled */
5140                 DP(NETIF_MSG_IFUP,
5141                    "single function mode  minmax will be disabled\n");
5142         }
5143
5145         /* Store it to internal memory */
5146         if (bp->port.pmf)
5147                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5148                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5149                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5150                                ((u32 *)(&bp->cmng))[i]);
5151 }
5152
5153 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5154 {
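        /* Intentional fall-through: a COMMON load also performs the PORT
         * and FUNCTION init stages, and a PORT load also performs the
         * FUNCTION stage. */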
5155         switch (load_code) {
5156         case FW_MSG_CODE_DRV_LOAD_COMMON:
5157                 bnx2x_init_internal_common(bp);
5158                 /* no break */
5159
5160         case FW_MSG_CODE_DRV_LOAD_PORT:
5161                 bnx2x_init_internal_port(bp);
5162                 /* no break */
5163
5164         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5165                 bnx2x_init_internal_func(bp);
5166                 break;
5167
5168         default:
5169                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5170                 break;
5171         }
5172 }
5173
5174 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5175 {
5176         int i;
5177
5178         for_each_queue(bp, i) {
5179                 struct bnx2x_fastpath *fp = &bp->fp[i];
5180
5181                 fp->bp = bp;
5182                 fp->state = BNX2X_FP_STATE_CLOSED;
5183                 fp->index = i;
5184                 fp->cl_id = BP_L_ID(bp) + i;
5185                 fp->sb_id = fp->cl_id;
5186                 DP(NETIF_MSG_IFUP,
5187                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5188                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5189                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5190                               fp->sb_id);
5191                 bnx2x_update_fpsb_idx(fp);
5192         }
5193
5194         /* ensure status block indices were read */
5195         rmb();
5196
5198         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5199                           DEF_SB_ID);
5200         bnx2x_update_dsb_idx(bp);
5201         bnx2x_update_coalesce(bp);
5202         bnx2x_init_rx_rings(bp);
5203         bnx2x_init_tx_ring(bp);
5204         bnx2x_init_sp_ring(bp);
5205         bnx2x_init_context(bp);
5206         bnx2x_init_internal(bp, load_code);
5207         bnx2x_init_ind_table(bp);
5208         bnx2x_stats_init(bp);
5209
5210         /* At this point, we are ready for interrupts */
5211         atomic_set(&bp->intr_sem, 0);
5212
5213         /* flush all before enabling interrupts */
5214         mb();
5215         mmiowb();
5216
5217         bnx2x_int_enable(bp);
5218
5219         /* Check for SPIO5 */
5220         bnx2x_attn_int_deasserted0(bp,
5221                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5222                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5223 }
5224
5225 /* end of nic init */
5226
5227 /*
5228  * gzip service functions
5229  */
5230
5231 static int bnx2x_gunzip_init(struct bnx2x *bp)
5232 {
5233         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5234                                               &bp->gunzip_mapping);
5235         if (bp->gunzip_buf  == NULL)
5236                 goto gunzip_nomem1;
5237
5238         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5239         if (bp->strm  == NULL)
5240                 goto gunzip_nomem2;
5241
5242         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5243                                       GFP_KERNEL);
5244         if (bp->strm->workspace == NULL)
5245                 goto gunzip_nomem3;
5246
5247         return 0;
5248
5249 gunzip_nomem3:
5250         kfree(bp->strm);
5251         bp->strm = NULL;
5252
5253 gunzip_nomem2:
5254         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5255                             bp->gunzip_mapping);
5256         bp->gunzip_buf = NULL;
5257
5258 gunzip_nomem1:
5259         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5260                " decompression\n", bp->dev->name);
5261         return -ENOMEM;
5262 }
5263
5264 static void bnx2x_gunzip_end(struct bnx2x *bp)
5265 {
5266         kfree(bp->strm->workspace);
5267
5268         kfree(bp->strm);
5269         bp->strm = NULL;
5270
5271         if (bp->gunzip_buf) {
5272                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5273                                     bp->gunzip_mapping);
5274                 bp->gunzip_buf = NULL;
5275         }
5276 }
5277
5278 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5279 {
5280         int n, rc;
5281
5282         /* check gzip header */
5283         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5284                 BNX2X_ERR("Bad gzip header\n");
5285                 return -EINVAL;
5286         }
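        /* Per RFC 1952 a gzip member starts with a 10-byte fixed header:
         * ID1/ID2 (0x1f 0x8b), CM (8 = deflate), FLG, MTIME(4), XFL, OS.
         * If FLG.FNAME is set, a zero-terminated original file name
         * follows; it is skipped below before the raw deflate stream is
         * handed to zlib_inflate(). */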
5287
5288         n = 10;
5289
5290 #define FNAME                           0x8
5291
5292         if (zbuf[3] & FNAME)
5293                 while ((zbuf[n++] != 0) && (n < len));
5294
5295         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5296         bp->strm->avail_in = len - n;
5297         bp->strm->next_out = bp->gunzip_buf;
5298         bp->strm->avail_out = FW_BUF_SIZE;
5299
5300         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5301         if (rc != Z_OK)
5302                 return rc;
5303
5304         rc = zlib_inflate(bp->strm, Z_FINISH);
5305         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5306                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5307                        bp->dev->name, bp->strm->msg);
5308
5309         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5310         if (bp->gunzip_outlen & 0x3)
5311                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5312                                     " gunzip_outlen (%d) not aligned\n",
5313                        bp->dev->name, bp->gunzip_outlen);
5314         bp->gunzip_outlen >>= 2;
5315
5316         zlib_inflateEnd(bp->strm);
5317
5318         if (rc == Z_STREAM_END)
5319                 return 0;
5320
5321         return rc;
5322 }
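/*
 * A minimal sketch of the intended calling sequence, as used around
 * bnx2x_init_hw() below (fw_blob/fw_len are placeholders, error
 * handling elided):
 *
 *	bnx2x_gunzip_init(bp);
 *	rc = bnx2x_gunzip(bp, fw_blob, fw_len);
 *	if (rc == 0)
 *		consume bp->gunzip_outlen 32-bit words from bp->gunzip_buf;
 *	bnx2x_gunzip_end(bp);
 *
 * Note that bnx2x_gunzip() converts gunzip_outlen from bytes to 32-bit
 * words (the >>= 2 above) before returning.
 */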
5323
5324 /* nic load/unload */
5325
5326 /*
5327  * General service functions
5328  */
5329
5330 /* send a NIG loopback debug packet */
5331 static void bnx2x_lb_pckt(struct bnx2x *bp)
5332 {
5333         u32 wb_write[3];
5334
5335         /* Ethernet source and destination addresses */
5336         wb_write[0] = 0x55555555;
5337         wb_write[1] = 0x55555555;
5338         wb_write[2] = 0x20;             /* SOP */
5339         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5340
5341         /* NON-IP protocol */
5342         wb_write[0] = 0x09000000;
5343         wb_write[1] = 0x55555555;
5344         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5345         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5346 }
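/*
 * Each REG_WR_DMAE() above pushes two 32-bit data words plus a control
 * word through the NIG debug-packet interface: 0x20 in the control word
 * marks start-of-packet, 0x10 marks end-of-packet.  With 8 data bytes
 * per write, one SOP write plus one EOP write yields a single 16-byte
 * (0x10) packet - which matches the octet count bnx2x_int_mem_test()
 * polls for in NIG_REG_STAT2_BRB_OCTET below.
 */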
5347
5348 /* some of the internal memories
5349  * are not directly readable from the driver;
5350  * to test them we send debug packets
5351  */
5352 static int bnx2x_int_mem_test(struct bnx2x *bp)
5353 {
5354         int factor;
5355         int count, i;
5356         u32 val = 0;
5357
5358         if (CHIP_REV_IS_FPGA(bp))
5359                 factor = 120;
5360         else if (CHIP_REV_IS_EMUL(bp))
5361                 factor = 200;
5362         else
5363                 factor = 1;
5364
5365         DP(NETIF_MSG_HW, "start part1\n");
5366
5367         /* Disable inputs of parser neighbor blocks */
5368         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5369         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5370         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5371         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5372
5373         /*  Write 0 to parser credits for CFC search request */
5374         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5375
5376         /* send Ethernet packet */
5377         bnx2x_lb_pckt(bp);
5378
5379         /* TODO: do we need to reset the NIG statistics? */
5380         /* Wait until NIG register shows 1 packet of size 0x10 */
5381         count = 1000 * factor;
5382         while (count) {
5383
5384                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5385                 val = *bnx2x_sp(bp, wb_data[0]);
5386                 if (val == 0x10)
5387                         break;
5388
5389                 msleep(10);
5390                 count--;
5391         }
5392         if (val != 0x10) {
5393                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5394                 return -1;
5395         }
5396
5397         /* Wait until PRS register shows 1 packet */
5398         count = 1000 * factor;
5399         while (count) {
5400                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5401                 if (val == 1)
5402                         break;
5403
5404                 msleep(10);
5405                 count--;
5406         }
5407         if (val != 0x1) {
5408                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5409                 return -2;
5410         }
5411
5412         /* Reset and init BRB, PRS */
5413         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5414         msleep(50);
5415         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5416         msleep(50);
5417         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5418         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5419
5420         DP(NETIF_MSG_HW, "part2\n");
5421
5422         /* Disable inputs of parser neighbor blocks */
5423         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5424         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5425         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5426         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5427
5428         /* Write 0 to parser credits for CFC search request */
5429         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5430
5431         /* send 10 Ethernet packets */
5432         for (i = 0; i < 10; i++)
5433                 bnx2x_lb_pckt(bp);
5434
5435         /* Wait until NIG register shows 10 + 1
5436            packets of total size 11*0x10 = 0xb0 */
5437         count = 1000 * factor;
5438         while (count) {
5439
5440                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5441                 val = *bnx2x_sp(bp, wb_data[0]);
5442                 if (val == 0xb0)
5443                         break;
5444
5445                 msleep(10);
5446                 count--;
5447         }
5448         if (val != 0xb0) {
5449                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5450                 return -3;
5451         }
5452
5453         /* Wait until PRS register shows 2 packets */
5454         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5455         if (val != 2)
5456                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5457
5458         /* Write 1 to parser credits for CFC search request */
5459         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5460
5461         /* Wait until PRS register shows 3 packets */
5462         msleep(10 * factor);
5463         /* re-read the PRS packet counter */
5464         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5465         if (val != 3)
5466                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5467
5468         /* clear NIG EOP FIFO */
5469         for (i = 0; i < 11; i++)
5470                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5471         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5472         if (val != 1) {
5473                 BNX2X_ERR("clear of NIG failed\n");
5474                 return -4;
5475         }
5476
5477         /* Reset and init BRB, PRS, NIG */
5478         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5479         msleep(50);
5480         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5481         msleep(50);
5482         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5483         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5484 #ifndef BCM_ISCSI
5485         /* set NIC mode */
5486         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5487 #endif
5488
5489         /* Enable inputs of parser neighbor blocks */
5490         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5491         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5492         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5493         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5494
5495         DP(NETIF_MSG_HW, "done\n");
5496
5497         return 0; /* OK */
5498 }
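/*
 * To summarize the self test: part 1 pushes a single loopback packet
 * with CFC search credits at zero and checks that both the NIG octet
 * counter (0x10) and the PRS packet counter (1) advance; part 2 repeats
 * this with 10 packets (0xb0 octets), restores one CFC search credit to
 * let the parser drain, and finally empties the NIG EOP FIFO.  The
 * negative return codes (-1..-4) identify which stage timed out.
 */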
5499
5500 static void enable_blocks_attention(struct bnx2x *bp)
5501 {
5502         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5503         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5504         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5505         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5506         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5507         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5508         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5509         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5510         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5511 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5512 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5513         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5514         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5515         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5516 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5517 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5518         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5519         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5520         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5521         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5522 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5523 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5524         if (CHIP_REV_IS_FPGA(bp))
5525                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5526         else
5527                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5528         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5529         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5530         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5531 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5532 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5533         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5534         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5535 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5536         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5537 }
5538
5539
5540 static void bnx2x_reset_common(struct bnx2x *bp)
5541 {
5542         /* reset_common */
5543         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5544                0xd3ffff7f);
5545         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5546 }
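/*
 * The MISC reset registers come in SET/CLEAR pairs: writing a mask to
 * the _CLEAR address puts the corresponding blocks into reset, and
 * writing it to the _SET address releases them - which is why
 * bnx2x_init_common() below immediately writes the _SET registers after
 * calling this function.
 */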
5547
5548
5549 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5550 {
5551         u32 val;
5552         u8 port;
5553         u8 is_required = 0;
5554
5555         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5556               SHARED_HW_CFG_FAN_FAILURE_MASK;
5557
5558         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5559                 is_required = 1;
5560
5561         /*
5562          * The fan failure mechanism is usually related to the PHY type since
5563          * the power consumption of the board is affected by the PHY. Currently,
5564          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5565          */
5566         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5567                 for (port = PORT_0; port < PORT_MAX; port++) {
5568                         u32 phy_type =
5569                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
5570                                          external_phy_config) &
5571                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5572                         is_required |=
5573                                 ((phy_type ==
5574                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5575                                  (phy_type ==
5576                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5577                                  (phy_type ==
5578                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5579                 }
5580
5581         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5582
5583         if (is_required == 0)
5584                 return;
5585
5586         /* Fan failure is indicated by SPIO 5 */
5587         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5588                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
5589
5590         /* set to active low mode */
5591         val = REG_RD(bp, MISC_REG_SPIO_INT);
5592         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5593                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5594         REG_WR(bp, MISC_REG_SPIO_INT, val);
5595
5596         /* enable interrupt to signal the IGU */
5597         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5598         val |= (1 << MISC_REGISTERS_SPIO_5);
5599         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5600 }
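/*
 * When enabled, fan failure is reported through the SPIO5 attention
 * bit; note that the end of bnx2x_nic_init() above re-checks that bit
 * at load time via bnx2x_attn_int_deasserted0(), so a failure latched
 * while the driver was down is not lost.
 */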
5601
5602 static int bnx2x_init_common(struct bnx2x *bp)
5603 {
5604         u32 val, i;
5605
5606         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5607
5608         bnx2x_reset_common(bp);
5609         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5610         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5611
5612         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5613         if (CHIP_IS_E1H(bp))
5614                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5615
5616         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5617         msleep(30);
5618         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5619
5620         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5621         if (CHIP_IS_E1(bp)) {
5622                 /* enable HW interrupt from PXP on USDM overflow
5623                    bit 16 on INT_MASK_0 */
5624                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5625         }
5626
5627         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5628         bnx2x_init_pxp(bp);
5629
5630 #ifdef __BIG_ENDIAN
5631         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5632         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5633         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5634         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5635         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5636         /* make sure this value is 0 */
5637         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5638
5639 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5640         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5641         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5642         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5643         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5644 #endif
5645
5646         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5647 #ifdef BCM_ISCSI
5648         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5649         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5650         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5651 #endif
5652
5653         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5654                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5655
5656         /* let the HW do its magic ... */
5657         msleep(100);
5658         /* finish PXP init */
5659         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5660         if (val != 1) {
5661                 BNX2X_ERR("PXP2 CFG failed\n");
5662                 return -EBUSY;
5663         }
5664         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5665         if (val != 1) {
5666                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5667                 return -EBUSY;
5668         }
5669
5670         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5671         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5672
5673         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5674
5675         /* clean the DMAE memory */
5676         bp->dmae_ready = 1;
5677         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5678
5679         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5680         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5681         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5682         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5683
5684         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5685         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5686         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5687         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5688
5689         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5690         /* soft reset pulse */
5691         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5692         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5693
5694 #ifdef BCM_ISCSI
5695         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5696 #endif
5697
5698         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5699         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5700         if (!CHIP_REV_IS_SLOW(bp)) {
5701                 /* enable hw interrupt from doorbell Q */
5702                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5703         }
5704
5705         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5706         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5707         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5708         /* set NIC mode */
5709         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5710         if (CHIP_IS_E1H(bp))
5711                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5712
5713         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5714         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5715         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5716         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5717
5718         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5719         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5720         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5721         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5722
5723         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5724         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5725         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5726         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5727
5728         /* sync semi rtc */
5729         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5730                0x80000000);
5731         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5732                0x80000000);
5733
5734         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5735         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5736         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5737
5738         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5739         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5740                 REG_WR(bp, i, 0xc0cac01a);
5741                 /* TODO: replace with something meaningful */
5742         }
5743         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5744         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5745
5746         if (sizeof(union cdu_context) != 1024)
5747                 /* we currently assume that a context is 1024 bytes */
5748                 printk(KERN_ALERT PFX "please adjust the size of"
5749                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5750
5751         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5752         val = (4 << 24) + (0 << 12) + 1024;
5753         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5754         if (CHIP_IS_E1(bp)) {
5755                 /* !!! fix pxp client credit until excel update */
5756                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5757                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5758         }
5759
5760         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5761         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5762         /* enable context validation interrupt from CFC */
5763         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5764
5765         /* set the thresholds to prevent CFC/CDU race */
5766         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5767
5768         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5769         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5770
5771         /* PXPCS COMMON comes here */
5772         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5773         /* Reset PCIE errors for debug */
5774         REG_WR(bp, 0x2814, 0xffffffff);
5775         REG_WR(bp, 0x3820, 0xffffffff);
5776
5777         /* EMAC0 COMMON comes here */
5778         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5779         /* EMAC1 COMMON comes here */
5780         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5781         /* DBU COMMON comes here */
5782         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5783         /* DBG COMMON comes here */
5784         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5785
5786         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5787         if (CHIP_IS_E1H(bp)) {
5788                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5789                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5790         }
5791
5792         if (CHIP_REV_IS_SLOW(bp))
5793                 msleep(200);
5794
5795         /* finish CFC init */
5796         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5797         if (val != 1) {
5798                 BNX2X_ERR("CFC LL_INIT failed\n");
5799                 return -EBUSY;
5800         }
5801         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5802         if (val != 1) {
5803                 BNX2X_ERR("CFC AC_INIT failed\n");
5804                 return -EBUSY;
5805         }
5806         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5807         if (val != 1) {
5808                 BNX2X_ERR("CFC CAM_INIT failed\n");
5809                 return -EBUSY;
5810         }
5811         REG_WR(bp, CFC_REG_DEBUG0, 0);
5812
5813         /* read NIG statistic
5814            to see if this is the first time we are up since power-up */
5815         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5816         val = *bnx2x_sp(bp, wb_data[0]);
5817
5818         /* do internal memory self test */
5819         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5820                 BNX2X_ERR("internal mem self test failed\n");
5821                 return -EBUSY;
5822         }
5823
5824         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5825         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5826         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5827         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5828         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
5829                 bp->port.need_hw_lock = 1;
5830                 break;
5831
5832         default:
5833                 break;
5834         }
5835
5836         bnx2x_setup_fan_failure_detection(bp);
5837
5838         /* clear PXP2 attentions */
5839         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5840
5841         enable_blocks_attention(bp);
5842
5843         if (!BP_NOMCP(bp)) {
5844                 bnx2x_acquire_phy_lock(bp);
5845                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5846                 bnx2x_release_phy_lock(bp);
5847         } else
5848                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5849
5850         return 0;
5851 }
5852
5853 static int bnx2x_init_port(struct bnx2x *bp)
5854 {
5855         int port = BP_PORT(bp);
5856         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5857         u32 low, high;
5858         u32 val;
5859
5860         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5861
5862         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5863
5864         /* Port PXP comes here */
5865         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5866         /* Port PXP2 comes here */
5867         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5868 #ifdef BCM_ISCSI
5869         /* Port0  1
5870          * Port1  385 */
5871         i++;
5872         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5873         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5874         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5875         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5876
5877         /* Port0  2
5878          * Port1  386 */
5879         i++;
5880         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5881         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5882         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5883         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5884
5885         /* Port0  3
5886          * Port1  387 */
5887         i++;
5888         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5889         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5890         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5891         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5892 #endif
5893         /* Port CMs come here */
5894         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5895
5896         /* Port QM comes here */
5897 #ifdef BCM_ISCSI
5898         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5899         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5900
5901         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5902 #endif
5903         /* Port DQ comes here */
5904         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5905
5906         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5907         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5908                 /* no pause for emulation and FPGA */
5909                 low = 0;
5910                 high = 513;
5911         } else {
5912                 if (IS_E1HMF(bp))
5913                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5914                 else if (bp->dev->mtu > 4096) {
5915                         if (bp->flags & ONE_PORT_FLAG)
5916                                 low = 160;
5917                         else {
5918                                 val = bp->dev->mtu;
5919                                 /* (24*1024 + val*4)/256 */
5920                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5921                         }
5922                 } else
5923                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5924                 high = low + 56;        /* 14*1024/256 */
5925         }
5926         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5927         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5928
5929
5930         /* Port PRS comes here */
5931         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5932         /* Port TSDM comes here */
5933         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5934         /* Port CSDM comes here */
5935         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5936         /* Port USDM comes here */
5937         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5938         /* Port XSDM comes here */
5939         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5940
5941         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5942         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5943         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5944         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5945
5946         /* Port UPB comes here */
5947         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5948         /* Port XPB comes here */
5949         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5950
5951         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5952
5953         /* configure PBF to work without PAUSE (mtu 9000) */
5954         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5955
5956         /* update threshold */
5957         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5958         /* update init credit */
5959         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5960
5961         /* probe changes */
5962         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5963         msleep(5);
5964         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5965
5966 #ifdef BCM_ISCSI
5967         /* tell the searcher where the T2 table is */
5968         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5969
5970         wb_write[0] = U64_LO(bp->t2_mapping);
5971         wb_write[1] = U64_HI(bp->t2_mapping);
5972         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5973         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5974         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5975         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5976
5977         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5978         /* Port SRCH comes here */
5979 #endif
5980         /* Port CDU comes here */
5981         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5982         /* Port CFC comes here */
5983         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5984
5985         if (CHIP_IS_E1(bp)) {
5986                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5987                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5988         }
5989         bnx2x_init_block(bp, HC_BLOCK, init_stage);
5990
5991         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5992         /* init aeu_mask_attn_func_0/1:
5993          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5994          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5995          *             bits 4-7 are used for "per vn group attention" */
5996         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5997                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5998
5999         /* Port PXPCS comes here */
6000         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6001         /* Port EMAC0 comes here */
6002         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6003         /* Port EMAC1 comes here */
6004         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6005         /* Port DBU comes here */
6006         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6007         /* Port DBG comes here */
6008         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6009
6010         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6011
6012         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6013
6014         if (CHIP_IS_E1H(bp)) {
6015                 /* 0x2 disables e1hov, 0x1 enables it */
6016                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6017                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6018
6019                 /* support pause requests from USDM, TSDM and BRB */
6020                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6021
6022                 {
6023                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6024                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6025                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6026                 }
6027         }
6028
6029         /* Port MCP comes here */
6030         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6031         /* Port DMAE comes here */
6032         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6033
6034         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6035         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6036                 {
6037                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6038
6039                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6040                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6041
6042                 /* The GPIO should be swapped if the swap register is
6043                    set and active */
6044                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6045                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6046
6047                 /* Select function upon port-swap configuration */
6048                 if (port == 0) {
6049                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6050                         aeu_gpio_mask = (swap_val && swap_override) ?
6051                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6052                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6053                 } else {
6054                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6055                         aeu_gpio_mask = (swap_val && swap_override) ?
6056                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6057                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6058                 }
6059                 val = REG_RD(bp, offset);
6060                 /* add GPIO3 to group */
6061                 val |= aeu_gpio_mask;
6062                 REG_WR(bp, offset, val);
6063                 }
6064                 break;
6065
6066         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6067         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6068                 /* add SPIO 5 to group 0 */
6069                 {
6070                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6071                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6072                 val = REG_RD(bp, reg_addr);
6073                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6074                 REG_WR(bp, reg_addr, val);
6075                 }
6076                 break;
6077
6078         default:
6079                 break;
6080         }
6081
6082         bnx2x__link_reset(bp);
6083
6084         return 0;
6085 }
6086
6087 #define ILT_PER_FUNC            (768/2)
6088 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6089 /* the phys address is shifted right 12 bits and has a
6090    valid bit (1) added as the 53rd bit;
6091    then, since this is a wide register (TM),
6092    we split it into two 32-bit writes
6093  */
6094 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6095 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6096 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6097 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
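/*
 * A worked example with a hypothetical DMA address,
 * addr = 0x0000001234567000:
 *
 *	ONCHIP_ADDR1(addr) = (addr >> 12) & 0xFFFFFFFF = 0x01234567
 *	ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44)  = 0x00100000
 *
 * The two words form a 64-bit ILT entry whose low 52 bits are the page
 * number and whose bit 52 (the "53rd bit" above) is the valid bit.
 * PXP_ONE_ILT(x) is simply PXP_ILT_RANGE(x, x): the low 10 bits hold
 * the first line of the range and the next 10 bits the last line.
 */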
6098
6099 #define CNIC_ILT_LINES          0
6100
6101 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6102 {
6103         int reg;
6104
6105         if (CHIP_IS_E1H(bp))
6106                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6107         else /* E1 */
6108                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6109
6110         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6111 }
6112
6113 static int bnx2x_init_func(struct bnx2x *bp)
6114 {
6115         int port = BP_PORT(bp);
6116         int func = BP_FUNC(bp);
6117         u32 addr, val;
6118         int i;
6119
6120         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6121
6122         /* set MSI reconfigure capability */
6123         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6124         val = REG_RD(bp, addr);
6125         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6126         REG_WR(bp, addr, val);
6127
6128         i = FUNC_ILT_BASE(func);
6129
6130         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6131         if (CHIP_IS_E1H(bp)) {
6132                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6133                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6134         } else /* E1 */
6135                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6136                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6137
6138
6139         if (CHIP_IS_E1H(bp)) {
6140                 for (i = 0; i < 9; i++)
6141                         bnx2x_init_block(bp,
6142                                          cm_blocks[i], FUNC0_STAGE + func);
6143
6144                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6145                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6146         }
6147
6148         /* HC init per function */
6149         if (CHIP_IS_E1H(bp)) {
6150                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6151
6152                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6153                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6154         }
6155         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6156
6157         /* Reset PCIE errors for debug */
6158         REG_WR(bp, 0x2114, 0xffffffff);
6159         REG_WR(bp, 0x2120, 0xffffffff);
6160
6161         return 0;
6162 }
6163
6164 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6165 {
6166         int i, rc = 0;
6167
6168         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6169            BP_FUNC(bp), load_code);
6170
6171         bp->dmae_ready = 0;
6172         mutex_init(&bp->dmae_mutex);
6173         bnx2x_gunzip_init(bp);
6174
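        /*
         * The switch below deliberately cascades: a COMMON load performs
         * common, port and function init; a PORT load performs port and
         * function init; a FUNCTION load performs function init only.
         * Hence the "no break" annotations on the first two cases.
         */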
6175         switch (load_code) {
6176         case FW_MSG_CODE_DRV_LOAD_COMMON:
6177                 rc = bnx2x_init_common(bp);
6178                 if (rc)
6179                         goto init_hw_err;
6180                 /* no break */
6181
6182         case FW_MSG_CODE_DRV_LOAD_PORT:
6183                 bp->dmae_ready = 1;
6184                 rc = bnx2x_init_port(bp);
6185                 if (rc)
6186                         goto init_hw_err;
6187                 /* no break */
6188
6189         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6190                 bp->dmae_ready = 1;
6191                 rc = bnx2x_init_func(bp);
6192                 if (rc)
6193                         goto init_hw_err;
6194                 break;
6195
6196         default:
6197                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6198                 break;
6199         }
6200
6201         if (!BP_NOMCP(bp)) {
6202                 int func = BP_FUNC(bp);
6203
6204                 bp->fw_drv_pulse_wr_seq =
6205                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6206                                  DRV_PULSE_SEQ_MASK);
6207                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6208                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6209                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6210         } else
6211                 bp->func_stx = 0;
6212
6213         /* this needs to be done before gunzip end */
6214         bnx2x_zero_def_sb(bp);
6215         for_each_queue(bp, i)
6216                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6217
6218 init_hw_err:
6219         bnx2x_gunzip_end(bp);
6220
6221         return rc;
6222 }
6223
6224 /* send the MCP a request, block until there is a reply */
6225 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6226 {
6227         int func = BP_FUNC(bp);
6228         u32 seq = ++bp->fw_seq;
6229         u32 rc = 0;
6230         u32 cnt = 1;
6231         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6232
6233         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6234         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6235
6236         do {
6237                 /* let the FW do its magic ... */
6238                 msleep(delay);
6239
6240                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6241
6242                 /* Give the FW up to 2 seconds (200 * 10ms) */
6243         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6244
6245         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6246            cnt*delay, rc, seq);
6247
6248         /* is this a reply to our command? */
6249         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6250                 rc &= FW_MSG_CODE_MASK;
6251
6252         } else {
6253                 /* FW BUG! */
6254                 BNX2X_ERR("FW failed to respond!\n");
6255                 bnx2x_fw_dump(bp);
6256                 rc = 0;
6257         }
6258
6259         return rc;
6260 }
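/*
 * A minimal usage sketch (DRV_MSG_CODE_LOAD_REQ is one of the DRV_MSG_*
 * command codes from the shared MCP interface, shown for illustration):
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (!load_code) {
 *		// MCP did not answer, or answered out of sequence
 *	}
 *
 * A return value of 0 always means "no valid reply": the sequence
 * number is stripped and only the FW_MSG_CODE_* bits are returned.
 */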
6261
6262 static void bnx2x_free_mem(struct bnx2x *bp)
6263 {
6264
6265 #define BNX2X_PCI_FREE(x, y, size) \
6266         do { \
6267                 if (x) { \
6268                         pci_free_consistent(bp->pdev, size, x, y); \
6269                         x = NULL; \
6270                         y = 0; \
6271                 } \
6272         } while (0)
6273
6274 #define BNX2X_FREE(x) \
6275         do { \
6276                 if (x) { \
6277                         vfree(x); \
6278                         x = NULL; \
6279                 } \
6280         } while (0)
6281
6282         int i;
6283
6284         /* fastpath */
6285         /* Common */
6286         for_each_queue(bp, i) {
6287
6288                 /* status blocks */
6289                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6290                                bnx2x_fp(bp, i, status_blk_mapping),
6291                                sizeof(struct host_status_block) +
6292                                sizeof(struct eth_tx_db_data));
6293         }
6294         /* Rx */
6295         for_each_rx_queue(bp, i) {
6296
6297                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6298                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6299                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6300                                bnx2x_fp(bp, i, rx_desc_mapping),
6301                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6302
6303                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6304                                bnx2x_fp(bp, i, rx_comp_mapping),
6305                                sizeof(struct eth_fast_path_rx_cqe) *
6306                                NUM_RCQ_BD);
6307
6308                 /* SGE ring */
6309                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6310                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6311                                bnx2x_fp(bp, i, rx_sge_mapping),
6312                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6313         }
6314         /* Tx */
6315         for_each_tx_queue(bp, i) {
6316
6317                 /* fastpath tx rings: tx_buf tx_desc */
6318                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6319                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6320                                bnx2x_fp(bp, i, tx_desc_mapping),
6321                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
6322         }
6323         /* end of fastpath */
6324
6325         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6326                        sizeof(struct host_def_status_block));
6327
6328         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6329                        sizeof(struct bnx2x_slowpath));
6330
6331 #ifdef BCM_ISCSI
6332         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6333         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6334         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6335         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6336 #endif
6337         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6338
6339 #undef BNX2X_PCI_FREE
6340 #undef BNX2X_FREE
6341 }
6342
6343 static int bnx2x_alloc_mem(struct bnx2x *bp)
6344 {
6345
6346 #define BNX2X_PCI_ALLOC(x, y, size) \
6347         do { \
6348                 x = pci_alloc_consistent(bp->pdev, size, y); \
6349                 if (x == NULL) \
6350                         goto alloc_mem_err; \
6351                 memset(x, 0, size); \
6352         } while (0)
6353
6354 #define BNX2X_ALLOC(x, size) \
6355         do { \
6356                 x = vmalloc(size); \
6357                 if (x == NULL) \
6358                         goto alloc_mem_err; \
6359                 memset(x, 0, size); \
6360         } while (0)
6361
6362         int i;
6363
6364         /* fastpath */
6365         /* Common */
6366         for_each_queue(bp, i) {
6367                 bnx2x_fp(bp, i, bp) = bp;
6368
6369                 /* status blocks */
6370                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6371                                 &bnx2x_fp(bp, i, status_blk_mapping),
6372                                 sizeof(struct host_status_block) +
6373                                 sizeof(struct eth_tx_db_data));
6374         }
6375         /* Rx */
6376         for_each_rx_queue(bp, i) {
6377
6378                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6379                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6380                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6381                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6382                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6383                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6384
6385                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6386                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6387                                 sizeof(struct eth_fast_path_rx_cqe) *
6388                                 NUM_RCQ_BD);
6389
6390                 /* SGE ring */
6391                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6392                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6393                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6394                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6395                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6396         }
6397         /* Tx */
6398         for_each_tx_queue(bp, i) {
6399
6400                 bnx2x_fp(bp, i, hw_tx_prods) =
6401                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6402
6403                 bnx2x_fp(bp, i, tx_prods_mapping) =
6404                                 bnx2x_fp(bp, i, status_blk_mapping) +
6405                                 sizeof(struct host_status_block);
6406
6407                 /* fastpath tx rings: tx_buf tx_desc */
6408                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6409                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6410                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6411                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6412                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6413         }
6414         /* end of fastpath */
6415
6416         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6417                         sizeof(struct host_def_status_block));
6418
6419         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6420                         sizeof(struct bnx2x_slowpath));
6421
6422 #ifdef BCM_ISCSI
6423         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6424
6425         /* Initialize T1 */
6426         for (i = 0; i < 64*1024; i += 64) {
6427                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6428                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6429         }
6430
6431         /* allocate searcher T2 table;
6432            we allocate 1/4 of the allocation for T2
6433            (which is not entered into the ILT) */
6434         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6435
6436         /* Initialize T2 */
6437         for (i = 0; i < 16*1024; i += 64)
6438                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6439
6440         /* now fixup the last line in the block to point to the next block */
6441         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6442
6443         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6444         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6445
6446         /* QM queues (128*MAX_CONN) */
6447         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6448 #endif
6449
6450         /* Slow path ring */
6451         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6452
6453         return 0;
6454
6455 alloc_mem_err:
6456         bnx2x_free_mem(bp);
6457         return -ENOMEM;
6458
6459 #undef BNX2X_PCI_ALLOC
6460 #undef BNX2X_ALLOC
6461 }
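/*
 * On any allocation failure the code above jumps to alloc_mem_err and
 * unwinds through bnx2x_free_mem(); that is safe even for a partial
 * allocation because both free macros check the pointer for NULL (and
 * clear it) before freeing.
 */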
6462
6463 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6464 {
6465         int i;
6466
6467         for_each_tx_queue(bp, i) {
6468                 struct bnx2x_fastpath *fp = &bp->fp[i];
6469
6470                 u16 bd_cons = fp->tx_bd_cons;
6471                 u16 sw_prod = fp->tx_pkt_prod;
6472                 u16 sw_cons = fp->tx_pkt_cons;
6473
6474                 while (sw_cons != sw_prod) {
6475                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6476                         sw_cons++;
6477                 }
6478         }
6479 }
6480
6481 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6482 {
6483         int i, j;
6484
6485         for_each_rx_queue(bp, j) {
6486                 struct bnx2x_fastpath *fp = &bp->fp[j];
6487
6488                 for (i = 0; i < NUM_RX_BD; i++) {
6489                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6490                         struct sk_buff *skb = rx_buf->skb;
6491
6492                         if (skb == NULL)
6493                                 continue;
6494
6495                         pci_unmap_single(bp->pdev,
6496                                          pci_unmap_addr(rx_buf, mapping),
6497                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6498
6499                         rx_buf->skb = NULL;
6500                         dev_kfree_skb(skb);
6501                 }
6502                 if (!fp->disable_tpa)
6503                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6504                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6505                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6506         }
6507 }
6508
6509 static void bnx2x_free_skbs(struct bnx2x *bp)
6510 {
6511         bnx2x_free_tx_skbs(bp);
6512         bnx2x_free_rx_skbs(bp);
6513 }
6514
6515 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6516 {
6517         int i, offset = 1;
6518
6519         free_irq(bp->msix_table[0].vector, bp->dev);
6520         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6521            bp->msix_table[0].vector);
6522
6523         for_each_queue(bp, i) {
6524                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6525                    "state %x\n", i, bp->msix_table[i + offset].vector,
6526                    bnx2x_fp(bp, i, state));
6527
6528                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6529         }
6530 }
6531
6532 static void bnx2x_free_irq(struct bnx2x *bp)
6533 {
6534         if (bp->flags & USING_MSIX_FLAG) {
6535                 bnx2x_free_msix_irqs(bp);
6536                 pci_disable_msix(bp->pdev);
6537                 bp->flags &= ~USING_MSIX_FLAG;
6538
6539         } else if (bp->flags & USING_MSI_FLAG) {
6540                 free_irq(bp->pdev->irq, bp->dev);
6541                 pci_disable_msi(bp->pdev);
6542                 bp->flags &= ~USING_MSI_FLAG;
6543
6544         } else
6545                 free_irq(bp->pdev->irq, bp->dev);
6546 }
6547
6548 static int bnx2x_enable_msix(struct bnx2x *bp)
6549 {
6550         int i, rc, offset = 1;
6551         int igu_vec = 0;
6552
6553         bp->msix_table[0].entry = igu_vec;
6554         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6555
6556         for_each_queue(bp, i) {
6557                 igu_vec = BP_L_ID(bp) + offset + i;
6558                 bp->msix_table[i + offset].entry = igu_vec;
6559                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6560                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6561         }
6562
6563         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6564                              BNX2X_NUM_QUEUES(bp) + offset);
6565         if (rc) {
6566                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6567                 return rc;
6568         }
6569
6570         bp->flags |= USING_MSIX_FLAG;
6571
6572         return 0;
6573 }
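/*
 * Vector layout after a successful pci_enable_msix(): entry 0 carries
 * the slowpath/default status block interrupt and entries 1..n map
 * one-to-one onto the fastpath queues; igu_vec ties each MSI-X entry to
 * the IGU status block id (BP_L_ID(bp) + offset + i) programmed for
 * that queue.
 */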
6574
6575 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6576 {
6577         int i, rc, offset = 1;
6578
6579         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6580                          bp->dev->name, bp->dev);
6581         if (rc) {
6582                 BNX2X_ERR("request sp irq failed\n");
6583                 return -EBUSY;
6584         }
6585
6586         for_each_queue(bp, i) {
6587                 struct bnx2x_fastpath *fp = &bp->fp[i];
6588
6589                 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6590                 rc = request_irq(bp->msix_table[i + offset].vector,
6591                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6592                 if (rc) {
6593                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6594                         bnx2x_free_msix_irqs(bp);
6595                         return -EBUSY;
6596                 }
6597
6598                 fp->state = BNX2X_FP_STATE_IRQ;
6599         }
6600
6601         i = BNX2X_NUM_QUEUES(bp);
6602         if (is_multi(bp))
6603                 printk(KERN_INFO PFX
6604                        "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6605                        bp->dev->name, bp->msix_table[0].vector,
6606                        bp->msix_table[offset].vector,
6607                        bp->msix_table[offset + i - 1].vector);
6608         else
6609                 printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6610                        bp->dev->name, bp->msix_table[0].vector,
6611                        bp->msix_table[offset + i - 1].vector);
6612
6613         return 0;
6614 }
6615
6616 static int bnx2x_enable_msi(struct bnx2x *bp)
6617 {
6618         int rc;
6619
6620         rc = pci_enable_msi(bp->pdev);
6621         if (rc) {
6622                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6623                 return -1;
6624         }
6625         bp->flags |= USING_MSI_FLAG;
6626
6627         return 0;
6628 }
6629
6630 static int bnx2x_req_irq(struct bnx2x *bp)
6631 {
6632         unsigned long flags;
6633         int rc;
6634
6635         if (bp->flags & USING_MSI_FLAG)
6636                 flags = 0;
6637         else
6638                 flags = IRQF_SHARED;
6639
6640         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6641                          bp->dev->name, bp->dev);
6642         if (!rc)
6643                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6644
6645         return rc;
6646 }
6647
6648 static void bnx2x_napi_enable(struct bnx2x *bp)
6649 {
6650         int i;
6651
6652         for_each_rx_queue(bp, i)
6653                 napi_enable(&bnx2x_fp(bp, i, napi));
6654 }
6655
6656 static void bnx2x_napi_disable(struct bnx2x *bp)
6657 {
6658         int i;
6659
6660         for_each_rx_queue(bp, i)
6661                 napi_disable(&bnx2x_fp(bp, i, napi));
6662 }
6663
6664 static void bnx2x_netif_start(struct bnx2x *bp)
6665 {
6666         int intr_sem;
6667
6668         intr_sem = atomic_dec_and_test(&bp->intr_sem);
6669         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6670
6671         if (intr_sem) {
6672                 if (netif_running(bp->dev)) {
6673                         bnx2x_napi_enable(bp);
6674                         bnx2x_int_enable(bp);
6675                         if (bp->state == BNX2X_STATE_OPEN)
6676                                 netif_tx_wake_all_queues(bp->dev);
6677                 }
6678         }
6679 }
6680
6681 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6682 {
6683         bnx2x_int_disable_sync(bp, disable_hw);
6684         bnx2x_napi_disable(bp);
6685         netif_tx_disable(bp->dev);
6686         bp->dev->trans_start = jiffies; /* prevent tx timeout */
6687 }
6688
6689 /*
6690  * Init service functions
6691  */
6692
6693 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6694 {
6695         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6696         int port = BP_PORT(bp);
6697
6698         /* CAM allocation
6699          * unicasts 0-31:port0 32-63:port1
6700          * multicast 64-127:port0 128-191:port1
6701          */
6702         config->hdr.length = 2;
6703         config->hdr.offset = port ? 32 : 0;
6704         config->hdr.client_id = bp->fp->cl_id;
6705         config->hdr.reserved1 = 0;
6706
6707         /* primary MAC */
6708         config->config_table[0].cam_entry.msb_mac_addr =
6709                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6710         config->config_table[0].cam_entry.middle_mac_addr =
6711                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6712         config->config_table[0].cam_entry.lsb_mac_addr =
6713                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6714         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6715         if (set)
6716                 config->config_table[0].target_table_entry.flags = 0;
6717         else
6718                 CAM_INVALIDATE(config->config_table[0]);
6719         config->config_table[0].target_table_entry.client_id = 0;
6720         config->config_table[0].target_table_entry.vlan_id = 0;
6721
6722         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6723            (set ? "setting" : "clearing"),
6724            config->config_table[0].cam_entry.msb_mac_addr,
6725            config->config_table[0].cam_entry.middle_mac_addr,
6726            config->config_table[0].cam_entry.lsb_mac_addr);
6727
6728         /* broadcast */
6729         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6730         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6731         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6732         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6733         if (set)
6734                 config->config_table[1].target_table_entry.flags =
6735                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6736         else
6737                 CAM_INVALIDATE(config->config_table[1]);
6738         config->config_table[1].target_table_entry.client_id = 0;
6739         config->config_table[1].target_table_entry.vlan_id = 0;
6740
6741         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6742                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6743                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6744 }
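/*
 * Editorial note (illustrative, the address below is hypothetical): the
 * CAM holds each MAC as three big-endian 16-bit words, hence the
 * swab16() on the little-endian u16 loads in bnx2x_set_mac_addr_e1().
 * For dev_addr 00:1b:21:aa:bb:cc on a little-endian host this yields:
 *
 *   msb_mac_addr    = 0x001b
 *   middle_mac_addr = 0x21aa
 *   lsb_mac_addr    = 0xbbcc
 */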
6745
6746 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6747 {
6748         struct mac_configuration_cmd_e1h *config =
6749                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6750
6751         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6752                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6753                 return;
6754         }
6755
6756         /* CAM allocation for E1H
6757          * unicasts: by func number
6758          * multicast: 20+FUNC*20, 20 each
6759          */
6760         config->hdr.length = 1;
6761         config->hdr.offset = BP_FUNC(bp);
6762         config->hdr.client_id = bp->fp->cl_id;
6763         config->hdr.reserved1 = 0;
6764
6765         /* primary MAC */
6766         config->config_table[0].msb_mac_addr =
6767                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6768         config->config_table[0].middle_mac_addr =
6769                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6770         config->config_table[0].lsb_mac_addr =
6771                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6772         config->config_table[0].client_id = BP_L_ID(bp);
6773         config->config_table[0].vlan_id = 0;
6774         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6775         if (set)
6776                 config->config_table[0].flags = BP_PORT(bp);
6777         else
6778                 config->config_table[0].flags =
6779                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6780
6781         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6782            (set ? "setting" : "clearing"),
6783            config->config_table[0].msb_mac_addr,
6784            config->config_table[0].middle_mac_addr,
6785            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6786
6787         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6788                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6789                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6790 }
6791
6792 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6793                              int *state_p, int poll)
6794 {
6795         /* can take a while if any port is running */
6796         int cnt = 5000;
6797
6798         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6799            poll ? "polling" : "waiting", state, idx);
6800
6801         might_sleep();
6802         while (cnt--) {
6803                 if (poll) {
6804                         bnx2x_rx_int(bp->fp, 10);
6805                         /* if the index is different from 0,
6806                          * the reply for some commands will
6807                          * be on the non-default queue
6808                          */
6809                         if (idx)
6810                                 bnx2x_rx_int(&bp->fp[idx], 10);
6811                 }
6812
6813                 mb(); /* state is changed by bnx2x_sp_event() */
6814                 if (*state_p == state) {
6815 #ifdef BNX2X_STOP_ON_ERROR
6816                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6817 #endif
6818                         return 0;
6819                 }
6820
6821                 msleep(1);
6822         }
6823
6824         /* timeout! */
6825         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6826                   poll ? "polling" : "waiting", state, idx);
6827 #ifdef BNX2X_STOP_ON_ERROR
6828         bnx2x_panic();
6829 #endif
6830
6831         return -EBUSY;
6832 }
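/*
 * Editorial note: bnx2x_wait_ramrod() depends on *state_p being updated
 * asynchronously by bnx2x_sp_event() when the completion arrives, which
 * is why each iteration re-reads it after an mb().  A typical caller
 * (sketch, mirroring bnx2x_setup_multi() below) looks like:
 *
 *   fp->state = BNX2X_FP_STATE_OPENING;
 *   bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, idx, 0,
 *                 fp->cl_id, 0);
 *   rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, idx,
 *                          &fp->state, 0);
 */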
6833
6834 static int bnx2x_setup_leading(struct bnx2x *bp)
6835 {
6836         int rc;
6837
6838         /* reset IGU state */
6839         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6840
6841         /* SETUP ramrod */
6842         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6843
6844         /* Wait for completion */
6845         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6846
6847         return rc;
6848 }
6849
6850 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6851 {
6852         struct bnx2x_fastpath *fp = &bp->fp[index];
6853
6854         /* reset IGU state */
6855         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6856
6857         /* SETUP ramrod */
6858         fp->state = BNX2X_FP_STATE_OPENING;
6859         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6860                       fp->cl_id, 0);
6861
6862         /* Wait for completion */
6863         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6864                                  &(fp->state), 0);
6865 }
6866
6867 static int bnx2x_poll(struct napi_struct *napi, int budget);
6868
6869 static void bnx2x_set_int_mode(struct bnx2x *bp)
6870 {
6871         int num_queues;
6872
6873         switch (int_mode) {
6874         case INT_MODE_INTx:
6875         case INT_MODE_MSI:
6876                 num_queues = 1;
6877                 bp->num_rx_queues = num_queues;
6878                 bp->num_tx_queues = num_queues;
6879                 DP(NETIF_MSG_IFUP,
6880                    "set number of queues to %d\n", num_queues);
6881                 break;
6882
6883         case INT_MODE_MSIX:
6884         default:
6885                 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6886                         num_queues = min_t(u32, num_online_cpus(),
6887                                            BNX2X_MAX_QUEUES(bp));
6888                 else
6889                         num_queues = 1;
6890                 bp->num_rx_queues = num_queues;
6891                 bp->num_tx_queues = num_queues;
6892                 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6893                    "  number of tx queues to %d\n",
6894                    bp->num_rx_queues, bp->num_tx_queues);
6895                 /* if we can't use MSI-X we only need one fp,
6896                  * so try to enable MSI-X with the requested number of fp's
6897                  * and fall back to MSI or legacy INTx with one fp
6898                  */
6899                 if (bnx2x_enable_msix(bp)) {
6900                         /* failed to enable MSI-X */
6901                         num_queues = 1;
6902                         bp->num_rx_queues = num_queues;
6903                         bp->num_tx_queues = num_queues;
6904                         if (bp->multi_mode)
6905                                 BNX2X_ERR("Multi requested but failed to "
6906                                           "enable MSI-X  set number of "
6907                                           "queues to %d\n", num_queues);
6908                 }
6909                 break;
6910         }
6911         bp->dev->real_num_tx_queues = bp->num_tx_queues;
6912 }
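/*
 * Editorial note, a worked example of the queue selection above (CPU
 * count and device limit are hypothetical): with multi_mode set to
 * ETH_RSS_MODE_REGULAR, 16 online CPUs and BNX2X_MAX_QUEUES(bp) == 8,
 * MSI-X is requested for num_queues = min(16, 8) = 8 fastpaths; if
 * bnx2x_enable_msix() fails, everything collapses back to a single
 * queue served by MSI or legacy INTx.
 */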
6913
6914 static void bnx2x_set_rx_mode(struct net_device *dev);
6915
6916 /* must be called with rtnl_lock */
6917 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6918 {
6919         u32 load_code;
6920         int i, rc = 0;
6921 #ifdef BNX2X_STOP_ON_ERROR
6922         DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6923         if (unlikely(bp->panic))
6924                 return -EPERM;
6925 #endif
6926
6927         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6928
6929         bnx2x_set_int_mode(bp);
6930
6931         if (bnx2x_alloc_mem(bp))
6932                 return -ENOMEM;
6933
6934         for_each_rx_queue(bp, i)
6935                 bnx2x_fp(bp, i, disable_tpa) =
6936                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6937
6938         for_each_rx_queue(bp, i)
6939                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6940                                bnx2x_poll, 128);
6941
6942 #ifdef BNX2X_STOP_ON_ERROR
6943         for_each_rx_queue(bp, i) {
6944                 struct bnx2x_fastpath *fp = &bp->fp[i];
6945
6946                 fp->poll_no_work = 0;
6947                 fp->poll_calls = 0;
6948                 fp->poll_max_calls = 0;
6949                 fp->poll_complete = 0;
6950                 fp->poll_exit = 0;
6951         }
6952 #endif
6953         bnx2x_napi_enable(bp);
6954
6955         if (bp->flags & USING_MSIX_FLAG) {
6956                 rc = bnx2x_req_msix_irqs(bp);
6957                 if (rc) {
6958                         pci_disable_msix(bp->pdev);
6959                         goto load_error1;
6960                 }
6961         } else {
6962                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6963                         bnx2x_enable_msi(bp);
6964                 bnx2x_ack_int(bp);
6965                 rc = bnx2x_req_irq(bp);
6966                 if (rc) {
6967                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6968                         if (bp->flags & USING_MSI_FLAG)
6969                                 pci_disable_msi(bp->pdev);
6970                         goto load_error1;
6971                 }
6972                 if (bp->flags & USING_MSI_FLAG) {
6973                         bp->dev->irq = bp->pdev->irq;
6974                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6975                                bp->dev->name, bp->pdev->irq);
6976                 }
6977         }
6978
6979         /* Send LOAD_REQUEST command to MCP.
6980            The MCP replies with the type of LOAD command:
6981            if this is the first port to be initialized,
6982            the common blocks must be initialized as well, otherwise not.
6983         */
6984         if (!BP_NOMCP(bp)) {
6985                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6986                 if (!load_code) {
6987                         BNX2X_ERR("MCP response failure, aborting\n");
6988                         rc = -EBUSY;
6989                         goto load_error2;
6990                 }
6991                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6992                         rc = -EBUSY; /* other port in diagnostic mode */
6993                         goto load_error2;
6994                 }
6995
6996         } else {
6997                 int port = BP_PORT(bp);
6998
6999                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7000                    load_count[0], load_count[1], load_count[2]);
7001                 load_count[0]++;
7002                 load_count[1 + port]++;
7003                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7004                    load_count[0], load_count[1], load_count[2]);
7005                 if (load_count[0] == 1)
7006                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7007                 else if (load_count[1 + port] == 1)
7008                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7009                 else
7010                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7011         }
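                /*
                 * Editorial note: load_count[] is the no-MCP bookkeeping,
                 * load_count[0] counts all loaded functions and
                 * load_count[1 + port] counts them per port.  The first
                 * function overall thus gets LOAD_COMMON, the first one on a
                 * given port LOAD_PORT, and any later one LOAD_FUNCTION,
                 * mirroring what the MCP would answer.
                 */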
7012
7013         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7014             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7015                 bp->port.pmf = 1;
7016         else
7017                 bp->port.pmf = 0;
7018         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7019
7020         /* Initialize HW */
7021         rc = bnx2x_init_hw(bp, load_code);
7022         if (rc) {
7023                 BNX2X_ERR("HW init failed, aborting\n");
7024                 goto load_error2;
7025         }
7026
7027         /* Setup NIC internals and enable interrupts */
7028         bnx2x_nic_init(bp, load_code);
7029
7030         /* Send LOAD_DONE command to MCP */
7031         if (!BP_NOMCP(bp)) {
7032                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7033                 if (!load_code) {
7034                         BNX2X_ERR("MCP response failure, aborting\n");
7035                         rc = -EBUSY;
7036                         goto load_error3;
7037                 }
7038         }
7039
7040         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7041
7042         rc = bnx2x_setup_leading(bp);
7043         if (rc) {
7044                 BNX2X_ERR("Setup leading failed!\n");
7045                 goto load_error3;
7046         }
7047
7048         if (CHIP_IS_E1H(bp))
7049                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7050                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7051                         bp->state = BNX2X_STATE_DISABLED;
7052                 }
7053
7054         if (bp->state == BNX2X_STATE_OPEN)
7055                 for_each_nondefault_queue(bp, i) {
7056                         rc = bnx2x_setup_multi(bp, i);
7057                         if (rc)
7058                                 goto load_error3;
7059                 }
7060
7061         if (CHIP_IS_E1(bp))
7062                 bnx2x_set_mac_addr_e1(bp, 1);
7063         else
7064                 bnx2x_set_mac_addr_e1h(bp, 1);
7065
7066         if (bp->port.pmf)
7067                 bnx2x_initial_phy_init(bp, load_mode);
7068
7069         /* Start fast path */
7070         switch (load_mode) {
7071         case LOAD_NORMAL:
7072                 /* Tx queues only need to be re-enabled */
7073                 netif_tx_wake_all_queues(bp->dev);
7074                 /* Initialize the receive filter. */
7075                 bnx2x_set_rx_mode(bp->dev);
7076                 break;
7077
7078         case LOAD_OPEN:
7079                 netif_tx_start_all_queues(bp->dev);
7080                 /* Initialize the receive filter. */
7081                 bnx2x_set_rx_mode(bp->dev);
7082                 break;
7083
7084         case LOAD_DIAG:
7085                 /* Initialize the receive filter. */
7086                 bnx2x_set_rx_mode(bp->dev);
7087                 bp->state = BNX2X_STATE_DIAG;
7088                 break;
7089
7090         default:
7091                 break;
7092         }
7093
7094         if (!bp->port.pmf)
7095                 bnx2x__link_status_update(bp);
7096
7097         /* start the timer */
7098         mod_timer(&bp->timer, jiffies + bp->current_interval);
7099
7100
7101         return 0;
7102
7103 load_error3:
7104         bnx2x_int_disable_sync(bp, 1);
7105         if (!BP_NOMCP(bp)) {
7106                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7107                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7108         }
7109         bp->port.pmf = 0;
7110         /* Free SKBs, SGEs, TPA pool and driver internals */
7111         bnx2x_free_skbs(bp);
7112         for_each_rx_queue(bp, i)
7113                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7114 load_error2:
7115         /* Release IRQs */
7116         bnx2x_free_irq(bp);
7117 load_error1:
7118         bnx2x_napi_disable(bp);
7119         for_each_rx_queue(bp, i)
7120                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7121         bnx2x_free_mem(bp);
7122
7123         return rc;
7124 }
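/*
 * Editorial note on the unwind ladder in bnx2x_nic_load(): the
 * load_error labels release resources in reverse order of acquisition.
 * load_error3 (HW already initialized: disable interrupts, notify the
 * MCP, free SKBs and SGEs) falls through to load_error2 (release IRQs)
 * and then to load_error1 (disable NAPI, delete the NAPI contexts, free
 * memory), so each failure point jumps to the first label that still
 * has something to undo.
 */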
7125
7126 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7127 {
7128         struct bnx2x_fastpath *fp = &bp->fp[index];
7129         int rc;
7130
7131         /* halt the connection */
7132         fp->state = BNX2X_FP_STATE_HALTING;
7133         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7134
7135         /* Wait for completion */
7136         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7137                                &(fp->state), 1);
7138         if (rc) /* timeout */
7139                 return rc;
7140
7141         /* delete cfc entry */
7142         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7143
7144         /* Wait for completion */
7145         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7146                                &(fp->state), 1);
7147         return rc;
7148 }
7149
7150 static int bnx2x_stop_leading(struct bnx2x *bp)
7151 {
7152         __le16 dsb_sp_prod_idx;
7153         /* if the other port is handling traffic,
7154            this can take a lot of time */
7155         int cnt = 500;
7156         int rc;
7157
7158         might_sleep();
7159
7160         /* Send HALT ramrod */
7161         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7162         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7163
7164         /* Wait for completion */
7165         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7166                                &(bp->fp[0].state), 1);
7167         if (rc) /* timeout */
7168                 return rc;
7169
7170         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7171
7172         /* Send PORT_DELETE ramrod */
7173         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7174
7175         /* Wait for the completion to arrive on the default status block.
7176            We are going to reset the chip anyway,
7177            so there is not much to do if this times out.
7178          */
7179         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7180                 if (!cnt) {
7181                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7182                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7183                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7184 #ifdef BNX2X_STOP_ON_ERROR
7185                         bnx2x_panic();
7186 #endif
7187                         rc = -EBUSY;
7188                         break;
7189                 }
7190                 cnt--;
7191                 msleep(1);
7192                 rmb(); /* Refresh the dsb_sp_prod */
7193         }
7194         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7195         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7196
7197         return rc;
7198 }
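/*
 * Editorial note: unlike the HALT ramrod, PORT_DELETE has no per-queue
 * state to poll, so bnx2x_stop_leading() detects completion by watching
 * the default status block producer (*bp->dsb_sp_prod) move past the
 * value sampled before the ramrod was posted; the rmb() in the loop
 * keeps each re-read of the producer fresh.
 */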
7199
7200 static void bnx2x_reset_func(struct bnx2x *bp)
7201 {
7202         int port = BP_PORT(bp);
7203         int func = BP_FUNC(bp);
7204         int base, i;
7205
7206         /* Configure IGU */
7207         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7208         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7209
7210         /* Clear ILT */
7211         base = FUNC_ILT_BASE(func);
7212         for (i = base; i < base + ILT_PER_FUNC; i++)
7213                 bnx2x_ilt_wr(bp, i, 0);
7214 }
7215
7216 static void bnx2x_reset_port(struct bnx2x *bp)
7217 {
7218         int port = BP_PORT(bp);
7219         u32 val;
7220
7221         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7222
7223         /* Do not rcv packets to BRB */
7224         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7225         /* Do not direct rcv packets that are not for MCP to the BRB */
7226         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7227                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7228
7229         /* Configure AEU */
7230         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7231
7232         msleep(100);
7233         /* Check for BRB port occupancy */
7234         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7235         if (val)
7236                 DP(NETIF_MSG_IFDOWN,
7237                    "BRB1 is not empty  %d blocks are occupied\n", val);
7238
7239         /* TODO: Close Doorbell port? */
7240 }
7241
7242 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7243 {
7244         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7245            BP_FUNC(bp), reset_code);
7246
7247         switch (reset_code) {
7248         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7249                 bnx2x_reset_port(bp);
7250                 bnx2x_reset_func(bp);
7251                 bnx2x_reset_common(bp);
7252                 break;
7253
7254         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7255                 bnx2x_reset_port(bp);
7256                 bnx2x_reset_func(bp);
7257                 break;
7258
7259         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7260                 bnx2x_reset_func(bp);
7261                 break;
7262
7263         default:
7264                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7265                 break;
7266         }
7267 }
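/*
 * Editorial note: the reset scope widens with the unload response,
 * UNLOAD_FUNCTION resets only this function, UNLOAD_PORT additionally
 * resets the port, and UNLOAD_COMMON also resets the blocks shared by
 * both ports, matching the LOAD_COMMON/LOAD_PORT/LOAD_FUNCTION split
 * used at load time.
 */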
7268
7269 /* must be called with rtnl_lock */
7270 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7271 {
7272         int port = BP_PORT(bp);
7273         u32 reset_code = 0;
7274         int i, cnt, rc;
7275
7276         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7277
7278         bp->rx_mode = BNX2X_RX_MODE_NONE;
7279         bnx2x_set_storm_rx_mode(bp);
7280
7281         bnx2x_netif_stop(bp, 1);
7282
7283         del_timer_sync(&bp->timer);
7284         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7285                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7286         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7287
7288         /* Release IRQs */
7289         bnx2x_free_irq(bp);
7290
7291         /* Wait until tx fastpath tasks complete */
7292         for_each_tx_queue(bp, i) {
7293                 struct bnx2x_fastpath *fp = &bp->fp[i];
7294
7295                 cnt = 1000;
7296                 while (bnx2x_has_tx_work_unload(fp)) {
7297
7298                         bnx2x_tx_int(fp);
7299                         if (!cnt) {
7300                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7301                                           i);
7302 #ifdef BNX2X_STOP_ON_ERROR
7303                                 bnx2x_panic();
7304                                 return -EBUSY;
7305 #else
7306                                 break;
7307 #endif
7308                         }
7309                         cnt--;
7310                         msleep(1);
7311                 }
7312         }
7313         /* Give HW time to discard old tx messages */
7314         msleep(1);
7315
7316         if (CHIP_IS_E1(bp)) {
7317                 struct mac_configuration_cmd *config =
7318                                                 bnx2x_sp(bp, mcast_config);
7319
7320                 bnx2x_set_mac_addr_e1(bp, 0);
7321
7322                 for (i = 0; i < config->hdr.length; i++)
7323                         CAM_INVALIDATE(config->config_table[i]);
7324
7325                 config->hdr.length = i;
7326                 if (CHIP_REV_IS_SLOW(bp))
7327                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7328                 else
7329                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7330                 config->hdr.client_id = bp->fp->cl_id;
7331                 config->hdr.reserved1 = 0;
7332
7333                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7334                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7335                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7336
7337         } else { /* E1H */
7338                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7339
7340                 bnx2x_set_mac_addr_e1h(bp, 0);
7341
7342                 for (i = 0; i < MC_HASH_SIZE; i++)
7343                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7344         }
7345
7346         if (unload_mode == UNLOAD_NORMAL)
7347                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7348
7349         else if (bp->flags & NO_WOL_FLAG) {
7350                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7351                 if (CHIP_IS_E1H(bp))
7352                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7353
7354         } else if (bp->wol) {
7355                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7356                 u8 *mac_addr = bp->dev->dev_addr;
7357                 u32 val;
7358                 /* The MAC address is written to entries 1-4 to
7359                    preserve entry 0, which is used by the PMF */
7360                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7361
7362                 val = (mac_addr[0] << 8) | mac_addr[1];
7363                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7364
7365                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7366                       (mac_addr[4] << 8) | mac_addr[5];
7367                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
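                /*
                 * Editorial note: each MAC-match entry is a pair of
                 * 32-bit registers (8 bytes), so entry n starts at
                 * EMAC_REG_EMAC_MAC_MATCH + n*8; with BP_E1HVN() in
                 * 0..3 the (vn + 1)*8 above lands in entries 1-4, as
                 * the comment requires.
                 */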
7368
7369                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7370
7371         } else
7372                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7373
7374         /* Close the multi and leading connections.
7375            Ramrod completions are collected synchronously. */
7376         for_each_nondefault_queue(bp, i)
7377                 if (bnx2x_stop_multi(bp, i))
7378                         goto unload_error;
7379
7380         rc = bnx2x_stop_leading(bp);
7381         if (rc) {
7382                 BNX2X_ERR("Stop leading failed!\n");
7383 #ifdef BNX2X_STOP_ON_ERROR
7384                 return -EBUSY;
7385 #else
7386                 goto unload_error;
7387 #endif
7388         }
7389
7390 unload_error:
7391         if (!BP_NOMCP(bp))
7392                 reset_code = bnx2x_fw_command(bp, reset_code);
7393         else {
7394                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7395                    load_count[0], load_count[1], load_count[2]);
7396                 load_count[0]--;
7397                 load_count[1 + port]--;
7398                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7399                    load_count[0], load_count[1], load_count[2]);
7400                 if (load_count[0] == 0)
7401                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7402                 else if (load_count[1 + port] == 0)
7403                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7404                 else
7405                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7406         }
7407
7408         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7409             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7410                 bnx2x__link_reset(bp);
7411
7412         /* Reset the chip */
7413         bnx2x_reset_chip(bp, reset_code);
7414
7415         /* Report UNLOAD_DONE to MCP */
7416         if (!BP_NOMCP(bp))
7417                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7418
7419         bp->port.pmf = 0;
7420
7421         /* Free SKBs, SGEs, TPA pool and driver internals */
7422         bnx2x_free_skbs(bp);
7423         for_each_rx_queue(bp, i)
7424                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7425         for_each_rx_queue(bp, i)
7426                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7427         bnx2x_free_mem(bp);
7428
7429         bp->state = BNX2X_STATE_CLOSED;
7430
7431         netif_carrier_off(bp->dev);
7432
7433         return 0;
7434 }
7435
7436 static void bnx2x_reset_task(struct work_struct *work)
7437 {
7438         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7439
7440 #ifdef BNX2X_STOP_ON_ERROR
7441         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7442                   " so reset not done to allow debug dump,\n"
7443          KERN_ERR " you will need to reboot when done\n");
7444         return;
7445 #endif
7446
7447         rtnl_lock();
7448
7449         if (!netif_running(bp->dev))
7450                 goto reset_task_exit;
7451
7452         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7453         bnx2x_nic_load(bp, LOAD_NORMAL);
7454
7455 reset_task_exit:
7456         rtnl_unlock();
7457 }
7458
7459 /* end of nic load/unload */
7460
7461 /* ethtool_ops */
7462
7463 /*
7464  * Init service functions
7465  */
7466
7467 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7468 {
7469         switch (func) {
7470         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7471         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7472         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7473         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7474         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7475         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7476         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7477         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7478         default:
7479                 BNX2X_ERR("Unsupported function index: %d\n", func);
7480                 return (u32)(-1);
7481         }
7482 }
7483
7484 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7485 {
7486         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7487
7488         /* Flush all outstanding writes */
7489         mmiowb();
7490
7491         /* Pretend to be function 0 */
7492         REG_WR(bp, reg, 0);
7493         /* Flush the GRC transaction (in the chip) */
7494         new_val = REG_RD(bp, reg);
7495         if (new_val != 0) {
7496                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7497                           new_val);
7498                 BUG();
7499         }
7500
7501         /* From now on we are in the "like-E1" mode */
7502         bnx2x_int_disable(bp);
7503
7504         /* Flush all outstanding writes */
7505         mmiowb();
7506
7507         /* Restore the original function settings */
7508         REG_WR(bp, reg, orig_func);
7509         new_val = REG_RD(bp, reg);
7510         if (new_val != orig_func) {
7511                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7512                           orig_func, new_val);
7513                 BUG();
7514         }
7515 }
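/*
 * Editorial note: the "pretend" register makes subsequent GRC accesses
 * by this PCI function appear to come from another function.  The
 * sequence above pretends to be function 0 so that the plain E1-style
 * bnx2x_int_disable() hits function 0's HC registers, then restores the
 * original function; the read-back after each write both flushes the
 * GRC transaction and verifies that it took effect.
 */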
7516
7517 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7518 {
7519         if (CHIP_IS_E1H(bp))
7520                 bnx2x_undi_int_disable_e1h(bp, func);
7521         else
7522                 bnx2x_int_disable(bp);
7523 }
7524
7525 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7526 {
7527         u32 val;
7528
7529         /* Check if there is any driver already loaded */
7530         val = REG_RD(bp, MISC_REG_UNPREPARED);
7531         if (val == 0x1) {
7532                 /* Check if it is the UNDI driver:
7533                  * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7534                  */
7535                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7536                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7537                 if (val == 0x7) {
7538                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7539                         /* save our func */
7540                         int func = BP_FUNC(bp);
7541                         u32 swap_en;
7542                         u32 swap_val;
7543
7544                         /* clear the UNDI indication */
7545                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7546
7547                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7548
7549                         /* try unload UNDI on port 0 */
7550                         bp->func = 0;
7551                         bp->fw_seq =
7552                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7553                                 DRV_MSG_SEQ_NUMBER_MASK);
7554                         reset_code = bnx2x_fw_command(bp, reset_code);
7555
7556                         /* if UNDI is loaded on the other port */
7557                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7558
7559                                 /* send "DONE" for previous unload */
7560                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7561
7562                                 /* unload UNDI on port 1 */
7563                                 bp->func = 1;
7564                                 bp->fw_seq =
7565                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7566                                         DRV_MSG_SEQ_NUMBER_MASK);
7567                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7568
7569                                 bnx2x_fw_command(bp, reset_code);
7570                         }
7571
7572                         /* now it's safe to release the lock */
7573                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7574
7575                         bnx2x_undi_int_disable(bp, func);
7576
7577                         /* close input traffic and wait for it */
7578                         /* Do not rcv packets to BRB */
7579                         REG_WR(bp,
7580                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7581                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7582                         /* Do not direct rcv packets that are not for MCP to
7583                          * the BRB */
7584                         REG_WR(bp,
7585                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7586                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7587                         /* clear AEU */
7588                         REG_WR(bp,
7589                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7590                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7591                         msleep(10);
7592
7593                         /* save NIG port swap info */
7594                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7595                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7596                         /* reset device */
7597                         REG_WR(bp,
7598                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7599                                0xd3ffffff);
7600                         REG_WR(bp,
7601                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7602                                0x1403);
7603                         /* take the NIG out of reset and restore swap values */
7604                         REG_WR(bp,
7605                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7606                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7607                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7608                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7609
7610                         /* send unload done to the MCP */
7611                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7612
7613                         /* restore our func and fw_seq */
7614                         bp->func = func;
7615                         bp->fw_seq =
7616                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7617                                 DRV_MSG_SEQ_NUMBER_MASK);
7618
7619                 } else
7620                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7621         }
7622 }
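/*
 * Editorial summary of bnx2x_undi_unload(): detect a live UNDI
 * (pre-boot) driver via DORQ_REG_NORM_CID_OFST == 0x7, run the MCP
 * unload handshake for both ports if needed, quiesce input traffic,
 * reset the device while preserving the NIG port-swap straps, and
 * finally restore this function's own fw_seq so the normal load flow
 * can proceed.
 */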
7623
7624 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7625 {
7626         u32 val, val2, val3, val4, id;
7627         u16 pmc;
7628
7629         /* Get the chip revision id and number. */
7630         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7631         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7632         id = ((val & 0xffff) << 16);
7633         val = REG_RD(bp, MISC_REG_CHIP_REV);
7634         id |= ((val & 0xf) << 12);
7635         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7636         id |= ((val & 0xff) << 4);
7637         val = REG_RD(bp, MISC_REG_BOND_ID);
7638         id |= (val & 0xf);
7639         bp->common.chip_id = id;
7640         bp->link_params.chip_id = bp->common.chip_id;
7641         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
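        /*
         * Editorial note, a hypothetical worked example of the packing
         * above: chip num 0x164e, rev 0x0, metal 0x00 and bond_id 0x0
         * would give id = (0x164e << 16) | (0x0 << 12) | (0x00 << 4) |
         * 0x0 = 0x164e0000.
         */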
7642
7643         val = (REG_RD(bp, 0x2874) & 0x55);
7644         if ((bp->common.chip_id & 0x1) ||
7645             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7646                 bp->flags |= ONE_PORT_FLAG;
7647                 BNX2X_DEV_INFO("single port device\n");
7648         }
7649
7650         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7651         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7652                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7653         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7654                        bp->common.flash_size, bp->common.flash_size);
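        /*
         * Editorial note: flash_size is NVRAM_1MB_SIZE shifted left by
         * the CFG4 size field, so (hypothetical field values) 0 gives
         * 1MB and 2 gives 4MB; the DEV_INFO line prints the same value
         * in hex and decimal.
         */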
7655
7656         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7657         bp->link_params.shmem_base = bp->common.shmem_base;
7658         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7659
7660         if (!bp->common.shmem_base ||
7661             (bp->common.shmem_base < 0xA0000) ||
7662             (bp->common.shmem_base >= 0xC0000)) {
7663                 BNX2X_DEV_INFO("MCP not active\n");
7664                 bp->flags |= NO_MCP_FLAG;
7665                 return;
7666         }
7667
7668         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7669         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7670                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7671                 BNX2X_ERR("BAD MCP validity signature\n");
7672
7673         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7674         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7675
7676         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7677                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7678                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7679
7680         bp->link_params.feature_config_flags = 0;
7681         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7682         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7683                 bp->link_params.feature_config_flags |=
7684                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7685         else
7686                 bp->link_params.feature_config_flags &=
7687                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7688
7689         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7690         bp->common.bc_ver = val;
7691         BNX2X_DEV_INFO("bc_ver %X\n", val);
7692         if (val < BNX2X_BC_VER) {
7693                 /* for now only warn;
7694                  * later we might need to enforce this */
7695                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7696                           " please upgrade BC\n", BNX2X_BC_VER, val);
7697         }
7698         bp->link_params.feature_config_flags |=
7699                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7700                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7701
7702         if (BP_E1HVN(bp) == 0) {
7703                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7704                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7705         } else {
7706                 /* no WOL capability for E1HVN != 0 */
7707                 bp->flags |= NO_WOL_FLAG;
7708         }
7709         BNX2X_DEV_INFO("%sWoL capable\n",
7710                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7711
7712         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7713         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7714         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7715         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7716
7717         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7718                val, val2, val3, val4);
7719 }
7720
7721 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7722                                                     u32 switch_cfg)
7723 {
7724         int port = BP_PORT(bp);
7725         u32 ext_phy_type;
7726
7727         switch (switch_cfg) {
7728         case SWITCH_CFG_1G:
7729                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7730
7731                 ext_phy_type =
7732                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7733                 switch (ext_phy_type) {
7734                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7735                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7736                                        ext_phy_type);
7737
7738                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7739                                                SUPPORTED_10baseT_Full |
7740                                                SUPPORTED_100baseT_Half |
7741                                                SUPPORTED_100baseT_Full |
7742                                                SUPPORTED_1000baseT_Full |
7743                                                SUPPORTED_2500baseX_Full |
7744                                                SUPPORTED_TP |
7745                                                SUPPORTED_FIBRE |
7746                                                SUPPORTED_Autoneg |
7747                                                SUPPORTED_Pause |
7748                                                SUPPORTED_Asym_Pause);
7749                         break;
7750
7751                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7752                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7753                                        ext_phy_type);
7754
7755                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7756                                                SUPPORTED_10baseT_Full |
7757                                                SUPPORTED_100baseT_Half |
7758                                                SUPPORTED_100baseT_Full |
7759                                                SUPPORTED_1000baseT_Full |
7760                                                SUPPORTED_TP |
7761                                                SUPPORTED_FIBRE |
7762                                                SUPPORTED_Autoneg |
7763                                                SUPPORTED_Pause |
7764                                                SUPPORTED_Asym_Pause);
7765                         break;
7766
7767                 default:
7768                         BNX2X_ERR("NVRAM config error. "
7769                                   "BAD SerDes ext_phy_config 0x%x\n",
7770                                   bp->link_params.ext_phy_config);
7771                         return;
7772                 }
7773
7774                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7775                                            port*0x10);
7776                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7777                 break;
7778
7779         case SWITCH_CFG_10G:
7780                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7781
7782                 ext_phy_type =
7783                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7784                 switch (ext_phy_type) {
7785                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7786                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7787                                        ext_phy_type);
7788
7789                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7790                                                SUPPORTED_10baseT_Full |
7791                                                SUPPORTED_100baseT_Half |
7792                                                SUPPORTED_100baseT_Full |
7793                                                SUPPORTED_1000baseT_Full |
7794                                                SUPPORTED_2500baseX_Full |
7795                                                SUPPORTED_10000baseT_Full |
7796                                                SUPPORTED_TP |
7797                                                SUPPORTED_FIBRE |
7798                                                SUPPORTED_Autoneg |
7799                                                SUPPORTED_Pause |
7800                                                SUPPORTED_Asym_Pause);
7801                         break;
7802
7803                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7804                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7805                                        ext_phy_type);
7806
7807                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7808                                                SUPPORTED_1000baseT_Full |
7809                                                SUPPORTED_FIBRE |
7810                                                SUPPORTED_Autoneg |
7811                                                SUPPORTED_Pause |
7812                                                SUPPORTED_Asym_Pause);
7813                         break;
7814
7815                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7816                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7817                                        ext_phy_type);
7818
7819                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7820                                                SUPPORTED_2500baseX_Full |
7821                                                SUPPORTED_1000baseT_Full |
7822                                                SUPPORTED_FIBRE |
7823                                                SUPPORTED_Autoneg |
7824                                                SUPPORTED_Pause |
7825                                                SUPPORTED_Asym_Pause);
7826                         break;
7827
7828                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7829                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7830                                        ext_phy_type);
7831
7832                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7833                                                SUPPORTED_FIBRE |
7834                                                SUPPORTED_Pause |
7835                                                SUPPORTED_Asym_Pause);
7836                         break;
7837
7838                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7839                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7840                                        ext_phy_type);
7841
7842                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7843                                                SUPPORTED_1000baseT_Full |
7844                                                SUPPORTED_FIBRE |
7845                                                SUPPORTED_Pause |
7846                                                SUPPORTED_Asym_Pause);
7847                         break;
7848
7849                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7850                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7851                                        ext_phy_type);
7852
7853                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7854                                                SUPPORTED_1000baseT_Full |
7855                                                SUPPORTED_Autoneg |
7856                                                SUPPORTED_FIBRE |
7857                                                SUPPORTED_Pause |
7858                                                SUPPORTED_Asym_Pause);
7859                         break;
7860
7861                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7862                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
7863                                        ext_phy_type);
7864
7865                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7866                                                SUPPORTED_1000baseT_Full |
7867                                                SUPPORTED_Autoneg |
7868                                                SUPPORTED_FIBRE |
7869                                                SUPPORTED_Pause |
7870                                                SUPPORTED_Asym_Pause);
7871                         break;
7872
7873                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7874                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7875                                        ext_phy_type);
7876
7877                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7878                                                SUPPORTED_TP |
7879                                                SUPPORTED_Autoneg |
7880                                                SUPPORTED_Pause |
7881                                                SUPPORTED_Asym_Pause);
7882                         break;
7883
7884                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7885                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7886                                        ext_phy_type);
7887
7888                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7889                                                SUPPORTED_10baseT_Full |
7890                                                SUPPORTED_100baseT_Half |
7891                                                SUPPORTED_100baseT_Full |
7892                                                SUPPORTED_1000baseT_Full |
7893                                                SUPPORTED_10000baseT_Full |
7894                                                SUPPORTED_TP |
7895                                                SUPPORTED_Autoneg |
7896                                                SUPPORTED_Pause |
7897                                                SUPPORTED_Asym_Pause);
7898                         break;
7899
7900                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7901                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7902                                   bp->link_params.ext_phy_config);
7903                         break;
7904
7905                 default:
7906                         BNX2X_ERR("NVRAM config error. "
7907                                   "BAD XGXS ext_phy_config 0x%x\n",
7908                                   bp->link_params.ext_phy_config);
7909                         return;
7910                 }
7911
7912                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7913                                            port*0x18);
7914                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7915
7916                 break;
7917
7918         default:
7919                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7920                           bp->port.link_config);
7921                 return;
7922         }
7923         bp->link_params.phy_addr = bp->port.phy_addr;
7924
7925         /* mask what we support according to speed_cap_mask */
7926         if (!(bp->link_params.speed_cap_mask &
7927                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7928                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7929
7930         if (!(bp->link_params.speed_cap_mask &
7931                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7932                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7933
7934         if (!(bp->link_params.speed_cap_mask &
7935                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7936                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7937
7938         if (!(bp->link_params.speed_cap_mask &
7939                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7940                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7941
7942         if (!(bp->link_params.speed_cap_mask &
7943                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7944                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7945                                         SUPPORTED_1000baseT_Full);
7946
7947         if (!(bp->link_params.speed_cap_mask &
7948                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7949                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7950
7951         if (!(bp->link_params.speed_cap_mask &
7952                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7953                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7954
7955         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7956 }
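/*
 * Editorial note: bp->port.supported is built in two passes, the
 * PHY-type switch ORs in everything the external PHY could do, then the
 * speed_cap_mask clauses clear whatever NVRAM does not permit.  E.g.
 * (hypothetical mask) with only the D0_10G capability bit set, a
 * BCM8073 port is left with SUPPORTED_10000baseT_Full plus the FIBRE,
 * Autoneg and pause flags.
 */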
7957
7958 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7959 {
7960         bp->link_params.req_duplex = DUPLEX_FULL;
7961
7962         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7963         case PORT_FEATURE_LINK_SPEED_AUTO:
7964                 if (bp->port.supported & SUPPORTED_Autoneg) {
7965                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7966                         bp->port.advertising = bp->port.supported;
7967                 } else {
7968                         u32 ext_phy_type =
7969                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7970
7971                         if ((ext_phy_type ==
7972                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7973                             (ext_phy_type ==
7974                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7975                                 /* force 10G, no AN */
7976                                 bp->link_params.req_line_speed = SPEED_10000;
7977                                 bp->port.advertising =
7978                                                 (ADVERTISED_10000baseT_Full |
7979                                                  ADVERTISED_FIBRE);
7980                                 break;
7981                         }
7982                         BNX2X_ERR("NVRAM config error. "
7983                                   "Invalid link_config 0x%x"
7984                                   "  Autoneg not supported\n",
7985                                   bp->port.link_config);
7986                         return;
7987                 }
7988                 break;
7989
7990         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7991                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7992                         bp->link_params.req_line_speed = SPEED_10;
7993                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7994                                                 ADVERTISED_TP);
7995                 } else {
7996                         BNX2X_ERR("NVRAM config error. "
7997                                   "Invalid link_config 0x%x"
7998                                   "  speed_cap_mask 0x%x\n",
7999                                   bp->port.link_config,
8000                                   bp->link_params.speed_cap_mask);
8001                         return;
8002                 }
8003                 break;
8004
8005         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8006                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8007                         bp->link_params.req_line_speed = SPEED_10;
8008                         bp->link_params.req_duplex = DUPLEX_HALF;
8009                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8010                                                 ADVERTISED_TP);
8011                 } else {
8012                         BNX2X_ERR("NVRAM config error. "
8013                                   "Invalid link_config 0x%x"
8014                                   "  speed_cap_mask 0x%x\n",
8015                                   bp->port.link_config,
8016                                   bp->link_params.speed_cap_mask);
8017                         return;
8018                 }
8019                 break;
8020
8021         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8022                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8023                         bp->link_params.req_line_speed = SPEED_100;
8024                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8025                                                 ADVERTISED_TP);
8026                 } else {
8027                         BNX2X_ERR("NVRAM config error. "
8028                                   "Invalid link_config 0x%x"
8029                                   "  speed_cap_mask 0x%x\n",
8030                                   bp->port.link_config,
8031                                   bp->link_params.speed_cap_mask);
8032                         return;
8033                 }
8034                 break;
8035
8036         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8037                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8038                         bp->link_params.req_line_speed = SPEED_100;
8039                         bp->link_params.req_duplex = DUPLEX_HALF;
8040                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8041                                                 ADVERTISED_TP);
8042                 } else {
8043                         BNX2X_ERR("NVRAM config error. "
8044                                   "Invalid link_config 0x%x"
8045                                   "  speed_cap_mask 0x%x\n",
8046                                   bp->port.link_config,
8047                                   bp->link_params.speed_cap_mask);
8048                         return;
8049                 }
8050                 break;
8051
8052         case PORT_FEATURE_LINK_SPEED_1G:
8053                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8054                         bp->link_params.req_line_speed = SPEED_1000;
8055                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8056                                                 ADVERTISED_TP);
8057                 } else {
8058                         BNX2X_ERR("NVRAM config error. "
8059                                   "Invalid link_config 0x%x"
8060                                   "  speed_cap_mask 0x%x\n",
8061                                   bp->port.link_config,
8062                                   bp->link_params.speed_cap_mask);
8063                         return;
8064                 }
8065                 break;
8066
8067         case PORT_FEATURE_LINK_SPEED_2_5G:
8068                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8069                         bp->link_params.req_line_speed = SPEED_2500;
8070                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8071                                                 ADVERTISED_TP);
8072                 } else {
8073                         BNX2X_ERR("NVRAM config error. "
8074                                   "Invalid link_config 0x%x"
8075                                   "  speed_cap_mask 0x%x\n",
8076                                   bp->port.link_config,
8077                                   bp->link_params.speed_cap_mask);
8078                         return;
8079                 }
8080                 break;
8081
8082         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8083         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8084         case PORT_FEATURE_LINK_SPEED_10G_KR:
8085                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8086                         bp->link_params.req_line_speed = SPEED_10000;
8087                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8088                                                 ADVERTISED_FIBRE);
8089                 } else {
8090                         BNX2X_ERR("NVRAM config error. "
8091                                   "Invalid link_config 0x%x"
8092                                   "  speed_cap_mask 0x%x\n",
8093                                   bp->port.link_config,
8094                                   bp->link_params.speed_cap_mask);
8095                         return;
8096                 }
8097                 break;
8098
8099         default:
8100                 BNX2X_ERR("NVRAM config error. "
8101                           "BAD link speed link_config 0x%x\n",
8102                           bp->port.link_config);
8103                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8104                 bp->port.advertising = bp->port.supported;
8105                 break;
8106         }
8107
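        /*
         * Resolve the requested flow control: the low bits of link_config
         * carry the NVRAM pause setting.  If it resolves to AUTO on a port
         * that cannot autonegotiate, fall back to no flow control, since
         * pause frames cannot be negotiated with the peer in that case.
         */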
8108         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8109                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8110         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8111             !(bp->port.supported & SUPPORTED_Autoneg))
8112                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8113
8114         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8115                        "  advertising 0x%x\n",
8116                        bp->link_params.req_line_speed,
8117                        bp->link_params.req_duplex,
8118                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8119 }
8120
8121 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8122 {
8123         int port = BP_PORT(bp);
8124         u32 val, val2;
8125         u32 config;
8126         u16 i;
8127
8128         bp->link_params.bp = bp;
8129         bp->link_params.port = port;
8130
8131         bp->link_params.lane_config =
8132                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8133         bp->link_params.ext_phy_config =
8134                 SHMEM_RD(bp,
8135                          dev_info.port_hw_config[port].external_phy_config);
8136         /* BCM8727_NOC => BCM8727 with no over-current support */
8137         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8138             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8139                 bp->link_params.ext_phy_config &=
8140                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8141                 bp->link_params.ext_phy_config |=
8142                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8143                 bp->link_params.feature_config_flags |=
8144                         FEATURE_CONFIG_BCM8727_NOC;
8145         }
8146
8147         bp->link_params.speed_cap_mask =
8148                 SHMEM_RD(bp,
8149                          dev_info.port_hw_config[port].speed_capability_mask);
8150
8151         bp->port.link_config =
8152                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8153
8154         /* Get the per-lane XGXS RX and TX config for all 4 lanes */
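        /*
         * Each 32-bit SHMEM word packs two 16-bit per-lane values: the
         * high half-word belongs to lane (i << 1) and the low half-word
         * to lane ((i << 1) + 1), so two reads cover all four lanes.
         */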
8155         for (i = 0; i < 2; i++) {
8156                 val = SHMEM_RD(bp,
8157                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8158                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8159                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8160
8161                 val = SHMEM_RD(bp,
8162                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8163                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8164                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8165         }
8166
8167         /* If the device is capable of WoL, set the default state according
8168          * to the HW
8169          */
8170         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8171         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8172                    (config & PORT_FEATURE_WOL_ENABLED));
8173
8174         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8175                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8176                        bp->link_params.lane_config,
8177                        bp->link_params.ext_phy_config,
8178                        bp->link_params.speed_cap_mask, bp->port.link_config);
8179
8180         bp->link_params.switch_cfg |= (bp->port.link_config &
8181                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8182         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8183
8184         bnx2x_link_settings_requested(bp);
8185
8186         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8187         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
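        /*
         * The MAC address is split across two SHMEM words: mac_upper
         * carries bytes 0-1 in its low 16 bits and mac_lower carries
         * bytes 2-5.  E.g. val2 = 0x0010, val = 0x18xxxxxx would yield
         * 00:10:18:xx:xx:xx (illustrative values only).
         */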
8188         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8189         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8190         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8191         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8192         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8193         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8194         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8195         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8196 }
8197
8198 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8199 {
8200         int func = BP_FUNC(bp);
8201         u32 val, val2;
8202         int rc = 0;
8203
8204         bnx2x_get_common_hwinfo(bp);
8205
8206         bp->e1hov = 0;
8207         bp->e1hmf = 0;
8208         if (CHIP_IS_E1H(bp)) {
8209                 bp->mf_config =
8210                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8211
8212                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8213                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8214                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8215
8216                         bp->e1hov = val;
8217                         bp->e1hmf = 1;
8218                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
8219                                        "(0x%04x)\n",
8220                                        func, bp->e1hov, bp->e1hov);
8221                 } else {
8222                         BNX2X_DEV_INFO("single function mode\n");
8223                         if (BP_E1HVN(bp)) {
8224                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8225                                           "  aborting\n", func);
8226                                 rc = -EPERM;
8227                         }
8228                 }
8229         }
8230
8231         if (!BP_NOMCP(bp)) {
8232                 bnx2x_get_port_hwinfo(bp);
8233
8234                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8235                               DRV_MSG_SEQ_NUMBER_MASK);
8236                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8237         }
8238
8239         if (IS_E1HMF(bp)) {
8240                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8241                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8242                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8243                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8244                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8245                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8246                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8247                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8248                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8249                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8250                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8251                                ETH_ALEN);
8252                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8253                                ETH_ALEN);
8254                 }
8255
8256                 return rc;
8257         }
8258
8259         if (BP_NOMCP(bp)) {
8260                 /* only supposed to happen on emulation/FPGA */
8261                 BNX2X_ERR("warning: random MAC workaround active\n");
8262                 random_ether_addr(bp->dev->dev_addr);
8263                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8264         }
8265
8266         return rc;
8267 }
8268
8269 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8270 {
8271         int func = BP_FUNC(bp);
8272         int timer_interval;
8273         int rc;
8274
8275         /* Disable interrupt handling until HW is initialized */
8276         atomic_set(&bp->intr_sem, 1);
8277         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8278
8279         mutex_init(&bp->port.phy_mutex);
8280
8281         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8282         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8283
8284         rc = bnx2x_get_hwinfo(bp);
8285
8286         /* need to reset chip if undi was active */
8287         if (!BP_NOMCP(bp))
8288                 bnx2x_undi_unload(bp);
8289
8290         if (CHIP_REV_IS_FPGA(bp))
8291                 printk(KERN_ERR PFX "FPGA detected\n");
8292
8293         if (BP_NOMCP(bp) && (func == 0))
8294                 printk(KERN_ERR PFX
8295                        "MCP disabled, must load devices in order!\n");
8296
8297         /* Set multi queue mode */
8298         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8299             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8300                 printk(KERN_ERR PFX
8301                       "Multi disabled since int_mode requested is not MSI-X\n");
8302                 multi_mode = ETH_RSS_MODE_DISABLED;
8303         }
8304         bp->multi_mode = multi_mode;
8305
8306
8307         /* Set TPA flags */
8308         if (disable_tpa) {
8309                 bp->flags &= ~TPA_ENABLE_FLAG;
8310                 bp->dev->features &= ~NETIF_F_LRO;
8311         } else {
8312                 bp->flags |= TPA_ENABLE_FLAG;
8313                 bp->dev->features |= NETIF_F_LRO;
8314         }
8315
8316         bp->mrrs = mrrs;
8317
8318         bp->tx_ring_size = MAX_TX_AVAIL;
8319         bp->rx_ring_size = MAX_RX_AVAIL;
8320
8321         bp->rx_csum = 1;
8322
8323         bp->tx_ticks = 50;
8324         bp->rx_ticks = 25;
8325
8326         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8327         bp->current_interval = (poll ? poll : timer_interval);
8328
8329         init_timer(&bp->timer);
8330         bp->timer.expires = jiffies + bp->current_interval;
8331         bp->timer.data = (unsigned long) bp;
8332         bp->timer.function = bnx2x_timer;
8333
8334         return rc;
8335 }
8336
8337 /*
8338  * ethtool service functions
8339  */
8340
8341 /* All ethtool functions called with rtnl_lock */
8342
8343 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8344 {
8345         struct bnx2x *bp = netdev_priv(dev);
8346
8347         cmd->supported = bp->port.supported;
8348         cmd->advertising = bp->port.advertising;
8349
8350         if (netif_carrier_ok(dev)) {
8351                 cmd->speed = bp->link_vars.line_speed;
8352                 cmd->duplex = bp->link_vars.duplex;
8353         } else {
8354                 cmd->speed = bp->link_params.req_line_speed;
8355                 cmd->duplex = bp->link_params.req_duplex;
8356         }
8357         if (IS_E1HMF(bp)) {
8358                 u16 vn_max_rate;
8359
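                /*
                 * The MF max-bandwidth field is expressed in units of
                 * 100 Mbps (hence the multiplication below), so the
                 * reported speed is clamped to the per-function limit.
                 */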
8360                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8361                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8362                 if (vn_max_rate < cmd->speed)
8363                         cmd->speed = vn_max_rate;
8364         }
8365
8366         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8367                 u32 ext_phy_type =
8368                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8369
8370                 switch (ext_phy_type) {
8371                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8372                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8373                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8374                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8375                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8376                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8377                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8378                         cmd->port = PORT_FIBRE;
8379                         break;
8380
8381                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8382                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8383                         cmd->port = PORT_TP;
8384                         break;
8385
8386                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8387                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8388                                   bp->link_params.ext_phy_config);
8389                         break;
8390
8391                 default:
8392                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8393                            bp->link_params.ext_phy_config);
8394                         break;
8395                 }
8396         } else
8397                 cmd->port = PORT_TP;
8398
8399         cmd->phy_address = bp->port.phy_addr;
8400         cmd->transceiver = XCVR_INTERNAL;
8401
8402         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8403                 cmd->autoneg = AUTONEG_ENABLE;
8404         else
8405                 cmd->autoneg = AUTONEG_DISABLE;
8406
8407         cmd->maxtxpkt = 0;
8408         cmd->maxrxpkt = 0;
8409
8410         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8411            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8412            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8413            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8414            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8415            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8416            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8417
8418         return 0;
8419 }
8420
8421 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8422 {
8423         struct bnx2x *bp = netdev_priv(dev);
8424         u32 advertising;
8425
8426         if (IS_E1HMF(bp))
8427                 return 0;
8428
8429         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8430            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8431            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8432            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8433            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8434            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8435            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8436
8437         if (cmd->autoneg == AUTONEG_ENABLE) {
8438                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8439                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8440                         return -EINVAL;
8441                 }
8442
8443                 /* limit the requested advertising to the supported modes */
8444                 cmd->advertising &= bp->port.supported;
8445
8446                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8447                 bp->link_params.req_duplex = DUPLEX_FULL;
8448                 bp->port.advertising |= (ADVERTISED_Autoneg |
8449                                          cmd->advertising);
8450
8451         } else { /* forced speed */
8452                 /* advertise the requested speed and duplex if supported */
8453                 switch (cmd->speed) {
8454                 case SPEED_10:
8455                         if (cmd->duplex == DUPLEX_FULL) {
8456                                 if (!(bp->port.supported &
8457                                       SUPPORTED_10baseT_Full)) {
8458                                         DP(NETIF_MSG_LINK,
8459                                            "10M full not supported\n");
8460                                         return -EINVAL;
8461                                 }
8462
8463                                 advertising = (ADVERTISED_10baseT_Full |
8464                                                ADVERTISED_TP);
8465                         } else {
8466                                 if (!(bp->port.supported &
8467                                       SUPPORTED_10baseT_Half)) {
8468                                         DP(NETIF_MSG_LINK,
8469                                            "10M half not supported\n");
8470                                         return -EINVAL;
8471                                 }
8472
8473                                 advertising = (ADVERTISED_10baseT_Half |
8474                                                ADVERTISED_TP);
8475                         }
8476                         break;
8477
8478                 case SPEED_100:
8479                         if (cmd->duplex == DUPLEX_FULL) {
8480                                 if (!(bp->port.supported &
8481                                                 SUPPORTED_100baseT_Full)) {
8482                                         DP(NETIF_MSG_LINK,
8483                                            "100M full not supported\n");
8484                                         return -EINVAL;
8485                                 }
8486
8487                                 advertising = (ADVERTISED_100baseT_Full |
8488                                                ADVERTISED_TP);
8489                         } else {
8490                                 if (!(bp->port.supported &
8491                                                 SUPPORTED_100baseT_Half)) {
8492                                         DP(NETIF_MSG_LINK,
8493                                            "100M half not supported\n");
8494                                         return -EINVAL;
8495                                 }
8496
8497                                 advertising = (ADVERTISED_100baseT_Half |
8498                                                ADVERTISED_TP);
8499                         }
8500                         break;
8501
8502                 case SPEED_1000:
8503                         if (cmd->duplex != DUPLEX_FULL) {
8504                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8505                                 return -EINVAL;
8506                         }
8507
8508                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8509                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8510                                 return -EINVAL;
8511                         }
8512
8513                         advertising = (ADVERTISED_1000baseT_Full |
8514                                        ADVERTISED_TP);
8515                         break;
8516
8517                 case SPEED_2500:
8518                         if (cmd->duplex != DUPLEX_FULL) {
8519                                 DP(NETIF_MSG_LINK,
8520                                    "2.5G half not supported\n");
8521                                 return -EINVAL;
8522                         }
8523
8524                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8525                                 DP(NETIF_MSG_LINK,
8526                                    "2.5G full not supported\n");
8527                                 return -EINVAL;
8528                         }
8529
8530                         advertising = (ADVERTISED_2500baseX_Full |
8531                                        ADVERTISED_TP);
8532                         break;
8533
8534                 case SPEED_10000:
8535                         if (cmd->duplex != DUPLEX_FULL) {
8536                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8537                                 return -EINVAL;
8538                         }
8539
8540                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8541                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8542                                 return -EINVAL;
8543                         }
8544
8545                         advertising = (ADVERTISED_10000baseT_Full |
8546                                        ADVERTISED_FIBRE);
8547                         break;
8548
8549                 default:
8550                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8551                         return -EINVAL;
8552                 }
8553
8554                 bp->link_params.req_line_speed = cmd->speed;
8555                 bp->link_params.req_duplex = cmd->duplex;
8556                 bp->port.advertising = advertising;
8557         }
8558
8559         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8560            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8561            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8562            bp->port.advertising);
8563
8564         if (netif_running(dev)) {
8565                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8566                 bnx2x_link_set(bp);
8567         }
8568
8569         return 0;
8570 }
8571
8572 #define PHY_FW_VER_LEN                  10
8573
8574 static void bnx2x_get_drvinfo(struct net_device *dev,
8575                               struct ethtool_drvinfo *info)
8576 {
8577         struct bnx2x *bp = netdev_priv(dev);
8578         u8 phy_fw_ver[PHY_FW_VER_LEN];
8579
8580         strcpy(info->driver, DRV_MODULE_NAME);
8581         strcpy(info->version, DRV_MODULE_VERSION);
8582
8583         phy_fw_ver[0] = '\0';
8584         if (bp->port.pmf) {
8585                 bnx2x_acquire_phy_lock(bp);
8586                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8587                                              (bp->state != BNX2X_STATE_CLOSED),
8588                                              phy_fw_ver, PHY_FW_VER_LEN);
8589                 bnx2x_release_phy_lock(bp);
8590         }
8591
8592         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8593                  (bp->common.bc_ver & 0xff0000) >> 16,
8594                  (bp->common.bc_ver & 0xff00) >> 8,
8595                  (bp->common.bc_ver & 0xff),
8596                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8597         strcpy(info->bus_info, pci_name(bp->pdev));
8598         info->n_stats = BNX2X_NUM_STATS;
8599         info->testinfo_len = BNX2X_NUM_TESTS;
8600         info->eedump_len = bp->common.flash_size;
8601         info->regdump_len = 0;
8602 }
8603
8604 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8605 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8606
8607 static int bnx2x_get_regs_len(struct net_device *dev)
8608 {
8609         static u32 regdump_len;
8610         struct bnx2x *bp = netdev_priv(dev);
8611         int i;
8612
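        /*
         * Note: regdump_len is a function-local static, so the length is
         * computed once and reused for every device; this assumes all
         * bnx2x devices in the system belong to the same chip family.
         */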
8613         if (regdump_len)
8614                 return regdump_len;
8615
8616         if (CHIP_IS_E1(bp)) {
8617                 for (i = 0; i < REGS_COUNT; i++)
8618                         if (IS_E1_ONLINE(reg_addrs[i].info))
8619                                 regdump_len += reg_addrs[i].size;
8620
8621                 for (i = 0; i < WREGS_COUNT_E1; i++)
8622                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8623                                 regdump_len += wreg_addrs_e1[i].size *
8624                                         (1 + wreg_addrs_e1[i].read_regs_count);
8625
8626         } else { /* E1H */
8627                 for (i = 0; i < REGS_COUNT; i++)
8628                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8629                                 regdump_len += reg_addrs[i].size;
8630
8631                 for (i = 0; i < WREGS_COUNT_E1H; i++)
8632                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8633                                 regdump_len += wreg_addrs_e1h[i].size *
8634                                         (1 + wreg_addrs_e1h[i].read_regs_count);
8635         }
8636         regdump_len *= 4;
8637         regdump_len += sizeof(struct dump_hdr);
8638
8639         return regdump_len;
8640 }
8641
8642 static void bnx2x_get_regs(struct net_device *dev,
8643                            struct ethtool_regs *regs, void *_p)
8644 {
8645         u32 *p = _p, i, j;
8646         struct bnx2x *bp = netdev_priv(dev);
8647         struct dump_hdr dump_hdr = {0};
8648
8649         regs->version = 0;
8650         memset(p, 0, regs->len);
8651
8652         if (!netif_running(bp->dev))
8653                 return;
8654
8655         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8656         dump_hdr.dump_sign = dump_sign_all;
8657         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8658         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8659         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8660         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8661         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8662
8663         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8664         p += dump_hdr.hdr_size + 1;
8665
8666         if (CHIP_IS_E1(bp)) {
8667                 for (i = 0; i < REGS_COUNT; i++)
8668                         if (IS_E1_ONLINE(reg_addrs[i].info))
8669                                 for (j = 0; j < reg_addrs[i].size; j++)
8670                                         *p++ = REG_RD(bp,
8671                                                       reg_addrs[i].addr + j*4);
8672
8673         } else { /* E1H */
8674                 for (i = 0; i < REGS_COUNT; i++)
8675                         if (IS_E1H_ONLINE(reg_addrs[i].info))
8676                                 for (j = 0; j < reg_addrs[i].size; j++)
8677                                         *p++ = REG_RD(bp,
8678                                                       reg_addrs[i].addr + j*4);
8679         }
8680 }
8681
8682 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8683 {
8684         struct bnx2x *bp = netdev_priv(dev);
8685
8686         if (bp->flags & NO_WOL_FLAG) {
8687                 wol->supported = 0;
8688                 wol->wolopts = 0;
8689         } else {
8690                 wol->supported = WAKE_MAGIC;
8691                 if (bp->wol)
8692                         wol->wolopts = WAKE_MAGIC;
8693                 else
8694                         wol->wolopts = 0;
8695         }
8696         memset(&wol->sopass, 0, sizeof(wol->sopass));
8697 }
8698
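/*
 * Illustrative userspace usage (assuming the standard ethtool utility):
 * "ethtool -s eth0 wol g" enables magic-packet wake and reaches the
 * handler below with wolopts == WAKE_MAGIC; "ethtool -s eth0 wol d"
 * clears it.
 */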
8699 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8700 {
8701         struct bnx2x *bp = netdev_priv(dev);
8702
8703         if (wol->wolopts & ~WAKE_MAGIC)
8704                 return -EINVAL;
8705
8706         if (wol->wolopts & WAKE_MAGIC) {
8707                 if (bp->flags & NO_WOL_FLAG)
8708                         return -EINVAL;
8709
8710                 bp->wol = 1;
8711         } else
8712                 bp->wol = 0;
8713
8714         return 0;
8715 }
8716
8717 static u32 bnx2x_get_msglevel(struct net_device *dev)
8718 {
8719         struct bnx2x *bp = netdev_priv(dev);
8720
8721         return bp->msglevel;
8722 }
8723
8724 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8725 {
8726         struct bnx2x *bp = netdev_priv(dev);
8727
8728         if (capable(CAP_NET_ADMIN))
8729                 bp->msglevel = level;
8730 }
8731
8732 static int bnx2x_nway_reset(struct net_device *dev)
8733 {
8734         struct bnx2x *bp = netdev_priv(dev);
8735
8736         if (!bp->port.pmf)
8737                 return 0;
8738
8739         if (netif_running(dev)) {
8740                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8741                 bnx2x_link_set(bp);
8742         }
8743
8744         return 0;
8745 }
8746
8747 static u32
8748 bnx2x_get_link(struct net_device *dev)
8749 {
8750         struct bnx2x *bp = netdev_priv(dev);
8751
8752         return bp->link_vars.link_up;
8753 }
8754
8755 static int bnx2x_get_eeprom_len(struct net_device *dev)
8756 {
8757         struct bnx2x *bp = netdev_priv(dev);
8758
8759         return bp->common.flash_size;
8760 }
8761
8762 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8763 {
8764         int port = BP_PORT(bp);
8765         int count, i;
8766         u32 val = 0;
8767
8768         /* adjust timeout for emulation/FPGA */
8769         count = NVRAM_TIMEOUT_COUNT;
8770         if (CHIP_REV_IS_SLOW(bp))
8771                 count *= 100;
8772
8773         /* request access to nvram interface */
8774         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8775                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8776
8777         for (i = 0; i < count*10; i++) {
8778                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8779                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8780                         break;
8781
8782                 udelay(5);
8783         }
8784
8785         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8786                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8787                 return -EBUSY;
8788         }
8789
8790         return 0;
8791 }
8792
8793 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8794 {
8795         int port = BP_PORT(bp);
8796         int count, i;
8797         u32 val = 0;
8798
8799         /* adjust timeout for emulation/FPGA */
8800         count = NVRAM_TIMEOUT_COUNT;
8801         if (CHIP_REV_IS_SLOW(bp))
8802                 count *= 100;
8803
8804         /* relinquish nvram interface */
8805         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8806                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8807
8808         for (i = 0; i < count*10; i++) {
8809                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8810                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8811                         break;
8812
8813                 udelay(5);
8814         }
8815
8816         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8817                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8818                 return -EBUSY;
8819         }
8820
8821         return 0;
8822 }
8823
8824 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8825 {
8826         u32 val;
8827
8828         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8829
8830         /* enable both bits, even on read */
8831         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8832                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8833                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8834 }
8835
8836 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8837 {
8838         u32 val;
8839
8840         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8841
8842         /* disable both bits, even after read */
8843         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8844                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8845                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8846 }
8847
8848 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8849                                   u32 cmd_flags)
8850 {
8851         int count, i, rc;
8852         u32 val;
8853
8854         /* build the command word */
8855         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8856
8857         /* need to clear DONE bit separately */
8858         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8859
8860         /* address of the NVRAM to read from */
8861         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8862                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8863
8864         /* issue a read command */
8865         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8866
8867         /* adjust timeout for emulation/FPGA */
8868         count = NVRAM_TIMEOUT_COUNT;
8869         if (CHIP_REV_IS_SLOW(bp))
8870                 count *= 100;
8871
8872         /* wait for completion */
8873         *ret_val = 0;
8874         rc = -EBUSY;
8875         for (i = 0; i < count; i++) {
8876                 udelay(5);
8877                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8878
8879                 if (val & MCPR_NVM_COMMAND_DONE) {
8880                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8881                         /* we read nvram data in cpu order,
8882                          * but ethtool sees it as an array of bytes;
8883                          * converting to big-endian does the work */
8884                         *ret_val = cpu_to_be32(val);
8885                         rc = 0;
8886                         break;
8887                 }
8888         }
8889
8890         return rc;
8891 }
8892
8893 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8894                             int buf_size)
8895 {
8896         int rc;
8897         u32 cmd_flags;
8898         __be32 val;
8899
8900         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8901                 DP(BNX2X_MSG_NVM,
8902                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8903                    offset, buf_size);
8904                 return -EINVAL;
8905         }
8906
8907         if (offset + buf_size > bp->common.flash_size) {
8908                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8909                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8910                    offset, buf_size, bp->common.flash_size);
8911                 return -EINVAL;
8912         }
8913
8914         /* request access to nvram interface */
8915         rc = bnx2x_acquire_nvram_lock(bp);
8916         if (rc)
8917                 return rc;
8918
8919         /* enable access to nvram interface */
8920         bnx2x_enable_nvram_access(bp);
8921
8922         /* read the first word(s) */
8923         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8924         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8925                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8926                 memcpy(ret_buf, &val, 4);
8927
8928                 /* advance to the next dword */
8929                 offset += sizeof(u32);
8930                 ret_buf += sizeof(u32);
8931                 buf_size -= sizeof(u32);
8932                 cmd_flags = 0;
8933         }
8934
8935         if (rc == 0) {
8936                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8937                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8938                 memcpy(ret_buf, &val, 4);
8939         }
8940
8941         /* disable access to nvram interface */
8942         bnx2x_disable_nvram_access(bp);
8943         bnx2x_release_nvram_lock(bp);
8944
8945         return rc;
8946 }
8947
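/*
 * Illustrative userspace usage (assuming the standard ethtool utility):
 * "ethtool -e eth0 offset 0 length 64" ends up in bnx2x_get_eeprom()
 * below, which services the request through bnx2x_nvram_read() above.
 */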
8948 static int bnx2x_get_eeprom(struct net_device *dev,
8949                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8950 {
8951         struct bnx2x *bp = netdev_priv(dev);
8952         int rc;
8953
8954         if (!netif_running(dev))
8955                 return -EAGAIN;
8956
8957         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8958            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8959            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8960            eeprom->len, eeprom->len);
8961
8962         /* parameters already validated in ethtool_get_eeprom */
8963
8964         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8965
8966         return rc;
8967 }
8968
8969 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8970                                    u32 cmd_flags)
8971 {
8972         int count, i, rc;
8973
8974         /* build the command word */
8975         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8976
8977         /* need to clear DONE bit separately */
8978         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8979
8980         /* write the data */
8981         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8982
8983         /* address of the NVRAM to write to */
8984         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8985                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8986
8987         /* issue the write command */
8988         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8989
8990         /* adjust timeout for emulation/FPGA */
8991         count = NVRAM_TIMEOUT_COUNT;
8992         if (CHIP_REV_IS_SLOW(bp))
8993                 count *= 100;
8994
8995         /* wait for completion */
8996         rc = -EBUSY;
8997         for (i = 0; i < count; i++) {
8998                 udelay(5);
8999                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9000                 if (val & MCPR_NVM_COMMAND_DONE) {
9001                         rc = 0;
9002                         break;
9003                 }
9004         }
9005
9006         return rc;
9007 }
9008
9009 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
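/*
 * Example: BYTE_OFFSET(5) == 8 * (5 & 0x03) == 8, and the enclosing
 * dword-aligned word starts at 5 & ~0x03 == 4 (see align_offset below).
 */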
9010
9011 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9012                               int buf_size)
9013 {
9014         int rc;
9015         u32 cmd_flags;
9016         u32 align_offset;
9017         __be32 val;
9018
9019         if (offset + buf_size > bp->common.flash_size) {
9020                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9021                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9022                    offset, buf_size, bp->common.flash_size);
9023                 return -EINVAL;
9024         }
9025
9026         /* request access to nvram interface */
9027         rc = bnx2x_acquire_nvram_lock(bp);
9028         if (rc)
9029                 return rc;
9030
9031         /* enable access to nvram interface */
9032         bnx2x_enable_nvram_access(bp);
9033
9034         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9035         align_offset = (offset & ~0x03);
9036         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9037
9038         if (rc == 0) {
9039                 val &= ~(0xff << BYTE_OFFSET(offset));
9040                 val |= (*data_buf << BYTE_OFFSET(offset));
9041
9042                 /* nvram data is returned as an array of bytes;
9043                  * convert it back to cpu order */
9044                 val = be32_to_cpu(val);
9045
9046                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9047                                              cmd_flags);
9048         }
9049
9050         /* disable access to nvram interface */
9051         bnx2x_disable_nvram_access(bp);
9052         bnx2x_release_nvram_lock(bp);
9053
9054         return rc;
9055 }
9056
9057 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9058                              int buf_size)
9059 {
9060         int rc;
9061         u32 cmd_flags;
9062         u32 val;
9063         u32 written_so_far;
9064
9065         if (buf_size == 1)      /* single-byte write (ethtool path) */
9066                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9067
9068         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9069                 DP(BNX2X_MSG_NVM,
9070                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9071                    offset, buf_size);
9072                 return -EINVAL;
9073         }
9074
9075         if (offset + buf_size > bp->common.flash_size) {
9076                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9077                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9078                    offset, buf_size, bp->common.flash_size);
9079                 return -EINVAL;
9080         }
9081
9082         /* request access to nvram interface */
9083         rc = bnx2x_acquire_nvram_lock(bp);
9084         if (rc)
9085                 return rc;
9086
9087         /* enable access to nvram interface */
9088         bnx2x_enable_nvram_access(bp);
9089
9090         written_so_far = 0;
9091         cmd_flags = MCPR_NVM_COMMAND_FIRST;
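        /*
         * Walk the buffer one dword at a time: LAST is raised on the
         * final dword of the buffer or when the current dword is the
         * last one in an NVRAM page; FIRST is re-raised at the start of
         * each new page, so every page is framed as its own burst.
         */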
9092         while ((written_so_far < buf_size) && (rc == 0)) {
9093                 if (written_so_far == (buf_size - sizeof(u32)))
9094                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9095                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9096                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9097                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9098                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9099
9100                 memcpy(&val, data_buf, 4);
9101
9102                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9103
9104                 /* advance to the next dword */
9105                 offset += sizeof(u32);
9106                 data_buf += sizeof(u32);
9107                 written_so_far += sizeof(u32);
9108                 cmd_flags = 0;
9109         }
9110
9111         /* disable access to nvram interface */
9112         bnx2x_disable_nvram_access(bp);
9113         bnx2x_release_nvram_lock(bp);
9114
9115         return rc;
9116 }
9117
9118 static int bnx2x_set_eeprom(struct net_device *dev,
9119                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9120 {
9121         struct bnx2x *bp = netdev_priv(dev);
9122         int rc;
9123
9124         if (!netif_running(dev))
9125                 return -EAGAIN;
9126
9127         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9128            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9129            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9130            eeprom->len, eeprom->len);
9131
9132         /* parameters already validated in ethtool_set_eeprom */
9133
9134         /* If the magic number is PHY (0x00504859, ASCII "PHY"), upgrade the PHY FW */
9135         if (eeprom->magic == 0x00504859)
9136                 if (bp->port.pmf) {
9137
9138                         bnx2x_acquire_phy_lock(bp);
9139                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
9140                                              bp->link_params.ext_phy_config,
9141                                              (bp->state != BNX2X_STATE_CLOSED),
9142                                              eebuf, eeprom->len);
9143                         if ((bp->state == BNX2X_STATE_OPEN) ||
9144                             (bp->state == BNX2X_STATE_DISABLED)) {
9145                                 rc |= bnx2x_link_reset(&bp->link_params,
9146                                                        &bp->link_vars, 1);
9147                                 rc |= bnx2x_phy_init(&bp->link_params,
9148                                                      &bp->link_vars);
9149                         }
9150                         bnx2x_release_phy_lock(bp);
9151
9152                 } else /* Only the PMF can access the PHY */
9153                         return -EINVAL;
9154         else
9155                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9156
9157         return rc;
9158 }
9159
9160 static int bnx2x_get_coalesce(struct net_device *dev,
9161                               struct ethtool_coalesce *coal)
9162 {
9163         struct bnx2x *bp = netdev_priv(dev);
9164
9165         memset(coal, 0, sizeof(struct ethtool_coalesce));
9166
9167         coal->rx_coalesce_usecs = bp->rx_ticks;
9168         coal->tx_coalesce_usecs = bp->tx_ticks;
9169
9170         return 0;
9171 }
9172
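/*
 * Illustrative userspace usage (assuming the standard ethtool utility):
 * "ethtool -C eth0 rx-usecs 25 tx-usecs 50" reaches the handler below,
 * which caps both values at BNX2X_MAX_COALESCE_TOUT.
 */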
9173 static int bnx2x_set_coalesce(struct net_device *dev,
9174                               struct ethtool_coalesce *coal)
9175 {
9176         struct bnx2x *bp = netdev_priv(dev);
9177
9178         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9179         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9180                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9181
9182         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9183         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9184                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9185
9186         if (netif_running(dev))
9187                 bnx2x_update_coalesce(bp);
9188
9189         return 0;
9190 }
9191
9192 static void bnx2x_get_ringparam(struct net_device *dev,
9193                                 struct ethtool_ringparam *ering)
9194 {
9195         struct bnx2x *bp = netdev_priv(dev);
9196
9197         ering->rx_max_pending = MAX_RX_AVAIL;
9198         ering->rx_mini_max_pending = 0;
9199         ering->rx_jumbo_max_pending = 0;
9200
9201         ering->rx_pending = bp->rx_ring_size;
9202         ering->rx_mini_pending = 0;
9203         ering->rx_jumbo_pending = 0;
9204
9205         ering->tx_max_pending = MAX_TX_AVAIL;
9206         ering->tx_pending = bp->tx_ring_size;
9207 }
9208
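/*
 * Illustrative userspace usage (assuming the standard ethtool utility):
 * "ethtool -G eth0 rx 2048 tx 2048" reaches the handler below (values
 * here are examples only); the requested tx ring must exceed
 * MAX_SKB_FRAGS + 4 and neither ring may exceed its MAX_*_AVAIL limit.
 */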
9209 static int bnx2x_set_ringparam(struct net_device *dev,
9210                                struct ethtool_ringparam *ering)
9211 {
9212         struct bnx2x *bp = netdev_priv(dev);
9213         int rc = 0;
9214
9215         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9216             (ering->tx_pending > MAX_TX_AVAIL) ||
9217             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9218                 return -EINVAL;
9219
9220         bp->rx_ring_size = ering->rx_pending;
9221         bp->tx_ring_size = ering->tx_pending;
9222
9223         if (netif_running(dev)) {
9224                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9225                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9226         }
9227
9228         return rc;
9229 }
9230
9231 static void bnx2x_get_pauseparam(struct net_device *dev,
9232                                  struct ethtool_pauseparam *epause)
9233 {
9234         struct bnx2x *bp = netdev_priv(dev);
9235
9236         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9237                            BNX2X_FLOW_CTRL_AUTO) &&
9238                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9239
9240         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9241                             BNX2X_FLOW_CTRL_RX);
9242         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9243                             BNX2X_FLOW_CTRL_TX);
9244
9245         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9246            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9247            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9248 }
9249
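/*
 * Illustrative userspace usage (assuming the standard ethtool utility):
 * "ethtool -A eth0 rx on tx on autoneg on" reaches the handler below.
 * Note that in E1HMF mode the request is accepted but has no effect.
 */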
9250 static int bnx2x_set_pauseparam(struct net_device *dev,
9251                                 struct ethtool_pauseparam *epause)
9252 {
9253         struct bnx2x *bp = netdev_priv(dev);
9254
9255         if (IS_E1HMF(bp))
9256                 return 0;
9257
9258         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9259            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9260            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9261
9262         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9263
9264         if (epause->rx_pause)
9265                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9266
9267         if (epause->tx_pause)
9268                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9269
9270         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9271                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9272
9273         if (epause->autoneg) {
9274                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9275                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9276                         return -EINVAL;
9277                 }
9278
9279                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9280                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9281         }
9282
9283         DP(NETIF_MSG_LINK,
9284            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9285
9286         if (netif_running(dev)) {
9287                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9288                 bnx2x_link_set(bp);
9289         }
9290
9291         return 0;
9292 }
9293
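/*
 * ETH_FLAG_LRO is toggled from userspace (e.g. "ethtool -K eth0 lro on"
 * with an ethtool version that exposes LRO); enabling it maps onto the
 * hardware TPA feature, and a running interface is reloaded for the
 * change to take effect.
 */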
9294 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9295 {
9296         struct bnx2x *bp = netdev_priv(dev);
9297         int changed = 0;
9298         int rc = 0;
9299
9300         /* TPA requires Rx CSUM offloading */
9301         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9302                 if (!(dev->features & NETIF_F_LRO)) {
9303                         dev->features |= NETIF_F_LRO;
9304                         bp->flags |= TPA_ENABLE_FLAG;
9305                         changed = 1;
9306                 }
9307
9308         } else if (dev->features & NETIF_F_LRO) {
9309                 dev->features &= ~NETIF_F_LRO;
9310                 bp->flags &= ~TPA_ENABLE_FLAG;
9311                 changed = 1;
9312         }
9313
9314         if (changed && netif_running(dev)) {
9315                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9316                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9317         }
9318
9319         return rc;
9320 }
9321
9322 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9323 {
9324         struct bnx2x *bp = netdev_priv(dev);
9325
9326         return bp->rx_csum;
9327 }
9328
9329 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9330 {
9331         struct bnx2x *bp = netdev_priv(dev);
9332         int rc = 0;
9333
9334         bp->rx_csum = data;
9335
9336         /* Disable TPA when Rx CSUM is disabled; otherwise all
9337            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9338         if (!data) {
9339                 u32 flags = ethtool_op_get_flags(dev);
9340
9341                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9342         }
9343
9344         return rc;
9345 }
9346
9347 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9348 {
9349         if (data) {
9350                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9351                 dev->features |= NETIF_F_TSO6;
9352 #ifdef BCM_VLAN
9353                 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9354                 dev->vlan_features |= NETIF_F_TSO6;
9355 #endif
9356         } else {
9357                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9358                 dev->features &= ~NETIF_F_TSO6;
9359 #ifdef BCM_VLAN
9360                 dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9361                 dev->vlan_features &= ~NETIF_F_TSO6;
9362 #endif
9363         }
9364
9365         return 0;
9366 }
9367
9368 static const struct {
9369         char string[ETH_GSTRING_LEN];
9370 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9371         { "register_test (offline)" },
9372         { "memory_test (offline)" },
9373         { "loopback_test (offline)" },
9374         { "nvram_test (online)" },
9375         { "interrupt_test (online)" },
9376         { "link_test (online)" },
9377         { "idle check (online)" }
9378 };
9379
9380 static int bnx2x_self_test_count(struct net_device *dev)
9381 {
9382         return BNX2X_NUM_TESTS;
9383 }
9384
9385 static int bnx2x_test_registers(struct bnx2x *bp)
9386 {
9387         int idx, i, rc = -ENODEV;
9388         u32 wr_val = 0;
9389         int port = BP_PORT(bp);
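        /*
         * Each entry holds the port-0 register offset, the stride to the
         * port-1 copy (added as port * offset1 below), and a mask of the
         * bits that are implemented and therefore checked by the test.
         */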
9390         static const struct {
9391                 u32  offset0;
9392                 u32  offset1;
9393                 u32  mask;
9394         } reg_tbl[] = {
9395 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9396                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9397                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9398                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9399                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9400                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9401                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9402                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9403                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9404                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9405 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9406                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9407                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9408                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9409                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9410                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9411                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9412                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9413                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
9414                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9415 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9416                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9417                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9418                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9419                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9420                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9421                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9422                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9423                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9424                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9425 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9426                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9427                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9428                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9429                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9430                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9431                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9432                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9433
9434                 { 0xffffffff, 0, 0x00000000 }
9435         };
9436
9437         if (!netif_running(bp->dev))
9438                 return rc;
9439
9440         /* Run the test twice:
9441            first writing 0x00000000, then writing 0xffffffff */
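        /* For example, with mask 0x000003ff the pass-1 write of 0x00000000
         * must read back with all ten mask bits clear, and the pass-2 write
         * of 0xffffffff with all ten set; bits outside the mask may differ:
         *
         *      (val & 0x000003ff) == (wr_val & 0x000003ff)
         */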
9442         for (idx = 0; idx < 2; idx++) {
9443
9444                 switch (idx) {
9445                 case 0:
9446                         wr_val = 0;
9447                         break;
9448                 case 1:
9449                         wr_val = 0xffffffff;
9450                         break;
9451                 }
9452
9453                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9454                         u32 offset, mask, save_val, val;
9455
9456                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9457                         mask = reg_tbl[i].mask;
9458
9459                         save_val = REG_RD(bp, offset);
9460
9461                         REG_WR(bp, offset, wr_val);
9462                         val = REG_RD(bp, offset);
9463
9464                         /* Restore the original register's value */
9465                         REG_WR(bp, offset, save_val);
9466
9467                         /* verify the value is as expected */
9468                         if ((val & mask) != (wr_val & mask))
9469                                 goto test_reg_exit;
9470                 }
9471         }
9472
9473         rc = 0;
9474
9475 test_reg_exit:
9476         return rc;
9477 }
9478
9479 static int bnx2x_test_memory(struct bnx2x *bp)
9480 {
9481         int i, j, rc = -ENODEV;
9482         u32 val;
9483         static const struct {
9484                 u32 offset;
9485                 int size;
9486         } mem_tbl[] = {
9487                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9488                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9489                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9490                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9491                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9492                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9493                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9494
9495                 { 0xffffffff, 0 }
9496         };
9497         static const struct {
9498                 char *name;
9499                 u32 offset;
9500                 u32 e1_mask;
9501                 u32 e1h_mask;
9502         } prty_tbl[] = {
9503                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9504                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9505                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9506                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9507                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9508                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9509
9510                 { NULL, 0xffffffff, 0, 0 }
9511         };
9512
9513         if (!netif_running(bp->dev))
9514                 return rc;
9515
9516         /* Go through all the memories */
9517         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9518                 for (j = 0; j < mem_tbl[i].size; j++)
9519                         REG_RD(bp, mem_tbl[i].offset + j*4);
9520
9521         /* Check the parity status */
9522         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9523                 val = REG_RD(bp, prty_tbl[i].offset);
9524                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9525                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9526                         DP(NETIF_MSG_HW,
9527                            "%s is 0x%x\n", prty_tbl[i].name, val);
9528                         goto test_mem_exit;
9529                 }
9530         }
9531
9532         rc = 0;
9533
9534 test_mem_exit:
9535         return rc;
9536 }
9537
9538 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9539 {
9540         int cnt = 1000;
9541
9542         if (link_up)
9543                 while (bnx2x_link_test(bp) && cnt--)
9544                         msleep(10);
9545 }
9546
9547 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9548 {
9549         unsigned int pkt_size, num_pkts, i;
9550         struct sk_buff *skb;
9551         unsigned char *packet;
9552         struct bnx2x_fastpath *fp = &bp->fp[0];
9553         u16 tx_start_idx, tx_idx;
9554         u16 rx_start_idx, rx_idx;
9555         u16 pkt_prod;
9556         struct sw_tx_bd *tx_buf;
9557         struct eth_tx_bd *tx_bd;
9558         dma_addr_t mapping;
9559         union eth_rx_cqe *cqe;
9560         u8 cqe_fp_flags;
9561         struct sw_rx_bd *rx_buf;
9562         u16 len;
9563         int rc = -ENODEV;
9564
9565         /* check the loopback mode */
9566         switch (loopback_mode) {
9567         case BNX2X_PHY_LOOPBACK:
9568                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9569                         return -EINVAL;
9570                 break;
9571         case BNX2X_MAC_LOOPBACK:
9572                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9573                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9574                 break;
9575         default:
9576                 return -EINVAL;
9577         }
9578
9579         /* prepare the loopback packet */
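        /* The frame is addressed to ourselves: dst MAC = own MAC, the rest
         * of the Ethernet header is zeroed, and each payload byte i is set
         * to (i & 0xff) so the receive path below can verify it by index.
         */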
9580         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9581                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9582         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9583         if (!skb) {
9584                 rc = -ENOMEM;
9585                 goto test_loopback_exit;
9586         }
9587         packet = skb_put(skb, pkt_size);
9588         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9589         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9590         for (i = ETH_HLEN; i < pkt_size; i++)
9591                 packet[i] = (unsigned char) (i & 0xff);
9592
9593         /* send the loopback packet */
9594         num_pkts = 0;
9595         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9596         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9597
9598         pkt_prod = fp->tx_pkt_prod++;
9599         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9600         tx_buf->first_bd = fp->tx_bd_prod;
9601         tx_buf->skb = skb;
9602
9603         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9604         mapping = pci_map_single(bp->pdev, skb->data,
9605                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9606         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9607         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9608         tx_bd->nbd = cpu_to_le16(1);
9609         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9610         tx_bd->vlan = cpu_to_le16(pkt_prod);
9611         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9612                                        ETH_TX_BD_FLAGS_END_BD);
9613         tx_bd->general_data = ((UNICAST_ADDRESS <<
9614                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9615
9616         wmb();
9617
9618         le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9619         mb(); /* FW restriction: must not reorder writing nbd and packets */
9620         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9621         DOORBELL(bp, fp->index, 0);
9622
9623         mmiowb();
9624
9625         num_pkts++;
9626         fp->tx_bd_prod++;
9627         bp->dev->trans_start = jiffies;
9628
9629         udelay(100);
9630
9631         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9632         if (tx_idx != tx_start_idx + num_pkts)
9633                 goto test_loopback_exit;
9634
9635         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9636         if (rx_idx != rx_start_idx + num_pkts)
9637                 goto test_loopback_exit;
9638
9639         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9640         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
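        /* note: ETH_RX_ERROR_FALGS is spelled this way in the FW HSI header */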
9641         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9642                 goto test_loopback_rx_exit;
9643
9644         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9645         if (len != pkt_size)
9646                 goto test_loopback_rx_exit;
9647
9648         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9649         skb = rx_buf->skb;
9650         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9651         for (i = ETH_HLEN; i < pkt_size; i++)
9652                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9653                         goto test_loopback_rx_exit;
9654
9655         rc = 0;
9656
9657 test_loopback_rx_exit:
9658
9659         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9660         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9661         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9662         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9663
9664         /* Update producers */
9665         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9666                              fp->rx_sge_prod);
9667
9668 test_loopback_exit:
9669         bp->link_params.loopback_mode = LOOPBACK_NONE;
9670
9671         return rc;
9672 }
9673
9674 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9675 {
9676         int rc = 0, res;
9677
9678         if (!netif_running(bp->dev))
9679                 return BNX2X_LOOPBACK_FAILED;
9680
9681         bnx2x_netif_stop(bp, 1);
9682         bnx2x_acquire_phy_lock(bp);
9683
9684         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9685         if (res) {
9686                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
9687                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9688         }
9689
9690         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9691         if (res) {
9692                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
9693                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9694         }
9695
9696         bnx2x_release_phy_lock(bp);
9697         bnx2x_netif_start(bp);
9698
9699         return rc;
9700 }
9701
9702 #define CRC32_RESIDUAL                  0xdebb20e3
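/* A block that carries its own CRC in its trailing bytes checks out when the
 * CRC computed over the whole block (data + stored CRC) equals this fixed
 * residue; that is how each NVRAM region below is verified without parsing
 * where the CRC sits inside it.
 */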
9703
9704 static int bnx2x_test_nvram(struct bnx2x *bp)
9705 {
9706         static const struct {
9707                 int offset;
9708                 int size;
9709         } nvram_tbl[] = {
9710                 {     0,  0x14 }, /* bootstrap */
9711                 {  0x14,  0xec }, /* dir */
9712                 { 0x100, 0x350 }, /* manuf_info */
9713                 { 0x450,  0xf0 }, /* feature_info */
9714                 { 0x640,  0x64 }, /* upgrade_key_info */
9715                 { 0x6a4,  0x64 },
9716                 { 0x708,  0x70 }, /* manuf_key_info */
9717                 { 0x778,  0x70 },
9718                 {     0,     0 }
9719         };
9720         __be32 buf[0x350 / 4];
9721         u8 *data = (u8 *)buf;
9722         int i, rc;
9723         u32 magic, csum;
9724
9725         rc = bnx2x_nvram_read(bp, 0, data, 4);
9726         if (rc) {
9727                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9728                 goto test_nvram_exit;
9729         }
9730
9731         magic = be32_to_cpu(buf[0]);
9732         if (magic != 0x669955aa) {
9733                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9734                 rc = -ENODEV;
9735                 goto test_nvram_exit;
9736         }
9737
9738         for (i = 0; nvram_tbl[i].size; i++) {
9739
9740                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9741                                       nvram_tbl[i].size);
9742                 if (rc) {
9743                         DP(NETIF_MSG_PROBE,
9744                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9745                         goto test_nvram_exit;
9746                 }
9747
9748                 csum = ether_crc_le(nvram_tbl[i].size, data);
9749                 if (csum != CRC32_RESIDUAL) {
9750                         DP(NETIF_MSG_PROBE,
9751                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9752                         rc = -ENODEV;
9753                         goto test_nvram_exit;
9754                 }
9755         }
9756
9757 test_nvram_exit:
9758         return rc;
9759 }
9760
9761 static int bnx2x_test_intr(struct bnx2x *bp)
9762 {
9763         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9764         int i, rc;
9765
9766         if (!netif_running(bp->dev))
9767                 return -ENODEV;
9768
9769         config->hdr.length = 0;
9770         if (CHIP_IS_E1(bp))
9771                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9772         else
9773                 config->hdr.offset = BP_FUNC(bp);
9774         config->hdr.client_id = bp->fp->cl_id;
9775         config->hdr.reserved1 = 0;
9776
9777         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9778                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9779                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9780         if (rc == 0) {
9781                 bp->set_mac_pending++;
9782                 for (i = 0; i < 10; i++) {
9783                         if (!bp->set_mac_pending)
9784                                 break;
9785                         msleep_interruptible(10);
9786                 }
9787                 if (i == 10)
9788                         rc = -ENODEV;
9789         }
9790
9791         return rc;
9792 }
9793
9794 static void bnx2x_self_test(struct net_device *dev,
9795                             struct ethtool_test *etest, u64 *buf)
9796 {
9797         struct bnx2x *bp = netdev_priv(dev);
9798
9799         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9800
9801         if (!netif_running(dev))
9802                 return;
9803
9804         /* offline tests are not supported in MF mode */
9805         if (IS_E1HMF(bp))
9806                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9807
9808         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9809                 int port = BP_PORT(bp);
9810                 u32 val;
9811                 u8 link_up;
9812
9813                 /* save current value of input enable for TX port IF */
9814                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9815                 /* disable input for TX port IF */
9816                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9817
9818                 link_up = bp->link_vars.link_up;
9819                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9820                 bnx2x_nic_load(bp, LOAD_DIAG);
9821                 /* wait until link state is restored */
9822                 bnx2x_wait_for_link(bp, link_up);
9823
9824                 if (bnx2x_test_registers(bp) != 0) {
9825                         buf[0] = 1;
9826                         etest->flags |= ETH_TEST_FL_FAILED;
9827                 }
9828                 if (bnx2x_test_memory(bp) != 0) {
9829                         buf[1] = 1;
9830                         etest->flags |= ETH_TEST_FL_FAILED;
9831                 }
9832                 buf[2] = bnx2x_test_loopback(bp, link_up);
9833                 if (buf[2] != 0)
9834                         etest->flags |= ETH_TEST_FL_FAILED;
9835
9836                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9837
9838                 /* restore input for TX port IF */
9839                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9840
9841                 bnx2x_nic_load(bp, LOAD_NORMAL);
9842                 /* wait until link state is restored */
9843                 bnx2x_wait_for_link(bp, link_up);
9844         }
9845         if (bnx2x_test_nvram(bp) != 0) {
9846                 buf[3] = 1;
9847                 etest->flags |= ETH_TEST_FL_FAILED;
9848         }
9849         if (bnx2x_test_intr(bp) != 0) {
9850                 buf[4] = 1;
9851                 etest->flags |= ETH_TEST_FL_FAILED;
9852         }
9853         if (bp->port.pmf)
9854                 if (bnx2x_link_test(bp) != 0) {
9855                         buf[5] = 1;
9856                         etest->flags |= ETH_TEST_FL_FAILED;
9857                 }
9858
9859 #ifdef BNX2X_EXTRA_DEBUG
9860         bnx2x_panic_dump(bp);
9861 #endif
9862 }
9863
9864 static const struct {
9865         long offset;
9866         int size;
9867         u8 string[ETH_GSTRING_LEN];
9868 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9869 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9870         { Q_STATS_OFFSET32(error_bytes_received_hi),
9871                                                 8, "[%d]: rx_error_bytes" },
9872         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9873                                                 8, "[%d]: rx_ucast_packets" },
9874         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9875                                                 8, "[%d]: rx_mcast_packets" },
9876         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9877                                                 8, "[%d]: rx_bcast_packets" },
9878         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9879         { Q_STATS_OFFSET32(rx_err_discard_pkt),
9880                                          4, "[%d]: rx_phy_ip_err_discards"},
9881         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9882                                          4, "[%d]: rx_skb_alloc_discard" },
9883         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9884
9885 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9886         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9887                                                         8, "[%d]: tx_packets" }
9888 };
9889
9890 static const struct {
9891         long offset;
9892         int size;
9893         u32 flags;
9894 #define STATS_FLAGS_PORT                1
9895 #define STATS_FLAGS_FUNC                2
9896 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9897         u8 string[ETH_GSTRING_LEN];
9898 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9899 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9900                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9901         { STATS_OFFSET32(error_bytes_received_hi),
9902                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9903         { STATS_OFFSET32(total_unicast_packets_received_hi),
9904                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9905         { STATS_OFFSET32(total_multicast_packets_received_hi),
9906                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9907         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9908                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9909         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9910                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9911         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9912                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9913         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9914                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9915         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9916                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9917 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9918                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9919         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9920                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9921         { STATS_OFFSET32(no_buff_discard_hi),
9922                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9923         { STATS_OFFSET32(mac_filter_discard),
9924                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9925         { STATS_OFFSET32(xxoverflow_discard),
9926                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9927         { STATS_OFFSET32(brb_drop_hi),
9928                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9929         { STATS_OFFSET32(brb_truncate_hi),
9930                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9931         { STATS_OFFSET32(pause_frames_received_hi),
9932                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9933         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9934                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9935         { STATS_OFFSET32(nig_timer_max),
9936                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9937 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9938                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9939         { STATS_OFFSET32(rx_skb_alloc_failed),
9940                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9941         { STATS_OFFSET32(hw_csum_err),
9942                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9943
9944         { STATS_OFFSET32(total_bytes_transmitted_hi),
9945                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9946         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9947                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9948         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9949                                 8, STATS_FLAGS_BOTH, "tx_packets" },
9950         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9951                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9952         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9953                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9954         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9955                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9956         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9957                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9958 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9959                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9960         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9961                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9962         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9963                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9964         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9965                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9966         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9967                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9968         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9969                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9970         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9971                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9972         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9973                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9974         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9975                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9976         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9977                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9978 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9979                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9980         { STATS_OFFSET32(pause_frames_sent_hi),
9981                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9982 };
9983
9984 #define IS_PORT_STAT(i) \
9985         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9986 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9987 #define IS_E1HMF_MODE_STAT(bp) \
9988                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9989
9990 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9991 {
9992         struct bnx2x *bp = netdev_priv(dev);
9993         int i, j, k;
9994
9995         switch (stringset) {
9996         case ETH_SS_STATS:
9997                 if (is_multi(bp)) {
9998                         k = 0;
9999                         for_each_queue(bp, i) {
10000                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10001                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10002                                                 bnx2x_q_stats_arr[j].string, i);
10003                                 k += BNX2X_NUM_Q_STATS;
10004                         }
10005                         if (IS_E1HMF_MODE_STAT(bp))
10006                                 break;
10007                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10008                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10009                                        bnx2x_stats_arr[j].string);
10010                 } else {
10011                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10012                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10013                                         continue;
10014                                 strcpy(buf + j*ETH_GSTRING_LEN,
10015                                        bnx2x_stats_arr[i].string);
10016                                 j++;
10017                         }
10018                 }
10019                 break;
10020
10021         case ETH_SS_TEST:
10022                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10023                 break;
10024         }
10025 }
10026
10027 static int bnx2x_get_stats_count(struct net_device *dev)
10028 {
10029         struct bnx2x *bp = netdev_priv(dev);
10030         int i, num_stats;
10031
10032         if (is_multi(bp)) {
10033                 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
10034                 if (!IS_E1HMF_MODE_STAT(bp))
10035                         num_stats += BNX2X_NUM_STATS;
10036         } else {
10037                 if (IS_E1HMF_MODE_STAT(bp)) {
10038                         num_stats = 0;
10039                         for (i = 0; i < BNX2X_NUM_STATS; i++)
10040                                 if (IS_FUNC_STAT(i))
10041                                         num_stats++;
10042                 } else
10043                         num_stats = BNX2X_NUM_STATS;
10044         }
10045
10046         return num_stats;
10047 }
10048
10049 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10050                                     struct ethtool_stats *stats, u64 *buf)
10051 {
10052         struct bnx2x *bp = netdev_priv(dev);
10053         u32 *hw_stats, *offset;
10054         int i, j, k;
10055
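        /* Wide firmware counters are laid out as two consecutive u32s with
         * the high word first, so an 8-byte counter at 'offset' is rebuilt
         * below as HILO_U64(*offset, *(offset + 1)), i.e. hi:lo combined
         * into one u64.
         */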
10056         if (is_multi(bp)) {
10057                 k = 0;
10058                 for_each_queue(bp, i) {
10059                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10060                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10061                                 if (bnx2x_q_stats_arr[j].size == 0) {
10062                                         /* skip this counter */
10063                                         buf[k + j] = 0;
10064                                         continue;
10065                                 }
10066                                 offset = (hw_stats +
10067                                           bnx2x_q_stats_arr[j].offset);
10068                                 if (bnx2x_q_stats_arr[j].size == 4) {
10069                                         /* 4-byte counter */
10070                                         buf[k + j] = (u64) *offset;
10071                                         continue;
10072                                 }
10073                                 /* 8-byte counter */
10074                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10075                         }
10076                         k += BNX2X_NUM_Q_STATS;
10077                 }
10078                 if (IS_E1HMF_MODE_STAT(bp))
10079                         return;
10080                 hw_stats = (u32 *)&bp->eth_stats;
10081                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10082                         if (bnx2x_stats_arr[j].size == 0) {
10083                                 /* skip this counter */
10084                                 buf[k + j] = 0;
10085                                 continue;
10086                         }
10087                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10088                         if (bnx2x_stats_arr[j].size == 4) {
10089                                 /* 4-byte counter */
10090                                 buf[k + j] = (u64) *offset;
10091                                 continue;
10092                         }
10093                         /* 8-byte counter */
10094                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10095                 }
10096         } else {
10097                 hw_stats = (u32 *)&bp->eth_stats;
10098                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10099                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10100                                 continue;
10101                         if (bnx2x_stats_arr[i].size == 0) {
10102                                 /* skip this counter */
10103                                 buf[j] = 0;
10104                                 j++;
10105                                 continue;
10106                         }
10107                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10108                         if (bnx2x_stats_arr[i].size == 4) {
10109                                 /* 4-byte counter */
10110                                 buf[j] = (u64) *offset;
10111                                 j++;
10112                                 continue;
10113                         }
10114                         /* 8-byte counter */
10115                         buf[j] = HILO_U64(*offset, *(offset + 1));
10116                         j++;
10117                 }
10118         }
10119 }
10120
10121 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10122 {
10123         struct bnx2x *bp = netdev_priv(dev);
10124         int port = BP_PORT(bp);
10125         int i;
10126
10127         if (!netif_running(dev))
10128                 return 0;
10129
10130         if (!bp->port.pmf)
10131                 return 0;
10132
10133         if (data == 0)
10134                 data = 2;
10135
10136         for (i = 0; i < (data * 2); i++) {
10137                 if ((i % 2) == 0)
10138                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10139                                       bp->link_params.hw_led_mode,
10140                                       bp->link_params.chip_id);
10141                 else
10142                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10143                                       bp->link_params.hw_led_mode,
10144                                       bp->link_params.chip_id);
10145
10146                 msleep_interruptible(500);
10147                 if (signal_pending(current))
10148                         break;
10149         }
10150
10151         if (bp->link_vars.link_up)
10152                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10153                               bp->link_vars.line_speed,
10154                               bp->link_params.hw_led_mode,
10155                               bp->link_params.chip_id);
10156
10157         return 0;
10158 }
10159
10160 static struct ethtool_ops bnx2x_ethtool_ops = {
10161         .get_settings           = bnx2x_get_settings,
10162         .set_settings           = bnx2x_set_settings,
10163         .get_drvinfo            = bnx2x_get_drvinfo,
10164         .get_regs_len           = bnx2x_get_regs_len,
10165         .get_regs               = bnx2x_get_regs,
10166         .get_wol                = bnx2x_get_wol,
10167         .set_wol                = bnx2x_set_wol,
10168         .get_msglevel           = bnx2x_get_msglevel,
10169         .set_msglevel           = bnx2x_set_msglevel,
10170         .nway_reset             = bnx2x_nway_reset,
10171         .get_link               = bnx2x_get_link,
10172         .get_eeprom_len         = bnx2x_get_eeprom_len,
10173         .get_eeprom             = bnx2x_get_eeprom,
10174         .set_eeprom             = bnx2x_set_eeprom,
10175         .get_coalesce           = bnx2x_get_coalesce,
10176         .set_coalesce           = bnx2x_set_coalesce,
10177         .get_ringparam          = bnx2x_get_ringparam,
10178         .set_ringparam          = bnx2x_set_ringparam,
10179         .get_pauseparam         = bnx2x_get_pauseparam,
10180         .set_pauseparam         = bnx2x_set_pauseparam,
10181         .get_rx_csum            = bnx2x_get_rx_csum,
10182         .set_rx_csum            = bnx2x_set_rx_csum,
10183         .get_tx_csum            = ethtool_op_get_tx_csum,
10184         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10185         .set_flags              = bnx2x_set_flags,
10186         .get_flags              = ethtool_op_get_flags,
10187         .get_sg                 = ethtool_op_get_sg,
10188         .set_sg                 = ethtool_op_set_sg,
10189         .get_tso                = ethtool_op_get_tso,
10190         .set_tso                = bnx2x_set_tso,
10191         .self_test_count        = bnx2x_self_test_count,
10192         .self_test              = bnx2x_self_test,
10193         .get_strings            = bnx2x_get_strings,
10194         .phys_id                = bnx2x_phys_id,
10195         .get_stats_count        = bnx2x_get_stats_count,
10196         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10197 };
10198
10199 /* end of ethtool_ops */
10200
10201 /****************************************************************************
10202 * General service functions
10203 ****************************************************************************/
10204
10205 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10206 {
10207         u16 pmcsr;
10208
10209         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10210
10211         switch (state) {
10212         case PCI_D0:
10213                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10214                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10215                                        PCI_PM_CTRL_PME_STATUS));
10216
10217                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10218                         /* delay required during transition out of D3hot */
10219                         msleep(20);
10220                 break;
10221
10222         case PCI_D3hot:
10223                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
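                /* 3 is the D3hot encoding of the PM_CTRL state field */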
10224                 pmcsr |= 3;
10225
10226                 if (bp->wol)
10227                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10228
10229                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10230                                       pmcsr);
10231
10232                 /* No more memory access after this point until
10233                  * device is brought back to D0.
10234                  */
10235                 break;
10236
10237         default:
10238                 return -EINVAL;
10239         }
10240         return 0;
10241 }
10242
10243 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10244 {
10245         u16 rx_cons_sb;
10246
10247         /* Tell compiler that status block fields can change */
10248         barrier();
10249         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
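        /* The last entry of each RCQ page is a "next page" element that is
         * never reported as a completion; skip it so the comparison below
         * lines up with rx_comp_cons, which skips it via NEXT_RCQ_IDX().
         */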
10250         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10251                 rx_cons_sb++;
10252         return (fp->rx_comp_cons != rx_cons_sb);
10253 }
10254
10255 /*
10256  * net_device service functions
10257  */
10258
10259 static int bnx2x_poll(struct napi_struct *napi, int budget)
10260 {
10261         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10262                                                  napi);
10263         struct bnx2x *bp = fp->bp;
10264         int work_done = 0;
10265
10266 #ifdef BNX2X_STOP_ON_ERROR
10267         if (unlikely(bp->panic))
10268                 goto poll_panic;
10269 #endif
10270
10271         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10272         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10273         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10274
10275         bnx2x_update_fpsb_idx(fp);
10276
10277         if (bnx2x_has_tx_work(fp))
10278                 bnx2x_tx_int(fp);
10279
10280         if (bnx2x_has_rx_work(fp)) {
10281                 work_done = bnx2x_rx_int(fp, budget);
10282
10283                 /* must not complete if we consumed full budget */
10284                 if (work_done >= budget)
10285                         goto poll_again;
10286         }
10287
10288         /* BNX2X_HAS_WORK() reads the status block, so we must ensure
10289          * that the status block indices have actually been read
10290          * (bnx2x_update_fpsb_idx) before this check (BNX2X_HAS_WORK),
10291          * or we might write a "newer" status block value to the IGU.
10292          * (If a DMA occurred right after BNX2X_HAS_WORK and there were
10293          * no rmb, the memory read in bnx2x_update_fpsb_idx could be
10294          * postponed to just before bnx2x_ack_sb.)  In that case no
10295          * further interrupt would arrive until the next status block
10296          * update, even though there is still unhandled work.
10297          */
10298         rmb();
10299
10300         if (!BNX2X_HAS_WORK(fp)) {
10301 #ifdef BNX2X_STOP_ON_ERROR
10302 poll_panic:
10303 #endif
10304                 napi_complete(napi);
10305
10306                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10307                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10308                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10309                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10310         }
10311
10312 poll_again:
10313         return work_done;
10314 }
10315
10316
10317 /* we split the first BD into headers and data BDs
10318  * to ease the pain of our fellow microcode engineers;
10319  * we use one mapping for both BDs.
10320  * So far this has only been observed to happen
10321  * in Other Operating Systems(TM)
10322  */
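/* A sketch of the split, both halves reusing the one DMA mapping:
 *
 *   before:  [ BD: addr,        nbytes = old_len ]
 *   after:   [ BD: addr,        nbytes = hlen    ]
 *            [ BD: addr + hlen, nbytes = old_len - hlen ]
 */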
10323 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10324                                    struct bnx2x_fastpath *fp,
10325                                    struct eth_tx_bd **tx_bd, u16 hlen,
10326                                    u16 bd_prod, int nbd)
10327 {
10328         struct eth_tx_bd *h_tx_bd = *tx_bd;
10329         struct eth_tx_bd *d_tx_bd;
10330         dma_addr_t mapping;
10331         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10332
10333         /* first fix first BD */
10334         h_tx_bd->nbd = cpu_to_le16(nbd);
10335         h_tx_bd->nbytes = cpu_to_le16(hlen);
10336
10337         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10338            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10339            h_tx_bd->addr_lo, h_tx_bd->nbd);
10340
10341         /* now get a new data BD
10342          * (after the pbd) and fill it */
10343         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10344         d_tx_bd = &fp->tx_desc_ring[bd_prod];
10345
10346         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10347                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10348
10349         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10350         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10351         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10352         d_tx_bd->vlan = 0;
10353         /* this marks the BD as one that has no individual mapping
10354          * the FW ignores this flag in a BD not marked start
10355          */
10356         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10357         DP(NETIF_MSG_TX_QUEUED,
10358            "TSO split data size is %d (%x:%x)\n",
10359            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10360
10361         /* update tx_bd for marking the last BD flag */
10362         *tx_bd = d_tx_bd;
10363
10364         return bd_prod;
10365 }
10366
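/* Example of the fixup below: the stack may have started its partial
 * checksum 'fix' bytes away from the transport header.  With fix = 2, two
 * bytes preceding the header were summed in and are folded back out; with
 * fix = -2, the first two header bytes were missed and are added in.  The
 * result is byte-swapped for the parsing BD.
 */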
10367 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10368 {
10369         if (fix > 0)
10370                 csum = (u16) ~csum_fold(csum_sub(csum,
10371                                 csum_partial(t_header - fix, fix, 0)));
10372
10373         else if (fix < 0)
10374                 csum = (u16) ~csum_fold(csum_add(csum,
10375                                 csum_partial(t_header, -fix, 0)));
10376
10377         return swab16(csum);
10378 }
10379
10380 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10381 {
10382         u32 rc;
10383
10384         if (skb->ip_summed != CHECKSUM_PARTIAL)
10385                 rc = XMIT_PLAIN;
10386
10387         else {
10388                 if (skb->protocol == htons(ETH_P_IPV6)) {
10389                         rc = XMIT_CSUM_V6;
10390                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10391                                 rc |= XMIT_CSUM_TCP;
10392
10393                 } else {
10394                         rc = XMIT_CSUM_V4;
10395                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10396                                 rc |= XMIT_CSUM_TCP;
10397                 }
10398         }
10399
10400         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10401                 rc |= XMIT_GSO_V4;
10402
10403         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10404                 rc |= XMIT_GSO_V6;
10405
10406         return rc;
10407 }
10408
10409 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10410 /* check if the packet requires linearization (packet is too fragmented);
10411    no need to check fragmentation if page size > 8K (there will be no
10412    violation of FW restrictions) */
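/* The FW restriction being checked: every MSS worth of payload must fit in
 * at most wnd_size BDs.  A window of wnd_size frags is slid across the
 * packet and it is copied if any window sums to less than one MSS; e.g.
 * (illustrative numbers) with wnd_size = 10 and gso_size = 1400, any 10
 * consecutive BDs must carry at least 1400 bytes between them.
 */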
10413 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10414                              u32 xmit_type)
10415 {
10416         int to_copy = 0;
10417         int hlen = 0;
10418         int first_bd_sz = 0;
10419
10420         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10421         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10422
10423                 if (xmit_type & XMIT_GSO) {
10424                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10425                         /* Check if LSO packet needs to be copied:
10426                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10427                         int wnd_size = MAX_FETCH_BD - 3;
10428                         /* Number of windows to check */
10429                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10430                         int wnd_idx = 0;
10431                         int frag_idx = 0;
10432                         u32 wnd_sum = 0;
10433
10434                         /* Headers length */
10435                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10436                                 tcp_hdrlen(skb);
10437
10438                         /* Amount of data (w/o headers) on the linear part of the SKB */
10439                         first_bd_sz = skb_headlen(skb) - hlen;
10440
10441                         wnd_sum  = first_bd_sz;
10442
10443                         /* Calculate the first sum - it's special */
10444                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10445                                 wnd_sum +=
10446                                         skb_shinfo(skb)->frags[frag_idx].size;
10447
10448                         /* If there was data on the linear part of the skb - check it */
10449                         if (first_bd_sz > 0) {
10450                                 if (unlikely(wnd_sum < lso_mss)) {
10451                                         to_copy = 1;
10452                                         goto exit_lbl;
10453                                 }
10454
10455                                 wnd_sum -= first_bd_sz;
10456                         }
10457
10458                         /* Others are easier: run through the frag list and
10459                            check all windows */
10460                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10461                                 wnd_sum +=
10462                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10463
10464                                 if (unlikely(wnd_sum < lso_mss)) {
10465                                         to_copy = 1;
10466                                         break;
10467                                 }
10468                                 wnd_sum -=
10469                                         skb_shinfo(skb)->frags[wnd_idx].size;
10470                         }
10471                 } else {
10472                         /* a non-LSO packet that is too fragmented
10473                            should always be linearized */
10474                         to_copy = 1;
10475                 }
10476         }
10477
10478 exit_lbl:
10479         if (unlikely(to_copy))
10480                 DP(NETIF_MSG_TX_QUEUED,
10481                    "Linearization IS REQUIRED for %s packet. "
10482                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10483                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10484                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10485
10486         return to_copy;
10487 }
10488 #endif
10489
10490 /* called with netif_tx_lock
10491  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10492  * netif_wake_queue()
10493  */
10494 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10495 {
10496         struct bnx2x *bp = netdev_priv(dev);
10497         struct bnx2x_fastpath *fp;
10498         struct netdev_queue *txq;
10499         struct sw_tx_bd *tx_buf;
10500         struct eth_tx_bd *tx_bd;
10501         struct eth_tx_parse_bd *pbd = NULL;
10502         u16 pkt_prod, bd_prod;
10503         int nbd, fp_index;
10504         dma_addr_t mapping;
10505         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10506         int vlan_off = (bp->e1hov ? 4 : 0);
10507         int i;
10508         u8 hlen = 0;
10509
10510 #ifdef BNX2X_STOP_ON_ERROR
10511         if (unlikely(bp->panic))
10512                 return NETDEV_TX_BUSY;
10513 #endif
10514
10515         fp_index = skb_get_queue_mapping(skb);
10516         txq = netdev_get_tx_queue(dev, fp_index);
10517
10518         fp = &bp->fp[fp_index];
10519
10520         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10521                 fp->eth_q_stats.driver_xoff++;
10522                 netif_tx_stop_queue(txq);
10523                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10524                 return NETDEV_TX_BUSY;
10525         }
10526
10527         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10528            "  gso type %x  xmit_type %x\n",
10529            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10530            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10531
10532 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10533         /* First, check if we need to linearize the skb (due to FW
10534            restrictions). No need to check fragmentation if page size > 8K
10535            (there will be no violation of FW restrictions) */
10536         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10537                 /* Statistics of linearization */
10538                 bp->lin_cnt++;
10539                 if (skb_linearize(skb) != 0) {
10540                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10541                            "silently dropping this SKB\n");
10542                         dev_kfree_skb_any(skb);
10543                         return NETDEV_TX_OK;
10544                 }
10545         }
10546 #endif
10547
10548         /*
10549          * Please read carefully. First we use one BD which we mark as start,
10550          * then for TSO or xsum we have a parsing info BD,
10551          * and only then we have the rest of the TSO BDs.
10552          * (don't forget to mark the last one as last,
10553          * and to unmap only AFTER you write to the BD ...)
10554          * And above all, all pbd sizes are in words - NOT DWORDS!
10555          */
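        /* So a TSO skb with two page frags typically ends up as:
         *
         *   [start BD: headers + linear data]
         *   [parse BD: header lengths, pseudo csum, mss]
         *   [data BD] [data BD, marked last]
         */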
10556
10557         pkt_prod = fp->tx_pkt_prod++;
10558         bd_prod = TX_BD(fp->tx_bd_prod);
10559
10560         /* get a tx_buf and first BD */
10561         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10562         tx_bd = &fp->tx_desc_ring[bd_prod];
10563
10564         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10565         tx_bd->general_data = (UNICAST_ADDRESS <<
10566                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10567         /* header nbd */
10568         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10569
10570         /* remember the first BD of the packet */
10571         tx_buf->first_bd = fp->tx_bd_prod;
10572         tx_buf->skb = skb;
10573
10574         DP(NETIF_MSG_TX_QUEUED,
10575            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10576            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10577
10578 #ifdef BCM_VLAN
10579         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10580             (bp->flags & HW_VLAN_TX_FLAG)) {
10581                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10582                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10583                 vlan_off += 4;
10584         } else
10585 #endif
10586                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10587
10588         if (xmit_type) {
10589                 /* turn on parsing and get a BD */
10590                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10591                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10592
10593                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10594         }
10595
10596         if (xmit_type & XMIT_CSUM) {
10597                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10598
10599                 /* for now the NS flag is not used in Linux */
10600                 pbd->global_data =
10601                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10602                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10603
10604                 pbd->ip_hlen = (skb_transport_header(skb) -
10605                                 skb_network_header(skb)) / 2;
10606
10607                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10608
10609                 pbd->total_hlen = cpu_to_le16(hlen);
10610                 hlen = hlen*2 - vlan_off;
10611
10612                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10613
10614                 if (xmit_type & XMIT_CSUM_V4)
10615                         tx_bd->bd_flags.as_bitfield |=
10616                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10617                 else
10618                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10619
10620                 if (xmit_type & XMIT_CSUM_TCP) {
10621                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10622
10623                 } else {
10624                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10625
10626                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10627                         pbd->cs_offset = fix / 2;
10628
10629                         DP(NETIF_MSG_TX_QUEUED,
10630                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
10631                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10632                            SKB_CS(skb));
10633
10634                         /* HW bug: fixup the CSUM */
10635                         pbd->tcp_pseudo_csum =
10636                                 bnx2x_csum_fix(skb_transport_header(skb),
10637                                                SKB_CS(skb), fix);
10638
10639                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10640                            pbd->tcp_pseudo_csum);
10641                 }
10642         }
10643
10644         mapping = pci_map_single(bp->pdev, skb->data,
10645                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10646
10647         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10648         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10649         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10650         tx_bd->nbd = cpu_to_le16(nbd);
10651         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10652
10653         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10654            "  nbytes %d  flags %x  vlan %x\n",
10655            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10656            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10657            le16_to_cpu(tx_bd->vlan));
10658
10659         if (xmit_type & XMIT_GSO) {
10660
10661                 DP(NETIF_MSG_TX_QUEUED,
10662                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10663                    skb->len, hlen, skb_headlen(skb),
10664                    skb_shinfo(skb)->gso_size);
10665
10666                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10667
10668                 if (unlikely(skb_headlen(skb) > hlen))
10669                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10670                                                  bd_prod, ++nbd);
10671
10672                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10673                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10674                 pbd->tcp_flags = pbd_tcp_flags(skb);
10675
10676                 if (xmit_type & XMIT_GSO_V4) {
10677                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10678                         pbd->tcp_pseudo_csum =
10679                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10680                                                           ip_hdr(skb)->daddr,
10681                                                           0, IPPROTO_TCP, 0));
10682
10683                 } else
10684                         pbd->tcp_pseudo_csum =
10685                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10686                                                         &ipv6_hdr(skb)->daddr,
10687                                                         0, IPPROTO_TCP, 0));
10688
10689                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10690         }
10691
10692         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10693                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10694
10695                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10696                 tx_bd = &fp->tx_desc_ring[bd_prod];
10697
10698                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10699                                        frag->size, PCI_DMA_TODEVICE);
10700
10701                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10702                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10703                 tx_bd->nbytes = cpu_to_le16(frag->size);
10704                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10705                 tx_bd->bd_flags.as_bitfield = 0;
10706
10707                 DP(NETIF_MSG_TX_QUEUED,
10708                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
10709                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10710                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10711         }
10712
10713         /* finally, mark this BD as the last BD */
10714         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10715
10716         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
10717            tx_bd, tx_bd->bd_flags.as_bitfield);
10718
10719         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10720
10721         /* now send a tx doorbell, counting the "next page" BD as well
10722          * if the BD chain wrapped past a BD page boundary
10723          */
10724         if (TX_BD_POFF(bd_prod) < nbd)
10725                 nbd++;
10726
10727         if (pbd)
10728                 DP(NETIF_MSG_TX_QUEUED,
10729                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10730                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10731                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10732                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10733                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10734
10735         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10736
10737         /*
10738          * Make sure that the BD data is updated before updating the producer
10739          * since FW might read the BD right after the producer is updated.
10740          * This is only applicable for weak-ordered memory model archs such
10741          * as IA-64. The following barrier is also mandatory since FW will
10742          * as IA-64. The following barrier is also mandatory since FW
10743          * assumes packets always have BDs.
10744         wmb();
10745
10746         le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10747         mb(); /* FW restriction: must not reorder writing nbd and packets */
10748         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10749         DOORBELL(bp, fp->index, 0);
10750
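        /* on weakly-ordered platforms, keep the doorbell write ordered with
         * MMIO done by other CPUs that later take this Tx queue's lock
         */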
10751         mmiowb();
10752
10753         fp->tx_bd_prod += nbd;
10754
10755         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10756                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10757                    if we put Tx into XOFF state. */
10758                 smp_mb();
10759                 netif_tx_stop_queue(txq);
10760                 fp->eth_q_stats.driver_xoff++;
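                /* re-check availability: a Tx completion may have freed BDs
                 * between the test above and stopping the queue
                 */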
10761                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10762                         netif_tx_wake_queue(txq);
10763         }
10764         fp->tx_pkt++;
10765
10766         return NETDEV_TX_OK;
10767 }
10768
10769 /* called with rtnl_lock */
10770 static int bnx2x_open(struct net_device *dev)
10771 {
10772         struct bnx2x *bp = netdev_priv(dev);
10773
10774         netif_carrier_off(dev);
10775
10776         bnx2x_set_power_state(bp, PCI_D0);
10777
10778         return bnx2x_nic_load(bp, LOAD_OPEN);
10779 }
10780
10781 /* called with rtnl_lock */
10782 static int bnx2x_close(struct net_device *dev)
10783 {
10784         struct bnx2x *bp = netdev_priv(dev);
10785
10786         /* Unload the driver, release IRQs */
10787         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10788         if (atomic_read(&bp->pdev->enable_cnt) == 1)
10789                 if (!CHIP_REV_IS_SLOW(bp))
10790                         bnx2x_set_power_state(bp, PCI_D3hot);
10791
10792         return 0;
10793 }
10794
10795 /* called with netif_tx_lock from dev_mcast.c */
10796 static void bnx2x_set_rx_mode(struct net_device *dev)
10797 {
10798         struct bnx2x *bp = netdev_priv(dev);
10799         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10800         int port = BP_PORT(bp);
10801
10802         if (bp->state != BNX2X_STATE_OPEN) {
10803                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10804                 return;
10805         }
10806
10807         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10808
10809         if (dev->flags & IFF_PROMISC)
10810                 rx_mode = BNX2X_RX_MODE_PROMISC;
10811
10812         else if ((dev->flags & IFF_ALLMULTI) ||
10813                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10814                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10815
10816         else { /* some multicasts */
10817                 if (CHIP_IS_E1(bp)) {
10818                         int i, old, offset;
10819                         struct dev_mc_list *mclist;
10820                         struct mac_configuration_cmd *config =
10821                                                 bnx2x_sp(bp, mcast_config);
10822
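                        /* program one CAM entry per multicast address; each
                         * MAC is written as three byte-swapped 16-bit words
                         */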
10823                         for (i = 0, mclist = dev->mc_list;
10824                              mclist && (i < dev->mc_count);
10825                              i++, mclist = mclist->next) {
10826
10827                                 config->config_table[i].
10828                                         cam_entry.msb_mac_addr =
10829                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
10830                                 config->config_table[i].
10831                                         cam_entry.middle_mac_addr =
10832                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
10833                                 config->config_table[i].
10834                                         cam_entry.lsb_mac_addr =
10835                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
10836                                 config->config_table[i].cam_entry.flags =
10837                                                         cpu_to_le16(port);
10838                                 config->config_table[i].
10839                                         target_table_entry.flags = 0;
10840                                 config->config_table[i].
10841                                         target_table_entry.client_id = 0;
10842                                 config->config_table[i].
10843                                         target_table_entry.vlan_id = 0;
10844
10845                                 DP(NETIF_MSG_IFUP,
10846                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10847                                    config->config_table[i].
10848                                                 cam_entry.msb_mac_addr,
10849                                    config->config_table[i].
10850                                                 cam_entry.middle_mac_addr,
10851                                    config->config_table[i].
10852                                                 cam_entry.lsb_mac_addr);
10853                         }
10854                         old = config->hdr.length;
10855                         if (old > i) {
10856                                 for (; i < old; i++) {
10857                                         if (CAM_IS_INVALID(config->
10858                                                            config_table[i])) {
10859                                                 /* already invalidated */
10860                                                 break;
10861                                         }
10862                                         /* invalidate */
10863                                         CAM_INVALIDATE(config->
10864                                                        config_table[i]);
10865                                 }
10866                         }
10867
10868                         if (CHIP_REV_IS_SLOW(bp))
10869                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10870                         else
10871                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
10872
10873                         config->hdr.length = i;
10874                         config->hdr.offset = offset;
10875                         config->hdr.client_id = bp->fp->cl_id;
10876                         config->hdr.reserved1 = 0;
10877
10878                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10879                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10880                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10881                                       0);
10882                 } else { /* E1H */
10883                         /* Accept one or more multicasts */
10884                         struct dev_mc_list *mclist;
10885                         u32 mc_filter[MC_HASH_SIZE];
10886                         u32 crc, bit, regidx;
10887                         int i;
10888
10889                         memset(mc_filter, 0, sizeof(mc_filter));
10890
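                        /* hash each address into a 256-bin filter: the top
                         * byte of the CRC selects the bin, regidx picks one
                         * of the MC_HASH_SIZE 32-bit registers and bit the
                         * position inside it
                         */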
10891                         for (i = 0, mclist = dev->mc_list;
10892                              mclist && (i < dev->mc_count);
10893                              i++, mclist = mclist->next) {
10894
10895                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10896                                    mclist->dmi_addr);
10897
10898                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10899                                 bit = (crc >> 24) & 0xff;
10900                                 regidx = bit >> 5;
10901                                 bit &= 0x1f;
10902                                 mc_filter[regidx] |= (1 << bit);
10903                         }
10904
10905                         for (i = 0; i < MC_HASH_SIZE; i++)
10906                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10907                                        mc_filter[i]);
10908                 }
10909         }
10910
10911         bp->rx_mode = rx_mode;
10912         bnx2x_set_storm_rx_mode(bp);
10913 }
10914
10915 /* called with rtnl_lock */
10916 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10917 {
10918         struct sockaddr *addr = p;
10919         struct bnx2x *bp = netdev_priv(dev);
10920
10921         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10922                 return -EINVAL;
10923
10924         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10925         if (netif_running(dev)) {
10926                 if (CHIP_IS_E1(bp))
10927                         bnx2x_set_mac_addr_e1(bp, 1);
10928                 else
10929                         bnx2x_set_mac_addr_e1h(bp, 1);
10930         }
10931
10932         return 0;
10933 }
10934
10935 /* called with rtnl_lock */
10936 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10937 {
10938         struct mii_ioctl_data *data = if_mii(ifr);
10939         struct bnx2x *bp = netdev_priv(dev);
10940         int port = BP_PORT(bp);
10941         int err;
10942
10943         switch (cmd) {
10944         case SIOCGMIIPHY:
10945                 data->phy_id = bp->port.phy_addr;
10946
10947                 /* fallthrough */
10948
10949         case SIOCGMIIREG: {
10950                 u16 mii_regval;
10951
10952                 if (!netif_running(dev))
10953                         return -EAGAIN;
10954
10955                 mutex_lock(&bp->port.phy_mutex);
10956                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10957                                       DEFAULT_PHY_DEV_ADDR,
10958                                       (data->reg_num & 0x1f), &mii_regval);
10959                 data->val_out = mii_regval;
10960                 mutex_unlock(&bp->port.phy_mutex);
10961                 return err;
10962         }
10963
10964         case SIOCSMIIREG:
10965                 if (!capable(CAP_NET_ADMIN))
10966                         return -EPERM;
10967
10968                 if (!netif_running(dev))
10969                         return -EAGAIN;
10970
10971                 mutex_lock(&bp->port.phy_mutex);
10972                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10973                                        DEFAULT_PHY_DEV_ADDR,
10974                                        (data->reg_num & 0x1f), data->val_in);
10975                 mutex_unlock(&bp->port.phy_mutex);
10976                 return err;
10977
10978         default:
10979                 /* do nothing */
10980                 break;
10981         }
10982
10983         return -EOPNOTSUPP;
10984 }
10985
10986 /* called with rtnl_lock */
10987 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10988 {
10989         struct bnx2x *bp = netdev_priv(dev);
10990         int rc = 0;
10991
10992         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10993             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10994                 return -EINVAL;
10995
10996         /* This does not race with packet allocation
10997          * because the actual alloc size is
10998          * only updated as part of load
10999          */
11000         dev->mtu = new_mtu;
11001
11002         if (netif_running(dev)) {
11003                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11004                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11005         }
11006
11007         return rc;
11008 }
11009
11010 static void bnx2x_tx_timeout(struct net_device *dev)
11011 {
11012         struct bnx2x *bp = netdev_priv(dev);
11013
11014 #ifdef BNX2X_STOP_ON_ERROR
11015         if (!bp->panic)
11016                 bnx2x_panic();
11017 #endif
11018         /* This allows the netif to be shut down gracefully before resetting */
11019         schedule_work(&bp->reset_task);
11020 }
11021
11022 #ifdef BCM_VLAN
11023 /* called with rtnl_lock */
11024 static void bnx2x_vlan_rx_register(struct net_device *dev,
11025                                    struct vlan_group *vlgrp)
11026 {
11027         struct bnx2x *bp = netdev_priv(dev);
11028
11029         bp->vlgrp = vlgrp;
11030
11031         /* Set flags according to the required capabilities */
11032         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11033
11034         if (dev->features & NETIF_F_HW_VLAN_TX)
11035                 bp->flags |= HW_VLAN_TX_FLAG;
11036
11037         if (dev->features & NETIF_F_HW_VLAN_RX)
11038                 bp->flags |= HW_VLAN_RX_FLAG;
11039
11040         if (netif_running(dev))
11041                 bnx2x_set_client_config(bp);
11042 }
11043
11044 #endif
11045
11046 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11047 static void poll_bnx2x(struct net_device *dev)
11048 {
11049         struct bnx2x *bp = netdev_priv(dev);
11050
11051         disable_irq(bp->pdev->irq);
11052         bnx2x_interrupt(bp->pdev->irq, dev);
11053         enable_irq(bp->pdev->irq);
11054 }
11055 #endif
11056
11057 static const struct net_device_ops bnx2x_netdev_ops = {
11058         .ndo_open               = bnx2x_open,
11059         .ndo_stop               = bnx2x_close,
11060         .ndo_start_xmit         = bnx2x_start_xmit,
11061         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11062         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11063         .ndo_validate_addr      = eth_validate_addr,
11064         .ndo_do_ioctl           = bnx2x_ioctl,
11065         .ndo_change_mtu         = bnx2x_change_mtu,
11066         .ndo_tx_timeout         = bnx2x_tx_timeout,
11067 #ifdef BCM_VLAN
11068         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11069 #endif
11070 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11071         .ndo_poll_controller    = poll_bnx2x,
11072 #endif
11073 };
11074
11075 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11076                                     struct net_device *dev)
11077 {
11078         struct bnx2x *bp;
11079         int rc;
11080
11081         SET_NETDEV_DEV(dev, &pdev->dev);
11082         bp = netdev_priv(dev);
11083
11084         bp->dev = dev;
11085         bp->pdev = pdev;
11086         bp->flags = 0;
11087         bp->func = PCI_FUNC(pdev->devfn);
11088
11089         rc = pci_enable_device(pdev);
11090         if (rc) {
11091                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11092                 goto err_out;
11093         }
11094
11095         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11096                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11097                        " aborting\n");
11098                 rc = -ENODEV;
11099                 goto err_out_disable;
11100         }
11101
11102         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11103                 printk(KERN_ERR PFX "Cannot find second PCI device"
11104                        " base address, aborting\n");
11105                 rc = -ENODEV;
11106                 goto err_out_disable;
11107         }
11108
11109         if (atomic_read(&pdev->enable_cnt) == 1) {
11110                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11111                 if (rc) {
11112                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11113                                " aborting\n");
11114                         goto err_out_disable;
11115                 }
11116
11117                 pci_set_master(pdev);
11118                 pci_save_state(pdev);
11119         }
11120
11121         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11122         if (bp->pm_cap == 0) {
11123                 printk(KERN_ERR PFX "Cannot find power management"
11124                        " capability, aborting\n");
11125                 rc = -EIO;
11126                 goto err_out_release;
11127         }
11128
11129         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11130         if (bp->pcie_cap == 0) {
11131                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11132                        " aborting\n");
11133                 rc = -EIO;
11134                 goto err_out_release;
11135         }
11136
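        /* try a 64-bit DMA mask first (DAC); fall back to a 32-bit mask if
         * the platform cannot do 64-bit addressing
         */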
11137         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11138                 bp->flags |= USING_DAC_FLAG;
11139                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11140                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11141                                " failed, aborting\n");
11142                         rc = -EIO;
11143                         goto err_out_release;
11144                 }
11145
11146         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11147                 printk(KERN_ERR PFX "System does not support DMA,"
11148                        " aborting\n");
11149                 rc = -EIO;
11150                 goto err_out_release;
11151         }
11152
11153         dev->mem_start = pci_resource_start(pdev, 0);
11154         dev->base_addr = dev->mem_start;
11155         dev->mem_end = pci_resource_end(pdev, 0);
11156
11157         dev->irq = pdev->irq;
11158
11159         bp->regview = pci_ioremap_bar(pdev, 0);
11160         if (!bp->regview) {
11161                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11162                 rc = -ENOMEM;
11163                 goto err_out_release;
11164         }
11165
11166         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11167                                         min_t(u64, BNX2X_DB_SIZE,
11168                                               pci_resource_len(pdev, 2)));
11169         if (!bp->doorbells) {
11170                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11171                 rc = -ENOMEM;
11172                 goto err_out_unmap;
11173         }
11174
11175         bnx2x_set_power_state(bp, PCI_D0);
11176
11177         /* clean indirect addresses */
11178         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11179                                PCICFG_VENDOR_ID_OFFSET);
11180         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11181         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11182         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11183         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11184
11185         dev->watchdog_timeo = TX_TIMEOUT;
11186
11187         dev->netdev_ops = &bnx2x_netdev_ops;
11188         dev->ethtool_ops = &bnx2x_ethtool_ops;
11189         dev->features |= NETIF_F_SG;
11190         dev->features |= NETIF_F_HW_CSUM;
11191         if (bp->flags & USING_DAC_FLAG)
11192                 dev->features |= NETIF_F_HIGHDMA;
11193         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11194         dev->features |= NETIF_F_TSO6;
11195 #ifdef BCM_VLAN
11196         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11197         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11198
11199         dev->vlan_features |= NETIF_F_SG;
11200         dev->vlan_features |= NETIF_F_HW_CSUM;
11201         if (bp->flags & USING_DAC_FLAG)
11202                 dev->vlan_features |= NETIF_F_HIGHDMA;
11203         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11204         dev->vlan_features |= NETIF_F_TSO6;
11205 #endif
11206
11207         return 0;
11208
11209 err_out_unmap:
11210         if (bp->regview) {
11211                 iounmap(bp->regview);
11212                 bp->regview = NULL;
11213         }
11214         if (bp->doorbells) {
11215                 iounmap(bp->doorbells);
11216                 bp->doorbells = NULL;
11217         }
11218
11219 err_out_release:
11220         if (atomic_read(&pdev->enable_cnt) == 1)
11221                 pci_release_regions(pdev);
11222
11223 err_out_disable:
11224         pci_disable_device(pdev);
11225         pci_set_drvdata(pdev, NULL);
11226
11227 err_out:
11228         return rc;
11229 }
11230
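/* return the negotiated PCIe link width in lanes */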
11231 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11232 {
11233         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11234
11235         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11236         return val;
11237 }
11238
11239 /* return value: 1 = 2.5 GHz, 2 = 5 GHz */
11240 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11241 {
11242         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11243
11244         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11245         return val;
11246 }

11247 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11248 {
11249         struct bnx2x_fw_file_hdr *fw_hdr;
11250         struct bnx2x_fw_file_section *sections;
11251         u16 *ops_offsets;
11252         u32 offset, len, num_ops;
11253         int i;
11254         const struct firmware *firmware = bp->firmware;
11255         const u8 *fw_ver;
11256
11257         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11258                 return -EINVAL;
11259
11260         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11261         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11262
11263         /* Make sure none of the offsets and sizes make us read beyond
11264          * the end of the firmware data */
11265         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11266                 offset = be32_to_cpu(sections[i].offset);
11267                 len = be32_to_cpu(sections[i].len);
11268                 if (offset + len > firmware->size) {
11269                         printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11270                         return -EINVAL;
11271                 }
11272         }
11273
11274         /* Likewise for the init_ops offsets */
11275         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11276         ops_offsets = (u16 *)(firmware->data + offset);
11277         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11278
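        /* each 16-bit entry indexes into the init_ops array and must not
         * point past the entries present in the file
         */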
11279         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11280                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11281                         printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11282                         return -EINVAL;
11283                 }
11284         }
11285
11286         /* Check FW version */
11287         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11288         fw_ver = firmware->data + offset;
11289         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11290             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11291             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11292             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11293                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11294                                     " Should be %d.%d.%d.%d\n",
11295                        fw_ver[0], fw_ver[1], fw_ver[2],
11296                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11297                        BCM_5710_FW_MINOR_VERSION,
11298                        BCM_5710_FW_REVISION_VERSION,
11299                        BCM_5710_FW_ENGINEERING_VERSION);
11300                 return -EINVAL;
11301         }
11302
11303         return 0;
11304 }
11305
11306 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11307 {
11308         u32 i;
11309         const __be32 *source = (const __be32 *)_source;
11310         u32 *target = (u32 *)_target;
11311
11312         for (i = 0; i < n/4; i++)
11313                 target[i] = be32_to_cpu(source[i]);
11314 }
11315
11316 /*
11317    Ops array is stored in the following format:
11318    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11319  */
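/* e.g. the bytes 0x12 0x34 0x56 0x78 0xaa 0xbb 0xcc 0xdd decode to
 * op = 0x12, offset = 0x345678 and raw_data = 0xaabbccdd (a made-up
 * opcode, purely to illustrate the field layout)
 */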
11320 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11321 {
11322         u32 i, j, tmp;
11323         const __be32 *source = (const __be32 *)_source;
11324         struct raw_op *target = (struct raw_op *)_target;
11325
11326         for (i = 0, j = 0; i < n/8; i++, j += 2) {
11327                 tmp = be32_to_cpu(source[j]);
11328                 target[i].op = (tmp >> 24) & 0xff;
11329                 target[i].offset = tmp & 0xffffff;
11330                 target[i].raw_data = be32_to_cpu(source[j+1]);
11331         }
11332 }

11333 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11334 {
11335         u32 i;
11336         u16 *target = (u16 *)_target;
11337         const __be16 *source = (const __be16 *)_source;
11338
11339         for (i = 0; i < n/2; i++)
11340                 target[i] = be16_to_cpu(source[i]);
11341 }
11342
11343 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11344         do {   \
11345                 u32 len = be32_to_cpu(fw_hdr->arr.len);   \
11346                 bp->arr = kmalloc(len, GFP_KERNEL);  \
11347                 if (!bp->arr) { \
11348                         printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11349                         goto lbl; \
11350                 } \
11351                 func(bp->firmware->data + \
11352                         be32_to_cpu(fw_hdr->arr.offset), \
11353                         (u8 *)bp->arr, len); \
11354         } while (0)
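/* e.g. BNX2X_ALLOC_AND_SET(init_data, err_lbl, be32_to_cpu_n) allocates
 * bp->init_data, converts init_data.len bytes of the firmware image into
 * host order and jumps to err_lbl on allocation failure (err_lbl stands
 * for whatever error label the caller uses)
 */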
11355
11356
11357 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11358 {
11359         char fw_file_name[40] = {0};
11360         int rc, offset;
11361         struct bnx2x_fw_file_hdr *fw_hdr;
11362
11363         /* Create a FW file name */
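        /* the assembled name is "<chip prefix><major>.<minor>.<rev>.<eng>.fw" */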
11364         if (CHIP_IS_E1(bp))
11365                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11366         else
11367                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11368
11369         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11370                 BCM_5710_FW_MAJOR_VERSION,
11371                 BCM_5710_FW_MINOR_VERSION,
11372                 BCM_5710_FW_REVISION_VERSION,
11373                 BCM_5710_FW_ENGINEERING_VERSION);
11374
11375         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11376
11377         rc = request_firmware(&bp->firmware, fw_file_name, dev);
11378         if (rc) {
11379                 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11380                 goto request_firmware_exit;
11381         }
11382
11383         rc = bnx2x_check_firmware(bp);
11384         if (rc) {
11385                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11386                 goto request_firmware_exit;
11387         }
11388
11389         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11390
11391         /* Initialize the pointers to the init arrays */
11392         /* Blob */
11393         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11394
11395         /* Opcodes */
11396         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11397
11398         /* Offsets */
11399         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11400
11401         /* STORMs firmware */
11402         bp->tsem_int_table_data = bp->firmware->data +
11403                 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11404         bp->tsem_pram_data      = bp->firmware->data +
11405                 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11406         bp->usem_int_table_data = bp->firmware->data +
11407                 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11408         bp->usem_pram_data      = bp->firmware->data +
11409                 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11410         bp->xsem_int_table_data = bp->firmware->data +
11411                 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11412         bp->xsem_pram_data      = bp->firmware->data +
11413                 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11414         bp->csem_int_table_data = bp->firmware->data +
11415                 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11416         bp->csem_pram_data      = bp->firmware->data +
11417                 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11418
11419         return 0;
11420 init_offsets_alloc_err:
11421         kfree(bp->init_ops);
11422 init_ops_alloc_err:
11423         kfree(bp->init_data);
11424 request_firmware_exit:
11425         release_firmware(bp->firmware);
11426
11427         return rc;
11428 }
11429
11430
11431
11432 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11433                                     const struct pci_device_id *ent)
11434 {
11435         static int version_printed;
11436         struct net_device *dev = NULL;
11437         struct bnx2x *bp;
11438         int rc;
11439
11440         if (version_printed++ == 0)
11441                 printk(KERN_INFO "%s", version);
11442
11443         /* dev zeroed in alloc_etherdev_mq */
11444         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11445         if (!dev) {
11446                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11447                 return -ENOMEM;
11448         }
11449
11450         bp = netdev_priv(dev);
11451         bp->msglevel = debug;
11452
11453         rc = bnx2x_init_dev(pdev, dev);
11454         if (rc < 0) {
11455                 free_netdev(dev);
11456                 return rc;
11457         }
11458
11459         pci_set_drvdata(pdev, dev);
11460
11461         rc = bnx2x_init_bp(bp);
11462         if (rc)
11463                 goto init_one_exit;
11464
11465         /* Set init arrays */
11466         rc = bnx2x_init_firmware(bp, &pdev->dev);
11467         if (rc) {
11468                 printk(KERN_ERR PFX "Error loading firmware\n");
11469                 goto init_one_exit;
11470         }
11471
11472         rc = register_netdev(dev);
11473         if (rc) {
11474                 dev_err(&pdev->dev, "Cannot register net device\n");
11475                 goto init_one_exit;
11476         }
11477
11478         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11479                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11480                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11481                bnx2x_get_pcie_width(bp),
11482                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11483                dev->base_addr, bp->pdev->irq);
11484         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11485
11486         return 0;
11487
11488 init_one_exit:
11489         if (bp->regview)
11490                 iounmap(bp->regview);
11491
11492         if (bp->doorbells)
11493                 iounmap(bp->doorbells);
11494
11495         free_netdev(dev);
11496
11497         if (atomic_read(&pdev->enable_cnt) == 1)
11498                 pci_release_regions(pdev);
11499
11500         pci_disable_device(pdev);
11501         pci_set_drvdata(pdev, NULL);
11502
11503         return rc;
11504 }
11505
11506 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11507 {
11508         struct net_device *dev = pci_get_drvdata(pdev);
11509         struct bnx2x *bp;
11510
11511         if (!dev) {
11512                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11513                 return;
11514         }
11515         bp = netdev_priv(dev);
11516
11517         unregister_netdev(dev);
11518
11519         kfree(bp->init_ops_offsets);
11520         kfree(bp->init_ops);
11521         kfree(bp->init_data);
11522         release_firmware(bp->firmware);
11523
11524         if (bp->regview)
11525                 iounmap(bp->regview);
11526
11527         if (bp->doorbells)
11528                 iounmap(bp->doorbells);
11529
11530         free_netdev(dev);
11531
11532         if (atomic_read(&pdev->enable_cnt) == 1)
11533                 pci_release_regions(pdev);
11534
11535         pci_disable_device(pdev);
11536         pci_set_drvdata(pdev, NULL);
11537 }
11538
11539 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11540 {
11541         struct net_device *dev = pci_get_drvdata(pdev);
11542         struct bnx2x *bp;
11543
11544         if (!dev) {
11545                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11546                 return -ENODEV;
11547         }
11548         bp = netdev_priv(dev);
11549
11550         rtnl_lock();
11551
11552         pci_save_state(pdev);
11553
11554         if (!netif_running(dev)) {
11555                 rtnl_unlock();
11556                 return 0;
11557         }
11558
11559         netif_device_detach(dev);
11560
11561         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11562
11563         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11564
11565         rtnl_unlock();
11566
11567         return 0;
11568 }
11569
11570 static int bnx2x_resume(struct pci_dev *pdev)
11571 {
11572         struct net_device *dev = pci_get_drvdata(pdev);
11573         struct bnx2x *bp;
11574         int rc;
11575
11576         if (!dev) {
11577                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11578                 return -ENODEV;
11579         }
11580         bp = netdev_priv(dev);
11581
11582         rtnl_lock();
11583
11584         pci_restore_state(pdev);
11585
11586         if (!netif_running(dev)) {
11587                 rtnl_unlock();
11588                 return 0;
11589         }
11590
11591         bnx2x_set_power_state(bp, PCI_D0);
11592         netif_device_attach(dev);
11593
11594         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11595
11596         rtnl_unlock();
11597
11598         return rc;
11599 }
11600
11601 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11602 {
11603         int i;
11604
11605         bp->state = BNX2X_STATE_ERROR;
11606
11607         bp->rx_mode = BNX2X_RX_MODE_NONE;
11608
11609         bnx2x_netif_stop(bp, 0);
11610
11611         del_timer_sync(&bp->timer);
11612         bp->stats_state = STATS_STATE_DISABLED;
11613         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11614
11615         /* Release IRQs */
11616         bnx2x_free_irq(bp);
11617
11618         if (CHIP_IS_E1(bp)) {
11619                 struct mac_configuration_cmd *config =
11620                                                 bnx2x_sp(bp, mcast_config);
11621
11622                 for (i = 0; i < config->hdr.length; i++)
11623                         CAM_INVALIDATE(config->config_table[i]);
11624         }
11625
11626         /* Free SKBs, SGEs, TPA pool and driver internals */
11627         bnx2x_free_skbs(bp);
11628         for_each_rx_queue(bp, i)
11629                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11630         for_each_rx_queue(bp, i)
11631                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11632         bnx2x_free_mem(bp);
11633
11634         bp->state = BNX2X_STATE_CLOSED;
11635
11636         netif_carrier_off(bp->dev);
11637
11638         return 0;
11639 }
11640
11641 static void bnx2x_eeh_recover(struct bnx2x *bp)
11642 {
11643         u32 val;
11644
11645         mutex_init(&bp->port.phy_mutex);
11646
11647         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11648         bp->link_params.shmem_base = bp->common.shmem_base;
11649         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11650
11651         if (!bp->common.shmem_base ||
11652             (bp->common.shmem_base < 0xA0000) ||
11653             (bp->common.shmem_base >= 0xC0000)) {
11654                 BNX2X_DEV_INFO("MCP not active\n");
11655                 bp->flags |= NO_MCP_FLAG;
11656                 return;
11657         }
11658
11659         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11660         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11661                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11662                 BNX2X_ERR("BAD MCP validity signature\n");
11663
11664         if (!BP_NOMCP(bp)) {
11665                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11666                               & DRV_MSG_SEQ_NUMBER_MASK);
11667                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11668         }
11669 }
11670
11671 /**
11672  * bnx2x_io_error_detected - called when PCI error is detected
11673  * @pdev: Pointer to PCI device
11674  * @state: The current pci connection state
11675  *
11676  * This function is called after a PCI bus error affecting
11677  * this device has been detected.
11678  */
11679 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11680                                                 pci_channel_state_t state)
11681 {
11682         struct net_device *dev = pci_get_drvdata(pdev);
11683         struct bnx2x *bp = netdev_priv(dev);
11684
11685         rtnl_lock();
11686
11687         netif_device_detach(dev);
11688
11689         if (netif_running(dev))
11690                 bnx2x_eeh_nic_unload(bp);
11691
11692         pci_disable_device(pdev);
11693
11694         rtnl_unlock();
11695
11696         /* Request a slot reset */
11697         return PCI_ERS_RESULT_NEED_RESET;
11698 }
11699
11700 /**
11701  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11702  * @pdev: Pointer to PCI device
11703  *
11704  * Restart the card from scratch, as if from a cold boot.
11705  */
11706 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11707 {
11708         struct net_device *dev = pci_get_drvdata(pdev);
11709         struct bnx2x *bp = netdev_priv(dev);
11710
11711         rtnl_lock();
11712
11713         if (pci_enable_device(pdev)) {
11714                 dev_err(&pdev->dev,
11715                         "Cannot re-enable PCI device after reset\n");
11716                 rtnl_unlock();
11717                 return PCI_ERS_RESULT_DISCONNECT;
11718         }
11719
11720         pci_set_master(pdev);
11721         pci_restore_state(pdev);
11722
11723         if (netif_running(dev))
11724                 bnx2x_set_power_state(bp, PCI_D0);
11725
11726         rtnl_unlock();
11727
11728         return PCI_ERS_RESULT_RECOVERED;
11729 }
11730
11731 /**
11732  * bnx2x_io_resume - called when traffic can start flowing again
11733  * @pdev: Pointer to PCI device
11734  *
11735  * This callback is called when the error recovery driver tells us that
11736  * it's OK to resume normal operation.
11737  */
11738 static void bnx2x_io_resume(struct pci_dev *pdev)
11739 {
11740         struct net_device *dev = pci_get_drvdata(pdev);
11741         struct bnx2x *bp = netdev_priv(dev);
11742
11743         rtnl_lock();
11744
11745         bnx2x_eeh_recover(bp);
11746
11747         if (netif_running(dev))
11748                 bnx2x_nic_load(bp, LOAD_NORMAL);
11749
11750         netif_device_attach(dev);
11751
11752         rtnl_unlock();
11753 }
11754
11755 static struct pci_error_handlers bnx2x_err_handler = {
11756         .error_detected = bnx2x_io_error_detected,
11757         .slot_reset     = bnx2x_io_slot_reset,
11758         .resume         = bnx2x_io_resume,
11759 };
11760
11761 static struct pci_driver bnx2x_pci_driver = {
11762         .name        = DRV_MODULE_NAME,
11763         .id_table    = bnx2x_pci_tbl,
11764         .probe       = bnx2x_init_one,
11765         .remove      = __devexit_p(bnx2x_remove_one),
11766         .suspend     = bnx2x_suspend,
11767         .resume      = bnx2x_resume,
11768         .err_handler = &bnx2x_err_handler,
11769 };
11770
11771 static int __init bnx2x_init(void)
11772 {
11773         int ret;
11774
11775         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11776         if (bnx2x_wq == NULL) {
11777                 printk(KERN_ERR PFX "Cannot create workqueue\n");
11778                 return -ENOMEM;
11779         }
11780
11781         ret = pci_register_driver(&bnx2x_pci_driver);
11782         if (ret) {
11783                 printk(KERN_ERR PFX "Cannot register driver\n");
11784                 destroy_workqueue(bnx2x_wq);
11785         }
11786         return ret;
11787 }
11788
11789 static void __exit bnx2x_cleanup(void)
11790 {
11791         pci_unregister_driver(&bnx2x_pci_driver);
11792
11793         destroy_workqueue(bnx2x_wq);
11794 }
11795
11796 module_init(bnx2x_init);
11797 module_exit(bnx2x_cleanup);
11798
11799