/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.48.102"
#define DRV_MODULE_RELDATE      "2009/02/12"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
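
/* Both indirect accessors above tunnel the access through the GRC window
 * in PCI config space (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA) and then park
 * the window back at PCICFG_VENDOR_ID_OFFSET, so a later unrelated config
 * cycle cannot be misinterpreted as a GRC access.
 */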

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
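
/* bnx2x_write_dmae() serializes callers on bp->dmae_mutex and then
 * busy-waits on the wb_comp word that the DMAE block writes back on
 * completion; the wait is bounded (cnt, ~200 iterations) and a timeout
 * is only logged, the call itself cannot fail.
 */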

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
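
/* bnx2x_mc_assert() walks the assert lists of the four storm processors
 * (X/T/C/U) and returns how many valid assert entries were found; a row0
 * value of COMMON_ASM_INVALID_ASSERT_OPCODE terminates each list.
 */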

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("queue[%d]: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}
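
/* The quiesce order above matters: intr_sem is bumped first so any ISR
 * that still runs exits early, the HC is optionally masked, every vector
 * is synchronized, and only then is the slowpath work flushed so nothing
 * remains in flight on bnx2x_wq.
 */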

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}
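
/* The return value of bnx2x_update_fpsb_idx() is a bitmask: bit 0 means
 * the CSTORM index moved (Tx completions), bit 1 means the USTORM index
 * moved (Rx completions); 0 means the status block carried no new work.
 */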

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
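
/* bnx2x_tx_avail(): prod - cons is biased by NUM_TX_RINGS because each
 * ring page contributes one "next-page" BD that can never carry data,
 * so the value returned is the number of BDs start_xmit() may still use.
 */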

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}
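
/* Each u64 in fp->sge_mask tracks RX_SGE_MASK_ELEM_SZ ring entries; the
 * helper above re-clears only the last two bits of every ring page, i.e.
 * the "next-page" entries that the FW never reports as consumed.
 */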

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }
                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
         * as IA-64. The following barrier is also mandatory since the FW
         * assumes BDs must have buffers.
1427          */
1428         wmb();
1429
1430         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1431                 REG_WR(bp, BAR_USTRORM_INTMEM +
1432                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1433                        ((u32 *)&rx_prods)[i]);
1434
1435         mmiowb(); /* keep prod updates ordered */
1436
1437         DP(NETIF_MSG_RX_STATUS,
1438            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1439            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1440 }
1441
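/*
 * RX completion handler (NAPI poll helper).  Walks the RX completion
 * queue from the software consumer up to the hardware consumer taken
 * from the status block, dispatching each CQE as either a slowpath
 * event, a TPA start/end indication or a regular packet, and stops
 * once 'budget' packets have been processed.
 */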
1442 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1443 {
1444         struct bnx2x *bp = fp->bp;
1445         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1446         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1447         int rx_pkt = 0;
1448
1449 #ifdef BNX2X_STOP_ON_ERROR
1450         if (unlikely(bp->panic))
1451                 return 0;
1452 #endif
1453
1454         /* The CQ "next element" is the same size as a regular element,
1455            so it is safe to simply step over it here */
1456         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1457         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1458                 hw_comp_cons++;
1459
1460         bd_cons = fp->rx_bd_cons;
1461         bd_prod = fp->rx_bd_prod;
1462         bd_prod_fw = bd_prod;
1463         sw_comp_cons = fp->rx_comp_cons;
1464         sw_comp_prod = fp->rx_comp_prod;
1465
1466         /* Memory barrier necessary as speculative reads of the rx
1467          * buffer can be ahead of the index in the status block
1468          */
1469         rmb();
1470
1471         DP(NETIF_MSG_RX_STATUS,
1472            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1473            fp->index, hw_comp_cons, sw_comp_cons);
1474
1475         while (sw_comp_cons != hw_comp_cons) {
1476                 struct sw_rx_bd *rx_buf = NULL;
1477                 struct sk_buff *skb;
1478                 union eth_rx_cqe *cqe;
1479                 u8 cqe_fp_flags;
1480                 u16 len, pad;
1481
1482                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1483                 bd_prod = RX_BD(bd_prod);
1484                 bd_cons = RX_BD(bd_cons);
1485
1486                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1487                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1488
1489                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1490                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1491                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1492                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1493                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1494                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1495
1496                 /* is this a slowpath msg? */
1497                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1498                         bnx2x_sp_event(fp, cqe);
1499                         goto next_cqe;
1500
1501                 /* this is an rx packet */
1502                 } else {
1503                         rx_buf = &fp->rx_buf_ring[bd_cons];
1504                         skb = rx_buf->skb;
1505                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1506                         pad = cqe->fast_path_cqe.placement_offset;
1507
1508                         /* If CQE is marked both TPA_START and TPA_END
1509                            it is a non-TPA CQE */
1510                         if ((!fp->disable_tpa) &&
1511                             (TPA_TYPE(cqe_fp_flags) !=
1512                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1513                                 u16 queue = cqe->fast_path_cqe.queue_index;
1514
1515                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1516                                         DP(NETIF_MSG_RX_STATUS,
1517                                            "calling tpa_start on queue %d\n",
1518                                            queue);
1519
1520                                         bnx2x_tpa_start(fp, queue, skb,
1521                                                         bd_cons, bd_prod);
1522                                         goto next_rx;
1523                                 }
1524
1525                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1526                                         DP(NETIF_MSG_RX_STATUS,
1527                                            "calling tpa_stop on queue %d\n",
1528                                            queue);
1529
1530                                         if (!BNX2X_RX_SUM_FIX(cqe))
1531                                                 BNX2X_ERR("STOP on non-TCP "
1532                                                           "data\n");
1533
1534                                         /* This is the size of the linear
1535                                            data on this skb */
1536                                         len = le16_to_cpu(cqe->fast_path_cqe.
1537                                                                 len_on_bd);
1538                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1539                                                     len, cqe, comp_ring_cons);
1540 #ifdef BNX2X_STOP_ON_ERROR
1541                                         if (bp->panic)
1542                                                 return -EINVAL;
1543 #endif
1544
1545                                         bnx2x_update_sge_prod(fp,
1546                                                         &cqe->fast_path_cqe);
1547                                         goto next_cqe;
1548                                 }
1549                         }
1550
1551                         pci_dma_sync_single_for_device(bp->pdev,
1552                                         pci_unmap_addr(rx_buf, mapping),
1553                                                        pad + RX_COPY_THRESH,
1554                                                        PCI_DMA_FROMDEVICE);
1555                         prefetch(skb);
1556                         prefetch(((char *)(skb)) + 128);
1557
1558                         /* is this an error packet? */
1559                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1560                                 DP(NETIF_MSG_RX_ERR,
1561                                    "ERROR  flags %x  rx packet %u\n",
1562                                    cqe_fp_flags, sw_comp_cons);
1563                                 fp->eth_q_stats.rx_err_discard_pkt++;
1564                                 goto reuse_rx;
1565                         }
1566
1567                         /* Since we don't have a jumbo ring,
1568                          * copy small packets if the MTU > 1500
1569                          */
1570                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1571                             (len <= RX_COPY_THRESH)) {
1572                                 struct sk_buff *new_skb;
1573
1574                                 new_skb = netdev_alloc_skb(bp->dev,
1575                                                            len + pad);
1576                                 if (new_skb == NULL) {
1577                                         DP(NETIF_MSG_RX_ERR,
1578                                            "ERROR  packet dropped "
1579                                            "because of alloc failure\n");
1580                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1581                                         goto reuse_rx;
1582                                 }
1583
1584                                 /* aligned copy */
1585                                 skb_copy_from_linear_data_offset(skb, pad,
1586                                                     new_skb->data + pad, len);
1587                                 skb_reserve(new_skb, pad);
1588                                 skb_put(new_skb, len);
1589
1590                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1591
1592                                 skb = new_skb;
1593
1594                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1595                                 pci_unmap_single(bp->pdev,
1596                                         pci_unmap_addr(rx_buf, mapping),
1597                                                  bp->rx_buf_size,
1598                                                  PCI_DMA_FROMDEVICE);
1599                                 skb_reserve(skb, pad);
1600                                 skb_put(skb, len);
1601
1602                         } else {
1603                                 DP(NETIF_MSG_RX_ERR,
1604                                    "ERROR  packet dropped because "
1605                                    "of alloc failure\n");
1606                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1607 reuse_rx:
1608                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1609                                 goto next_rx;
1610                         }
1611
1612                         skb->protocol = eth_type_trans(skb, bp->dev);
1613
1614                         skb->ip_summed = CHECKSUM_NONE;
1615                         if (bp->rx_csum) {
1616                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1617                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1618                                 else
1619                                         fp->eth_q_stats.hw_csum_err++;
1620                         }
1621                 }
1622
1623                 skb_record_rx_queue(skb, fp->index);
1624 #ifdef BCM_VLAN
1625                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1626                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1627                      PARSING_FLAGS_VLAN))
1628                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1629                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1630                 else
1631 #endif
1632                         netif_receive_skb(skb);
1633
1634
1635 next_rx:
1636                 rx_buf->skb = NULL;
1637
1638                 bd_cons = NEXT_RX_IDX(bd_cons);
1639                 bd_prod = NEXT_RX_IDX(bd_prod);
1640                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1641                 rx_pkt++;
1642 next_cqe:
1643                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1644                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1645
1646                 if (rx_pkt == budget)
1647                         break;
1648         } /* while */
1649
1650         fp->rx_bd_cons = bd_cons;
1651         fp->rx_bd_prod = bd_prod_fw;
1652         fp->rx_comp_cons = sw_comp_cons;
1653         fp->rx_comp_prod = sw_comp_prod;
1654
1655         /* Update producers */
1656         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1657                              fp->rx_sge_prod);
1658
1659         fp->rx_pkt += rx_pkt;
1660         fp->rx_calls++;
1661
1662         return rx_pkt;
1663 }
1664
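/*
 * Per-queue MSI-X handler: acknowledge the status block with the IGU
 * interrupt left disabled and hand the actual RX/TX work to NAPI.
 */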
1665 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1666 {
1667         struct bnx2x_fastpath *fp = fp_cookie;
1668         struct bnx2x *bp = fp->bp;
1669         int index = fp->index;
1670
1671         /* Return here if interrupt is disabled */
1672         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1673                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1674                 return IRQ_HANDLED;
1675         }
1676
1677         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1678            index, fp->sb_id);
1679         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1680
1681 #ifdef BNX2X_STOP_ON_ERROR
1682         if (unlikely(bp->panic))
1683                 return IRQ_HANDLED;
1684 #endif
1685
1686         prefetch(fp->rx_cons_sb);
1687         prefetch(fp->tx_cons_sb);
1688         prefetch(&fp->status_blk->c_status_block.status_block_index);
1689         prefetch(&fp->status_blk->u_status_block.status_block_index);
1690
1691         napi_schedule(&bnx2x_fp(bp, index, napi));
1692
1693         return IRQ_HANDLED;
1694 }
1695
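/*
 * INT#x/MSI handler (possibly shared).  The acked status is a bit
 * mask: bit 0 requests the slowpath task, while the fastpath bit -
 * computed below as 0x2 << sb_id for queue 0 - schedules NAPI for
 * that queue.
 */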
1696 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1697 {
1698         struct bnx2x *bp = netdev_priv(dev_instance);
1699         u16 status = bnx2x_ack_int(bp);
1700         u16 mask;
1701
1702         /* Return here if interrupt is shared and it's not for us */
1703         if (unlikely(status == 0)) {
1704                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1705                 return IRQ_NONE;
1706         }
1707         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1708
1709         /* Return here if interrupt is disabled */
1710         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1712                 return IRQ_HANDLED;
1713         }
1714
1715 #ifdef BNX2X_STOP_ON_ERROR
1716         if (unlikely(bp->panic))
1717                 return IRQ_HANDLED;
1718 #endif
1719
1720         mask = 0x2 << bp->fp[0].sb_id;
1721         if (status & mask) {
1722                 struct bnx2x_fastpath *fp = &bp->fp[0];
1723
1724                 prefetch(fp->rx_cons_sb);
1725                 prefetch(fp->tx_cons_sb);
1726                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1727                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1728
1729                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1730
1731                 status &= ~mask;
1732         }
1733
1734
1735         if (unlikely(status & 0x1)) {
1736                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1737
1738                 status &= ~0x1;
1739                 if (!status)
1740                         return IRQ_HANDLED;
1741         }
1742
1743         if (status)
1744                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1745                    status);
1746
1747         return IRQ_HANDLED;
1748 }
1749
1750 /* end of fast path */
1751
1752 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1753
1754 /* Link */
1755
1756 /*
1757  * General service functions
1758  */
1759
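/*
 * The HW lock registers come in pairs: writing the resource bit to
 * (control_reg + 4) requests the lock, and reading the bit back from
 * control_reg confirms ownership.  Functions 0-5 use DRIVER_CONTROL_1,
 * functions 6-7 use DRIVER_CONTROL_7, each function 8 bytes apart.
 */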
1760 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1761 {
1762         u32 lock_status;
1763         u32 resource_bit = (1 << resource);
1764         int func = BP_FUNC(bp);
1765         u32 hw_lock_control_reg;
1766         int cnt;
1767
1768         /* Validating that the resource is within range */
1769         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1770                 DP(NETIF_MSG_HW,
1771                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1772                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1773                 return -EINVAL;
1774         }
1775
1776         if (func <= 5) {
1777                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1778         } else {
1779                 hw_lock_control_reg =
1780                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1781         }
1782
1783         /* Validating that the resource is not already taken */
1784         lock_status = REG_RD(bp, hw_lock_control_reg);
1785         if (lock_status & resource_bit) {
1786                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1787                    lock_status, resource_bit);
1788                 return -EEXIST;
1789         }
1790
1791         /* Try for 5 seconds, polling every 5 ms */
1792         for (cnt = 0; cnt < 1000; cnt++) {
1793                 /* Try to acquire the lock */
1794                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1795                 lock_status = REG_RD(bp, hw_lock_control_reg);
1796                 if (lock_status & resource_bit)
1797                         return 0;
1798
1799                 msleep(5);
1800         }
1801         DP(NETIF_MSG_HW, "Timeout\n");
1802         return -EAGAIN;
1803 }
1804
1805 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1806 {
1807         u32 lock_status;
1808         u32 resource_bit = (1 << resource);
1809         int func = BP_FUNC(bp);
1810         u32 hw_lock_control_reg;
1811
1812         /* Validating that the resource is within range */
1813         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1814                 DP(NETIF_MSG_HW,
1815                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1816                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1817                 return -EINVAL;
1818         }
1819
1820         if (func <= 5) {
1821                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1822         } else {
1823                 hw_lock_control_reg =
1824                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1825         }
1826
1827         /* Validating that the resource is currently taken */
1828         lock_status = REG_RD(bp, hw_lock_control_reg);
1829         if (!(lock_status & resource_bit)) {
1830                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1831                    lock_status, resource_bit);
1832                 return -EFAULT;
1833         }
1834
1835         REG_WR(bp, hw_lock_control_reg, resource_bit);
1836         return 0;
1837 }
1838
1839 /* HW Lock for shared dual port PHYs */
1840 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1841 {
1842         mutex_lock(&bp->port.phy_mutex);
1843
1844         if (bp->port.need_hw_lock)
1845                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1846 }
1847
1848 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1849 {
1850         if (bp->port.need_hw_lock)
1851                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1852
1853         mutex_unlock(&bp->port.phy_mutex);
1854 }
1855
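/*
 * GPIO helpers.  The pins are shared between the two ports: when the
 * port-swap register is set and the strap override is active, the
 * other port's pin block is used - hence the (swap ^ port) value and
 * the per-port shift applied below.
 */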
1856 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1857 {
1858         /* The GPIO should be swapped if the swap register is set and active */
1859         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1860                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1861         int gpio_shift = gpio_num +
1862                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1863         u32 gpio_mask = (1 << gpio_shift);
1864         u32 gpio_reg;
1865         int value;
1866
1867         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1868                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1869                 return -EINVAL;
1870         }
1871
1872         /* read GPIO value */
1873         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1874
1875         /* get the requested pin value */
1876         if ((gpio_reg & gpio_mask) == gpio_mask)
1877                 value = 1;
1878         else
1879                 value = 0;
1880
1881         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1882
1883         return value;
1884 }
1885
1886 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1887 {
1888         /* The GPIO should be swapped if the swap register is set and active */
1889         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1890                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1891         int gpio_shift = gpio_num +
1892                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1893         u32 gpio_mask = (1 << gpio_shift);
1894         u32 gpio_reg;
1895
1896         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1897                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1898                 return -EINVAL;
1899         }
1900
1901         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1902         /* read GPIO and mask out all but the float bits */
1903         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1904
1905         switch (mode) {
1906         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1907                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1908                    gpio_num, gpio_shift);
1909                 /* clear FLOAT and set CLR */
1910                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1911                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1912                 break;
1913
1914         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1915                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1916                    gpio_num, gpio_shift);
1917                 /* clear FLOAT and set SET */
1918                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1919                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1920                 break;
1921
1922         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1923                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1924                    gpio_num, gpio_shift);
1925                 /* set FLOAT */
1926                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1927                 break;
1928
1929         default:
1930                 break;
1931         }
1932
1933         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1934         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1935
1936         return 0;
1937 }
1938
1939 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1940 {
1941         /* The GPIO should be swapped if the swap register is set and active */
1942         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1943                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1944         int gpio_shift = gpio_num +
1945                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1946         u32 gpio_mask = (1 << gpio_shift);
1947         u32 gpio_reg;
1948
1949         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1950                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1951                 return -EINVAL;
1952         }
1953
1954         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1955         /* read GPIO int */
1956         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1957
1958         switch (mode) {
1959         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1960                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1961                                    "output low\n", gpio_num, gpio_shift);
1962                 /* clear SET and set CLR */
1963                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1964                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1965                 break;
1966
1967         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1968                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1969                                    "output high\n", gpio_num, gpio_shift);
1970                 /* clear CLR and set SET */
1971                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1972                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1973                 break;
1974
1975         default:
1976                 break;
1977         }
1978
1979         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1980         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981
1982         return 0;
1983 }
1984
1985 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1986 {
1987         u32 spio_mask = (1 << spio_num);
1988         u32 spio_reg;
1989
1990         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1991             (spio_num > MISC_REGISTERS_SPIO_7)) {
1992                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1993                 return -EINVAL;
1994         }
1995
1996         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1997         /* read SPIO and mask out all but the float bits */
1998         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1999
2000         switch (mode) {
2001         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2002                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2003                 /* clear FLOAT and set CLR */
2004                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2005                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2006                 break;
2007
2008         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2009                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2010                 /* clear FLOAT and set SET */
2011                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2012                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2013                 break;
2014
2015         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2016                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2017                 /* set FLOAT */
2018                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2019                 break;
2020
2021         default:
2022                 break;
2023         }
2024
2025         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2026         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2027
2028         return 0;
2029 }
2030
2031 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2032 {
2033         switch (bp->link_vars.ieee_fc &
2034                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2035         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2036                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2037                                           ADVERTISED_Pause);
2038                 break;
2039
2040         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2041                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2042                                          ADVERTISED_Pause);
2043                 break;
2044
2045         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2046                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2047                 break;
2048
2049         default:
2050                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2051                                           ADVERTISED_Pause);
2052                 break;
2053         }
2054 }
2055
2056 static void bnx2x_link_report(struct bnx2x *bp)
2057 {
2058         if (bp->link_vars.link_up) {
2059                 if (bp->state == BNX2X_STATE_OPEN)
2060                         netif_carrier_on(bp->dev);
2061                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2062
2063                 printk("%d Mbps ", bp->link_vars.line_speed);
2064
2065                 if (bp->link_vars.duplex == DUPLEX_FULL)
2066                         printk("full duplex");
2067                 else
2068                         printk("half duplex");
2069
2070                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2071                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2072                                 printk(", receive ");
2073                                 if (bp->link_vars.flow_ctrl &
2074                                     BNX2X_FLOW_CTRL_TX)
2075                                         printk("& transmit ");
2076                         } else {
2077                                 printk(", transmit ");
2078                         }
2079                         printk("flow control ON");
2080                 }
2081                 printk("\n");
2082
2083         } else { /* link_down */
2084                 netif_carrier_off(bp->dev);
2085                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2086         }
2087 }
2088
2089 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2090 {
2091         if (!BP_NOMCP(bp)) {
2092                 u8 rc;
2093
2094                 /* Initialize link parameters structure variables */
2095                 /* It is recommended to turn off RX FC for jumbo frames
2096                    for better performance */
2097                 if (IS_E1HMF(bp))
2098                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2099                 else if (bp->dev->mtu > 5000)
2100                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2101                 else
2102                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2103
2104                 bnx2x_acquire_phy_lock(bp);
2105
2106                 if (load_mode == LOAD_DIAG)
2107                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2108
2109                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2110
2111                 bnx2x_release_phy_lock(bp);
2112
2113                 bnx2x_calc_fc_adv(bp);
2114
2115                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2116                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2117                         bnx2x_link_report(bp);
2118                 }
2119
2120                 return rc;
2121         }
2122         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2123         return -EINVAL;
2124 }
2125
2126 static void bnx2x_link_set(struct bnx2x *bp)
2127 {
2128         if (!BP_NOMCP(bp)) {
2129                 bnx2x_acquire_phy_lock(bp);
2130                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2131                 bnx2x_release_phy_lock(bp);
2132
2133                 bnx2x_calc_fc_adv(bp);
2134         } else
2135                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2136 }
2137
2138 static void bnx2x__link_reset(struct bnx2x *bp)
2139 {
2140         if (!BP_NOMCP(bp)) {
2141                 bnx2x_acquire_phy_lock(bp);
2142                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2143                 bnx2x_release_phy_lock(bp);
2144         } else
2145                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2146 }
2147
2148 static u8 bnx2x_link_test(struct bnx2x *bp)
2149 {
2150         u8 rc;
2151
2152         bnx2x_acquire_phy_lock(bp);
2153         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2154         bnx2x_release_phy_lock(bp);
2155
2156         return rc;
2157 }
2158
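/*
 * r_param converts the line speed to bytes/usec: line_speed is in
 * Mbps (i.e. bits/usec), so divide by 8.  As a worked example under
 * that reading, at 10G r_param = 10000 / 8 = 1250 bytes/usec, which
 * sizes the rate-shaping threshold and the fairness timer resolution
 * computed below.
 */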
2159 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2160 {
2161         u32 r_param = bp->link_vars.line_speed / 8;
2162         u32 fair_periodic_timeout_usec;
2163         u32 t_fair;
2164
2165         memset(&(bp->cmng.rs_vars), 0,
2166                sizeof(struct rate_shaping_vars_per_port));
2167         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2168
2169         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2170         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2171
2172         /* this is the threshold below which no timer arming will occur;
2173            the 1.25 coefficient makes the threshold a little bigger than
2174            the real time, to compensate for timer inaccuracy */
2175         bp->cmng.rs_vars.rs_threshold =
2176                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2177
2178         /* resolution of fairness timer */
2179         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2180         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2181         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2182
2183         /* this is the threshold below which we won't arm the timer anymore */
2184         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2185
2186         /* we multiply by 1e3/8 to get bytes/msec.
2187            We don't want the credits to exceed
2188            t_fair*FAIR_MEM (the algorithm resolution) */
2189         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2190         /* since each tick is 4 usec */
2191         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2192 }
2193
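/*
 * Per-VN min/max rates come from the MF configuration in shared
 * memory; the MIN_BW/MAX_BW fields are in units of 100 Mbps, hence
 * the multiplication by 100 below.  The quota computed from the max
 * rate is bytes per rate-shaping period: Mbps (bits/usec) times the
 * period in usec, divided by 8.
 */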
2194 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2195 {
2196         struct rate_shaping_vars_per_vn m_rs_vn;
2197         struct fairness_vars_per_vn m_fair_vn;
2198         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2199         u16 vn_min_rate, vn_max_rate;
2200         int i;
2201
2202         /* If the function is hidden - set min and max to zero */
2203         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2204                 vn_min_rate = 0;
2205                 vn_max_rate = 0;
2206
2207         } else {
2208                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2209                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2210                 /* If fairness is enabled (not all min rates are zero) and
2211                    the current min rate is zero - set it to 1.
2212                    This is a requirement of the algorithm. */
2213                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2214                         vn_min_rate = DEF_MIN_RATE;
2215                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2216                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2217         }
2218
2219         DP(NETIF_MSG_IFUP,
2220            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2221            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2222
2223         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2224         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2225
2226         /* global vn counter - maximal Mbps for this vn */
2227         m_rs_vn.vn_counter.rate = vn_max_rate;
2228
2229         /* quota - number of bytes transmitted in this period */
2230         m_rs_vn.vn_counter.quota =
2231                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2232
2233         if (bp->vn_weight_sum) {
2234                 /* credit for each period of the fairness algorithm:
2235                    number of bytes in T_FAIR (the VNs share the port rate).
2236                    vn_weight_sum should not be larger than 10000, thus
2237                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2238                    than zero */
2239                 m_fair_vn.vn_credit_delta =
2240                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2241                                                  (8 * bp->vn_weight_sum))),
2242                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2243                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2244                    m_fair_vn.vn_credit_delta);
2245         }
2246
2247         /* Store it to internal memory */
2248         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2249                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2250                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2251                        ((u32 *)(&m_rs_vn))[i]);
2252
2253         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2254                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2255                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2256                        ((u32 *)(&m_fair_vn))[i]);
2257 }
2258
2259
2260 /* This function is called upon link interrupt */
2261 static void bnx2x_link_attn(struct bnx2x *bp)
2262 {
2263         /* Make sure that we are synced with the current statistics */
2264         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2265
2266         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2267
2268         if (bp->link_vars.link_up) {
2269
2270                 /* dropless flow control */
2271                 if (CHIP_IS_E1H(bp)) {
2272                         int port = BP_PORT(bp);
2273                         u32 pause_enabled = 0;
2274
2275                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2276                                 pause_enabled = 1;
2277
2278                         REG_WR(bp, BAR_USTRORM_INTMEM +
2279                                USTORM_PAUSE_ENABLED_OFFSET(port),
2280                                pause_enabled);
2281                 }
2282
2283                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2284                         struct host_port_stats *pstats;
2285
2286                         pstats = bnx2x_sp(bp, port_stats);
2287                         /* reset old bmac stats */
2288                         memset(&(pstats->mac_stx[0]), 0,
2289                                sizeof(struct mac_stx));
2290                 }
2291                 if ((bp->state == BNX2X_STATE_OPEN) ||
2292                     (bp->state == BNX2X_STATE_DISABLED))
2293                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2294         }
2295
2296         /* indicate link status */
2297         bnx2x_link_report(bp);
2298
2299         if (IS_E1HMF(bp)) {
2300                 int port = BP_PORT(bp);
2301                 int func;
2302                 int vn;
2303
2304                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2305                         if (vn == BP_E1HVN(bp))
2306                                 continue;
2307
2308                         func = ((vn << 1) | port);
2309
2310                         /* Set the attention towards other drivers
2311                            on the same port */
2312                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2313                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2314                 }
2315
2316                 if (bp->link_vars.link_up) {
2317                         int i;
2318
2319                         /* Init rate shaping and fairness contexts */
2320                         bnx2x_init_port_minmax(bp);
2321
2322                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2323                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2324
2325                         /* Store it to internal memory */
2326                         for (i = 0;
2327                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2328                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2329                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2330                                        ((u32 *)(&bp->cmng))[i]);
2331                 }
2332         }
2333 }
2334
2335 static void bnx2x__link_status_update(struct bnx2x *bp)
2336 {
2337         if (bp->state != BNX2X_STATE_OPEN)
2338                 return;
2339
2340         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2341
2342         if (bp->link_vars.link_up)
2343                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2344         else
2345                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2346
2347         /* indicate link status */
2348         bnx2x_link_report(bp);
2349 }
2350
2351 static void bnx2x_pmf_update(struct bnx2x *bp)
2352 {
2353         int port = BP_PORT(bp);
2354         u32 val;
2355
2356         bp->port.pmf = 1;
2357         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2358
2359         /* enable nig attention */
2360         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2361         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2362         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2363
2364         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2365 }
2366
2367 /* end of Link */
2368
2369 /* slow path */
2370
2371 /*
2372  * General service functions
2373  */
2374
2375 /* the slow path queue is odd since completions arrive on the fastpath ring */
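/*
 * Posting an element fills the next SPQ BD (command, HW CID and the
 * 64-bit data value split into hi/lo words), consumes one spq_left
 * credit, wraps the producer at spq_last_bd, and kicks the FW by
 * writing the producer index to the XSTORM SPQ producer offset.
 */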
2376 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2377                          u32 data_hi, u32 data_lo, int common)
2378 {
2379         int func = BP_FUNC(bp);
2380
2381         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2382            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2383            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2384            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2385            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2386
2387 #ifdef BNX2X_STOP_ON_ERROR
2388         if (unlikely(bp->panic))
2389                 return -EIO;
2390 #endif
2391
2392         spin_lock_bh(&bp->spq_lock);
2393
2394         if (!bp->spq_left) {
2395                 BNX2X_ERR("BUG! SPQ ring full!\n");
2396                 spin_unlock_bh(&bp->spq_lock);
2397                 bnx2x_panic();
2398                 return -EBUSY;
2399         }
2400
2401         /* CID needs the port number to be encoded in it */
2402         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2403                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2404                                      HW_CID(bp, cid)));
2405         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2406         if (common)
2407                 bp->spq_prod_bd->hdr.type |=
2408                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2409
2410         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2411         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2412
2413         bp->spq_left--;
2414
2415         if (bp->spq_prod_bd == bp->spq_last_bd) {
2416                 bp->spq_prod_bd = bp->spq;
2417                 bp->spq_prod_idx = 0;
2418                 DP(NETIF_MSG_TIMER, "end of spq\n");
2419
2420         } else {
2421                 bp->spq_prod_bd++;
2422                 bp->spq_prod_idx++;
2423         }
2424
2425         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2426                bp->spq_prod_idx);
2427
2428         spin_unlock_bh(&bp->spq_lock);
2429         return 0;
2430 }
2431
2432 /* acquire split MCP access lock register */
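/*
 * The ALR arbitrates GRC access between the driver and the MCP:
 * setting bit 31 of the register at GRCBASE_MCP + 0x9c requests the
 * lock, and the bit reading back as set means it was granted; writing
 * 0 releases it.
 */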
2433 static int bnx2x_acquire_alr(struct bnx2x *bp)
2434 {
2435         u32 i, j, val;
2436         int rc = 0;
2437
2438         might_sleep();
2439         i = 100;
2440         for (j = 0; j < i*10; j++) {
2441                 val = (1UL << 31);
2442                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2443                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2444                 if (val & (1L << 31))
2445                         break;
2446
2447                 msleep(5);
2448         }
2449         if (!(val & (1L << 31))) {
2450                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2451                 rc = -EBUSY;
2452         }
2453
2454         return rc;
2455 }
2456
2457 /* release split MCP access lock register */
2458 static void bnx2x_release_alr(struct bnx2x *bp)
2459 {
2460         u32 val = 0;
2461
2462         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2463 }
2464
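/*
 * Returns a bit mask of which default status block indices changed:
 * bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.
 */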
2465 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2466 {
2467         struct host_def_status_block *def_sb = bp->def_status_blk;
2468         u16 rc = 0;
2469
2470         barrier(); /* status block is written to by the chip */
2471         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2472                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2473                 rc |= 1;
2474         }
2475         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2476                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2477                 rc |= 2;
2478         }
2479         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2480                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2481                 rc |= 4;
2482         }
2483         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2484                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2485                 rc |= 8;
2486         }
2487         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2488                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2489                 rc |= 16;
2490         }
2491         return rc;
2492 }
2493
2494 /*
2495  * slow path service functions
2496  */
2497
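/*
 * Newly asserted attention bits are handled by masking them in the
 * AEU (under the per-port attention-mask HW lock), recording them in
 * attn_state, servicing the hard-wired sources (NIG/link, timers,
 * GPIOs, general attentions) and finally writing the bits to the HC
 * ATTN_BITS_SET command register.
 */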
2498 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2499 {
2500         int port = BP_PORT(bp);
2501         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2502                        COMMAND_REG_ATTN_BITS_SET);
2503         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2504                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2505         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2506                                        NIG_REG_MASK_INTERRUPT_PORT0;
2507         u32 aeu_mask;
2508         u32 nig_mask = 0;
2509
2510         if (bp->attn_state & asserted)
2511                 BNX2X_ERR("IGU ERROR\n");
2512
2513         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2514         aeu_mask = REG_RD(bp, aeu_addr);
2515
2516         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2517            aeu_mask, asserted);
2518         aeu_mask &= ~(asserted & 0xff);
2519         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2520
2521         REG_WR(bp, aeu_addr, aeu_mask);
2522         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2523
2524         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2525         bp->attn_state |= asserted;
2526         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2527
2528         if (asserted & ATTN_HARD_WIRED_MASK) {
2529                 if (asserted & ATTN_NIG_FOR_FUNC) {
2530
2531                         bnx2x_acquire_phy_lock(bp);
2532
2533                         /* save nig interrupt mask */
2534                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2535                         REG_WR(bp, nig_int_mask_addr, 0);
2536
2537                         bnx2x_link_attn(bp);
2538
2539                         /* handle unicore attn? */
2540                 }
2541                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2542                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2543
2544                 if (asserted & GPIO_2_FUNC)
2545                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2546
2547                 if (asserted & GPIO_3_FUNC)
2548                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2549
2550                 if (asserted & GPIO_4_FUNC)
2551                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2552
2553                 if (port == 0) {
2554                         if (asserted & ATTN_GENERAL_ATTN_1) {
2555                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2556                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2557                         }
2558                         if (asserted & ATTN_GENERAL_ATTN_2) {
2559                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2560                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2561                         }
2562                         if (asserted & ATTN_GENERAL_ATTN_3) {
2563                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2564                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2565                         }
2566                 } else {
2567                         if (asserted & ATTN_GENERAL_ATTN_4) {
2568                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2569                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2570                         }
2571                         if (asserted & ATTN_GENERAL_ATTN_5) {
2572                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2573                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2574                         }
2575                         if (asserted & ATTN_GENERAL_ATTN_6) {
2576                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2577                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2578                         }
2579                 }
2580
2581         } /* if hardwired */
2582
2583         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2584            asserted, hc_addr);
2585         REG_WR(bp, hc_addr, asserted);
2586
2587         /* now set back the mask */
2588         if (asserted & ATTN_NIG_FOR_FUNC) {
2589                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2590                 bnx2x_release_phy_lock(bp);
2591         }
2592 }
2593
2594 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2595 {
2596         int port = BP_PORT(bp);
2597         int reg_offset;
2598         u32 val;
2599
2600         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2601                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2602
2603         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2604
2605                 val = REG_RD(bp, reg_offset);
2606                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2607                 REG_WR(bp, reg_offset, val);
2608
2609                 BNX2X_ERR("SPIO5 hw attention\n");
2610
2611                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2612                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2613                         /* Fan failure attention */
2614
2615                         /* The PHY reset is controlled by GPIO 1 */
2616                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2617                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2618                         /* Low power mode is controlled by GPIO 2 */
2619                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2620                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2621                         /* mark the failure */
2622                         bp->link_params.ext_phy_config &=
2623                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2624                         bp->link_params.ext_phy_config |=
2625                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2626                         SHMEM_WR(bp,
2627                                  dev_info.port_hw_config[port].
2628                                                         external_phy_config,
2629                                  bp->link_params.ext_phy_config);
2630                         /* log the failure */
2631                         printk(KERN_ERR PFX "Fan Failure on Network"
2632                                " Controller %s has caused the driver to"
2633                                " shutdown the card to prevent permanent"
2634                                " damage.  Please contact Dell Support for"
2635                                " assistance\n", bp->dev->name);
2636                         break;
2637
2638                 default:
2639                         break;
2640                 }
2641         }
2642
2643         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2644                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2645                 bnx2x_acquire_phy_lock(bp);
2646                 bnx2x_handle_module_detect_int(&bp->link_params);
2647                 bnx2x_release_phy_lock(bp);
2648         }
2649
2650         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2651
2652                 val = REG_RD(bp, reg_offset);
2653                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2654                 REG_WR(bp, reg_offset, val);
2655
2656                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2657                           (attn & HW_INTERRUT_ASSERT_SET_0));
2658                 bnx2x_panic();
2659         }
2660 }
2661
2662 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2663 {
2664         u32 val;
2665
2666         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2667
2668                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2669                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2670                 /* DORQ discard attention */
2671                 if (val & 0x2)
2672                         BNX2X_ERR("FATAL error from DORQ\n");
2673         }
2674
2675         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2676
2677                 int port = BP_PORT(bp);
2678                 int reg_offset;
2679
2680                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2681                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2682
2683                 val = REG_RD(bp, reg_offset);
2684                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2685                 REG_WR(bp, reg_offset, val);
2686
2687                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2688                           (attn & HW_INTERRUT_ASSERT_SET_1));
2689                 bnx2x_panic();
2690         }
2691 }
2692
2693 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2694 {
2695         u32 val;
2696
2697         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2698
2699                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2700                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2701                 /* CFC error attention */
2702                 if (val & 0x2)
2703                         BNX2X_ERR("FATAL error from CFC\n");
2704         }
2705
2706         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2707
2708                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2709                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2710                 /* RQ_USDMDP_FIFO_OVERFLOW */
2711                 if (val & 0x18000)
2712                         BNX2X_ERR("FATAL error from PXP\n");
2713         }
2714
2715         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2716
2717                 int port = BP_PORT(bp);
2718                 int reg_offset;
2719
2720                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2721                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2722
2723                 val = REG_RD(bp, reg_offset);
2724                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2725                 REG_WR(bp, reg_offset, val);
2726
2727                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2728                           (attn & HW_INTERRUT_ASSERT_SET_2));
2729                 bnx2x_panic();
2730         }
2731 }
2732
2733 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2734 {
2735         u32 val;
2736
2737         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2738
2739                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2740                         int func = BP_FUNC(bp);
2741
2742                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2743                         bnx2x__link_status_update(bp);
2744                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2745                                                         DRV_STATUS_PMF)
2746                                 bnx2x_pmf_update(bp);
2747
2748                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2749
2750                         BNX2X_ERR("MC assert!\n");
2751                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2752                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2753                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2754                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2755                         bnx2x_panic();
2756
2757                 } else if (attn & BNX2X_MCP_ASSERT) {
2758
2759                         BNX2X_ERR("MCP assert!\n");
2760                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2761                         bnx2x_fw_dump(bp);
2762
2763                 } else
2764                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2765         }
2766
2767         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2768                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2769                 if (attn & BNX2X_GRC_TIMEOUT) {
2770                         val = CHIP_IS_E1H(bp) ?
2771                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2772                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2773                 }
2774                 if (attn & BNX2X_GRC_RSV) {
2775                         val = CHIP_IS_E1H(bp) ?
2776                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2777                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2778                 }
2779                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2780         }
2781 }
2782
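/*
 * Deasserted attentions are routed through the four after-invert AEU
 * signal registers.  The handler snapshots them under the ALR (the
 * MCP or the other port may race on the same event), dispatches each
 * deasserted group through the per-signal handlers above, writes the
 * HC ATTN_BITS_CLR command register and unmasks the bits in the AEU
 * again.
 */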
2783 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2784 {
2785         struct attn_route attn;
2786         struct attn_route group_mask;
2787         int port = BP_PORT(bp);
2788         int index;
2789         u32 reg_addr;
2790         u32 val;
2791         u32 aeu_mask;
2792
2793         /* need to take the HW lock because the MCP or the other port
2794            might also try to handle this event */
2795         bnx2x_acquire_alr(bp);
2796
2797         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2798         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2799         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2800         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2801         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2802            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2803
2804         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2805                 if (deasserted & (1 << index)) {
2806                         group_mask = bp->attn_group[index];
2807
2808                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2809                            index, group_mask.sig[0], group_mask.sig[1],
2810                            group_mask.sig[2], group_mask.sig[3]);
2811
2812                         bnx2x_attn_int_deasserted3(bp,
2813                                         attn.sig[3] & group_mask.sig[3]);
2814                         bnx2x_attn_int_deasserted1(bp,
2815                                         attn.sig[1] & group_mask.sig[1]);
2816                         bnx2x_attn_int_deasserted2(bp,
2817                                         attn.sig[2] & group_mask.sig[2]);
2818                         bnx2x_attn_int_deasserted0(bp,
2819                                         attn.sig[0] & group_mask.sig[0]);
2820
2821                         if ((attn.sig[0] & group_mask.sig[0] &
2822                                                 HW_PRTY_ASSERT_SET_0) ||
2823                             (attn.sig[1] & group_mask.sig[1] &
2824                                                 HW_PRTY_ASSERT_SET_1) ||
2825                             (attn.sig[2] & group_mask.sig[2] &
2826                                                 HW_PRTY_ASSERT_SET_2))
2827                                 BNX2X_ERR("FATAL HW block parity attention\n");
2828                 }
2829         }
2830
2831         bnx2x_release_alr(bp);
2832
2833         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2834
2835         val = ~deasserted;
2836         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2837            val, reg_addr);
2838         REG_WR(bp, reg_addr, val);
2839
2840         if (~bp->attn_state & deasserted)
2841                 BNX2X_ERR("IGU ERROR\n");
2842
2843         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2844                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2845
2846         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2847         aeu_mask = REG_RD(bp, reg_addr);
2848
2849         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2850            aeu_mask, deasserted);
2851         aeu_mask |= (deasserted & 0xff);
2852         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2853
2854         REG_WR(bp, reg_addr, aeu_mask);
2855         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2856
2857         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2858         bp->attn_state &= ~deasserted;
2859         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2860 }
2861
2862 static void bnx2x_attn_int(struct bnx2x *bp)
2863 {
2864         /* read local copy of bits */
2865         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2866                                                                 attn_bits);
2867         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2868                                                                 attn_bits_ack);
2869         u32 attn_state = bp->attn_state;
2870
2871         /* look for changed bits */
2872         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2873         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2874
2875         DP(NETIF_MSG_HW,
2876            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2877            attn_bits, attn_ack, asserted, deasserted);
2878
2879         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2880                 BNX2X_ERR("BAD attention state\n");
2881
2882         /* handle bits that were raised */
2883         if (asserted)
2884                 bnx2x_attn_int_asserted(bp, asserted);
2885
2886         if (deasserted)
2887                 bnx2x_attn_int_deasserted(bp, deasserted);
2888 }
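
/* A worked single-bit example of the assert/deassert algebra above
 * (illustrative only): with attn_bits = 1, attn_ack = 0, attn_state = 0 we
 * get asserted = 1 & ~0 & ~0 = 1 and deasserted = ~1 & 0 & 0 = 0, i.e. a
 * new assertion to handle; once the ack and the state have caught up
 * (bits = ack = state = 1) both terms are 0 and nothing is done.  The
 * "BAD attention state" check fires when attn_bits already agrees with
 * attn_ack while attn_state still disagrees with attn_bits - a
 * combination the assert/deassert handshake should never produce.
 */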
2889
2890 static void bnx2x_sp_task(struct work_struct *work)
2891 {
2892         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2893         u16 status;
2894
2896         /* Return here if interrupt is disabled */
2897         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2898                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2899                 return;
2900         }
2901
2902         status = bnx2x_update_dsb_idx(bp);
2903 /*      if (status == 0)                                     */
2904 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2905
2906         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2907
2908         /* HW attentions */
2909         if (status & 0x1)
2910                 bnx2x_attn_int(bp);
2911
2912         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2913                      IGU_INT_NOP, 1);
2914         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2915                      IGU_INT_NOP, 1);
2916         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2917                      IGU_INT_NOP, 1);
2918         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2919                      IGU_INT_NOP, 1);
2920         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2921                      IGU_INT_ENABLE, 1);
2923 }
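
/* The first four bnx2x_ack_sb() calls above use IGU_INT_NOP so the
 * default status block indices are updated without touching the interrupt
 * state; only the final ack passes IGU_INT_ENABLE, re-arming the slowpath
 * interrupt that bnx2x_msix_sp_int() below disables before queueing this
 * work item.
 */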
2924
2925 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2926 {
2927         struct net_device *dev = dev_instance;
2928         struct bnx2x *bp = netdev_priv(dev);
2929
2930         /* Return here if interrupt is disabled */
2931         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2932                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2933                 return IRQ_HANDLED;
2934         }
2935
2936         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2937
2938 #ifdef BNX2X_STOP_ON_ERROR
2939         if (unlikely(bp->panic))
2940                 return IRQ_HANDLED;
2941 #endif
2942
2943         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2944
2945         return IRQ_HANDLED;
2946 }
2947
2948 /* end of slow path */
2949
2950 /* Statistics */
2951
2952 /****************************************************************************
2953 * Macros
2954 ****************************************************************************/
2955
2956 /* sum[hi:lo] += add[hi:lo] */
2957 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2958         do { \
2959                 s_lo += a_lo; \
2960                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2961         } while (0)
2962
2963 /* difference = minuend - subtrahend */
2964 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2965         do { \
2966                 if (m_lo < s_lo) { \
2967                         /* underflow */ \
2968                         d_hi = m_hi - s_hi; \
2969                         if (d_hi > 0) { \
2970                                 /* we can 'loan' 1 */ \
2971                                 /* we can borrow 1 */ \
2972                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2973                         } else { \
2974                                 /* m_hi <= s_hi */ \
2975                                 d_hi = 0; \
2976                                 d_lo = 0; \
2977                         } \
2978                 } else { \
2979                         /* m_lo >= s_lo */ \
2980                         if (m_hi < s_hi) { \
2981                                 d_hi = 0; \
2982                                 d_lo = 0; \
2983                         } else { \
2984                                 /* m_hi >= s_hi */ \
2985                                 d_hi = m_hi - s_hi; \
2986                                 d_lo = m_lo - s_lo; \
2987                         } \
2988                 } \
2989         } while (0)
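
/* A minimal, compile-excluded sketch of the carry/borrow semantics of
 * ADD_64 and DIFF_64 above; the function name is arbitrary and the block
 * is illustrative only, not part of the driver's data path.
 */
#if 0
static void bnx2x_64bit_macro_example(void)
{
        u32 s_hi = 0, s_lo = 0xffffffff;
        u32 d_hi, d_lo;

        /* the low word wraps to 0 and carries 1 into the high word:
           s becomes 0x1:0x00000000 */
        ADD_64(s_hi, 0, s_lo, 1);

        /* borrow from the high word:
           0x1:0x00000000 - 0x0:0x00000001 = 0x0:0xffffffff */
        DIFF_64(d_hi, s_hi, 0, d_lo, s_lo, 1);

        /* a true underflow (minuend < subtrahend) is clamped to 0x0:0x0
           instead of wrapping negative */
        DIFF_64(d_hi, 0, 1, d_lo, 0, 0);
}
#endif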
2990
2991 #define UPDATE_STAT64(s, t) \
2992         do { \
2993                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2994                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2995                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2996                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2997                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2998                        pstats->mac_stx[1].t##_lo, diff.lo); \
2999         } while (0)
3000
3001 #define UPDATE_STAT64_NIG(s, t) \
3002         do { \
3003                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3004                         diff.lo, new->s##_lo, old->s##_lo); \
3005                 ADD_64(estats->t##_hi, diff.hi, \
3006                        estats->t##_lo, diff.lo); \
3007         } while (0)
3008
3009 /* sum[hi:lo] += add */
3010 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3011         do { \
3012                 s_lo += a; \
3013                 s_hi += (s_lo < a) ? 1 : 0; \
3014         } while (0)
3015
3016 #define UPDATE_EXTEND_STAT(s) \
3017         do { \
3018                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3019                               pstats->mac_stx[1].s##_lo, \
3020                               new->s); \
3021         } while (0)
3022
3023 #define UPDATE_EXTEND_TSTAT(s, t) \
3024         do { \
3025                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3026                 old_tclient->s = tclient->s; \
3027                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3028         } while (0)
3029
3030 #define UPDATE_EXTEND_USTAT(s, t) \
3031         do { \
3032                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3033                 old_uclient->s = uclient->s; \
3034                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3035         } while (0)
3036
3037 #define UPDATE_EXTEND_XSTAT(s, t) \
3038         do { \
3039                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3040                 old_xclient->s = xclient->s; \
3041                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3042         } while (0)
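
/* The UPDATE_EXTEND_{T,U,X}STAT macros above depend on unsigned 32-bit
 * arithmetic: "new - old" is evaluated modulo 2^32, so the computed diff
 * stays correct across a single wrap of the firmware counter.  For
 * example, old = 0xfffffffe and new = 0x00000001 yields diff = 3, which
 * is then folded into the 64-bit qstats counter by ADD_EXTEND_64.
 */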
3043
3044 /* minuend -= subtrahend */
3045 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3046         do { \
3047                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3048         } while (0)
3049
3050 /* minuend[hi:lo] -= subtrahend */
3051 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3052         do { \
3053                 SUB_64(m_hi, 0, m_lo, s); \
3054         } while (0)
3055
3056 #define SUB_EXTEND_USTAT(s, t) \
3057         do { \
3058                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3059                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3060         } while (0)
3061
3062 /*
3063  * General service functions
3064  */
3065
3066 static inline long bnx2x_hilo(u32 *hiref)
3067 {
3068         u32 lo = *(hiref + 1);
3069 #if (BITS_PER_LONG == 64)
3070         u32 hi = *hiref;
3071
3072         return HILO_U64(hi, lo);
3073 #else
3074         return lo;
3075 #endif
3076 }
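
/* On a 32-bit kernel bnx2x_hilo() deliberately drops the high dword:
 * with hiref[0] = 0x1 and hiref[1] = 0x2 it returns 0x100000002 on
 * 64-bit builds but just 0x2 on 32-bit builds, matching the width of the
 * unsigned long net_device_stats fields it feeds below.
 */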
3077
3078 /*
3079  * Init service functions
3080  */
3081
3082 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3083 {
3084         if (!bp->stats_pending) {
3085                 struct eth_query_ramrod_data ramrod_data = {0};
3086                 int i, rc;
3087
3088                 ramrod_data.drv_counter = bp->stats_counter++;
3089                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3090                 for_each_queue(bp, i)
3091                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3092
3093                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3094                                    ((u32 *)&ramrod_data)[1],
3095                                    ((u32 *)&ramrod_data)[0], 0);
3096                 if (rc == 0) {
3097                         /* stats ramrod has its own slot on the spq */
3098                         bp->spq_left++;
3099                         bp->stats_pending = 1;
3100                 }
3101         }
3102 }
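
/* ctr_id_vector is a bitmask of client IDs: with three queues whose
 * cl_id values are 0, 1 and 2 the loop above produces 0x7, selecting
 * exactly those clients for the firmware statistics collection.  The two
 * u32 halves of ramrod_data are passed as the hi/lo data words of the
 * slowpath post.
 */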
3103
3104 static void bnx2x_stats_init(struct bnx2x *bp)
3105 {
3106         int port = BP_PORT(bp);
3107         int i;
3108
3109         bp->stats_pending = 0;
3110         bp->executer_idx = 0;
3111         bp->stats_counter = 0;
3112
3113         /* port stats */
3114         if (!BP_NOMCP(bp))
3115                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3116         else
3117                 bp->port.port_stx = 0;
3118         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3119
3120         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3121         bp->port.old_nig_stats.brb_discard =
3122                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3123         bp->port.old_nig_stats.brb_truncate =
3124                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3125         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3126                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3127         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3128                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3129
3130         /* function stats */
3131         for_each_queue(bp, i) {
3132                 struct bnx2x_fastpath *fp = &bp->fp[i];
3133
3134                 memset(&fp->old_tclient, 0,
3135                        sizeof(struct tstorm_per_client_stats));
3136                 memset(&fp->old_uclient, 0,
3137                        sizeof(struct ustorm_per_client_stats));
3138                 memset(&fp->old_xclient, 0,
3139                        sizeof(struct xstorm_per_client_stats));
3140                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3141         }
3142
3143         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3144         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3145
3146         bp->stats_state = STATS_STATE_DISABLED;
3147         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3148                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3149 }
3150
3151 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3152 {
3153         struct dmae_command *dmae = &bp->stats_dmae;
3154         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3155
3156         *stats_comp = DMAE_COMP_VAL;
3157         if (CHIP_REV_IS_SLOW(bp))
3158                 return;
3159
3160         /* loader */
3161         if (bp->executer_idx) {
3162                 int loader_idx = PMF_DMAE_C(bp);
3163
3164                 memset(dmae, 0, sizeof(struct dmae_command));
3165
3166                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3167                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3168                                 DMAE_CMD_DST_RESET |
3169 #ifdef __BIG_ENDIAN
3170                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3171 #else
3172                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3173 #endif
3174                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3175                                                DMAE_CMD_PORT_0) |
3176                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3177                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3178                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3179                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3180                                      sizeof(struct dmae_command) *
3181                                      (loader_idx + 1)) >> 2;
3182                 dmae->dst_addr_hi = 0;
3183                 dmae->len = sizeof(struct dmae_command) >> 2;
3184                 if (CHIP_IS_E1(bp))
3185                         dmae->len--;
3186                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3187                 dmae->comp_addr_hi = 0;
3188                 dmae->comp_val = 1;
3189
3190                 *stats_comp = 0;
3191                 bnx2x_post_dmae(bp, dmae, loader_idx);
3192
3193         } else if (bp->func_stx) {
3194                 *stats_comp = 0;
3195                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3196         }
3197 }
3198
3199 static int bnx2x_stats_comp(struct bnx2x *bp)
3200 {
3201         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3202         int cnt = 10;
3203
3204         might_sleep();
3205         while (*stats_comp != DMAE_COMP_VAL) {
3206                 if (!cnt) {
3207                         BNX2X_ERR("timeout waiting for stats to finish\n");
3208                         break;
3209                 }
3210                 cnt--;
3211                 msleep(1);
3212         }
3213         return 1;
3214 }
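
/* bnx2x_stats_comp() polls the DMAE completion word, sleeping 1 ms
 * between checks, for at most roughly 10 ms.  Note that it returns 1
 * even on timeout - the error is only logged, and callers treat the
 * completion as best-effort.
 */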
3215
3216 /*
3217  * Statistics service functions
3218  */
3219
3220 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3221 {
3222         struct dmae_command *dmae;
3223         u32 opcode;
3224         int loader_idx = PMF_DMAE_C(bp);
3225         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3226
3227         /* sanity */
3228         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3229                 BNX2X_ERR("BUG!\n");
3230                 return;
3231         }
3232
3233         bp->executer_idx = 0;
3234
3235         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3236                   DMAE_CMD_C_ENABLE |
3237                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3238 #ifdef __BIG_ENDIAN
3239                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3240 #else
3241                   DMAE_CMD_ENDIANITY_DW_SWAP |
3242 #endif
3243                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3244                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3245
3246         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3247         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3248         dmae->src_addr_lo = bp->port.port_stx >> 2;
3249         dmae->src_addr_hi = 0;
3250         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3251         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3252         dmae->len = DMAE_LEN32_RD_MAX;
3253         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3254         dmae->comp_addr_hi = 0;
3255         dmae->comp_val = 1;
3256
3257         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3258         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3259         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3260         dmae->src_addr_hi = 0;
3261         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3262                                    DMAE_LEN32_RD_MAX * 4);
3263         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3264                                    DMAE_LEN32_RD_MAX * 4);
3265         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3266         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3267         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3268         dmae->comp_val = DMAE_COMP_VAL;
3269
3270         *stats_comp = 0;
3271         bnx2x_hw_stats_post(bp);
3272         bnx2x_stats_comp(bp);
3273 }
3274
3275 static void bnx2x_port_stats_init(struct bnx2x *bp)
3276 {
3277         struct dmae_command *dmae;
3278         int port = BP_PORT(bp);
3279         int vn = BP_E1HVN(bp);
3280         u32 opcode;
3281         int loader_idx = PMF_DMAE_C(bp);
3282         u32 mac_addr;
3283         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3284
3285         /* sanity */
3286         if (!bp->link_vars.link_up || !bp->port.pmf) {
3287                 BNX2X_ERR("BUG!\n");
3288                 return;
3289         }
3290
3291         bp->executer_idx = 0;
3292
3293         /* MCP */
3294         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3295                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3296                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3297 #ifdef __BIG_ENDIAN
3298                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3299 #else
3300                   DMAE_CMD_ENDIANITY_DW_SWAP |
3301 #endif
3302                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3303                   (vn << DMAE_CMD_E1HVN_SHIFT));
3304
3305         if (bp->port.port_stx) {
3306
3307                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3308                 dmae->opcode = opcode;
3309                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3310                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3311                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3312                 dmae->dst_addr_hi = 0;
3313                 dmae->len = sizeof(struct host_port_stats) >> 2;
3314                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3315                 dmae->comp_addr_hi = 0;
3316                 dmae->comp_val = 1;
3317         }
3318
3319         if (bp->func_stx) {
3320
3321                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322                 dmae->opcode = opcode;
3323                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3324                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3325                 dmae->dst_addr_lo = bp->func_stx >> 2;
3326                 dmae->dst_addr_hi = 0;
3327                 dmae->len = sizeof(struct host_func_stats) >> 2;
3328                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329                 dmae->comp_addr_hi = 0;
3330                 dmae->comp_val = 1;
3331         }
3332
3333         /* MAC */
3334         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3335                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3336                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3337 #ifdef __BIG_ENDIAN
3338                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3339 #else
3340                   DMAE_CMD_ENDIANITY_DW_SWAP |
3341 #endif
3342                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3343                   (vn << DMAE_CMD_E1HVN_SHIFT));
3344
3345         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3346
3347                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3348                                    NIG_REG_INGRESS_BMAC0_MEM);
3349
3350                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3351                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3352                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353                 dmae->opcode = opcode;
3354                 dmae->src_addr_lo = (mac_addr +
3355                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3356                 dmae->src_addr_hi = 0;
3357                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3358                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3359                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3360                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3361                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3362                 dmae->comp_addr_hi = 0;
3363                 dmae->comp_val = 1;
3364
3365                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3366                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3367                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368                 dmae->opcode = opcode;
3369                 dmae->src_addr_lo = (mac_addr +
3370                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3371                 dmae->src_addr_hi = 0;
3372                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3373                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3374                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3375                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3376                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3377                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3378                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3379                 dmae->comp_addr_hi = 0;
3380                 dmae->comp_val = 1;
3381
3382         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3383
3384                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3385
3386                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3387                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3388                 dmae->opcode = opcode;
3389                 dmae->src_addr_lo = (mac_addr +
3390                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3391                 dmae->src_addr_hi = 0;
3392                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3393                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3394                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3395                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3396                 dmae->comp_addr_hi = 0;
3397                 dmae->comp_val = 1;
3398
3399                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3400                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3401                 dmae->opcode = opcode;
3402                 dmae->src_addr_lo = (mac_addr +
3403                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3404                 dmae->src_addr_hi = 0;
3405                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3406                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3407                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3408                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3409                 dmae->len = 1;
3410                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3411                 dmae->comp_addr_hi = 0;
3412                 dmae->comp_val = 1;
3413
3414                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3415                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3416                 dmae->opcode = opcode;
3417                 dmae->src_addr_lo = (mac_addr +
3418                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3419                 dmae->src_addr_hi = 0;
3420                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3421                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3422                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3423                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3424                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3425                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3426                 dmae->comp_addr_hi = 0;
3427                 dmae->comp_val = 1;
3428         }
3429
3430         /* NIG */
3431         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3432         dmae->opcode = opcode;
3433         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3434                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3435         dmae->src_addr_hi = 0;
3436         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3437         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3438         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3439         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3440         dmae->comp_addr_hi = 0;
3441         dmae->comp_val = 1;
3442
3443         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3444         dmae->opcode = opcode;
3445         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3446                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3447         dmae->src_addr_hi = 0;
3448         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3449                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3450         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3451                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3452         dmae->len = (2*sizeof(u32)) >> 2;
3453         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3454         dmae->comp_addr_hi = 0;
3455         dmae->comp_val = 1;
3456
3457         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3458         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3459                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3460                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3461 #ifdef __BIG_ENDIAN
3462                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3463 #else
3464                         DMAE_CMD_ENDIANITY_DW_SWAP |
3465 #endif
3466                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3467                         (vn << DMAE_CMD_E1HVN_SHIFT));
3468         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3469                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3470         dmae->src_addr_hi = 0;
3471         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3472                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3473         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3474                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3475         dmae->len = (2*sizeof(u32)) >> 2;
3476         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3477         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3478         dmae->comp_val = DMAE_COMP_VAL;
3479
3480         *stats_comp = 0;
3481 }
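
/* Every DMAE command built above except the last completes into the
 * loader's GRC "go" register (dmae_reg_go_c[loader_idx]) with comp_val 1,
 * so the commands can be chained by the DMAE loader set up in
 * bnx2x_hw_stats_post(); only the final command completes into the
 * stats_comp word in host memory with DMAE_COMP_VAL, which is what
 * bnx2x_stats_comp() polls for.
 */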
3482
3483 static void bnx2x_func_stats_init(struct bnx2x *bp)
3484 {
3485         struct dmae_command *dmae = &bp->stats_dmae;
3486         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3487
3488         /* sanity */
3489         if (!bp->func_stx) {
3490                 BNX2X_ERR("BUG!\n");
3491                 return;
3492         }
3493
3494         bp->executer_idx = 0;
3495         memset(dmae, 0, sizeof(struct dmae_command));
3496
3497         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3498                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3499                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3500 #ifdef __BIG_ENDIAN
3501                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3502 #else
3503                         DMAE_CMD_ENDIANITY_DW_SWAP |
3504 #endif
3505                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3506                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3507         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3508         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3509         dmae->dst_addr_lo = bp->func_stx >> 2;
3510         dmae->dst_addr_hi = 0;
3511         dmae->len = sizeof(struct host_func_stats) >> 2;
3512         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3513         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3514         dmae->comp_val = DMAE_COMP_VAL;
3515
3516         *stats_comp = 0;
3517 }
3518
3519 static void bnx2x_stats_start(struct bnx2x *bp)
3520 {
3521         if (bp->port.pmf)
3522                 bnx2x_port_stats_init(bp);
3523
3524         else if (bp->func_stx)
3525                 bnx2x_func_stats_init(bp);
3526
3527         bnx2x_hw_stats_post(bp);
3528         bnx2x_storm_stats_post(bp);
3529 }
3530
3531 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3532 {
3533         bnx2x_stats_comp(bp);
3534         bnx2x_stats_pmf_update(bp);
3535         bnx2x_stats_start(bp);
3536 }
3537
3538 static void bnx2x_stats_restart(struct bnx2x *bp)
3539 {
3540         bnx2x_stats_comp(bp);
3541         bnx2x_stats_start(bp);
3542 }
3543
3544 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3545 {
3546         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3547         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3548         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3549         struct {
3550                 u32 lo;
3551                 u32 hi;
3552         } diff;
3553
3554         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3555         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3556         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3557         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3558         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3559         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3560         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3561         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3562         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3563         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3564         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3565         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3566         UPDATE_STAT64(tx_stat_gt127,
3567                                 tx_stat_etherstatspkts65octetsto127octets);
3568         UPDATE_STAT64(tx_stat_gt255,
3569                                 tx_stat_etherstatspkts128octetsto255octets);
3570         UPDATE_STAT64(tx_stat_gt511,
3571                                 tx_stat_etherstatspkts256octetsto511octets);
3572         UPDATE_STAT64(tx_stat_gt1023,
3573                                 tx_stat_etherstatspkts512octetsto1023octets);
3574         UPDATE_STAT64(tx_stat_gt1518,
3575                                 tx_stat_etherstatspkts1024octetsto1522octets);
3576         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3577         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3578         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3579         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3580         UPDATE_STAT64(tx_stat_gterr,
3581                                 tx_stat_dot3statsinternalmactransmiterrors);
3582         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3583
3584         estats->pause_frames_received_hi =
3585                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3586         estats->pause_frames_received_lo =
3587                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3588
3589         estats->pause_frames_sent_hi =
3590                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3591         estats->pause_frames_sent_lo =
3592                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3593 }
3594
3595 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3596 {
3597         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3598         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3599         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3600
3601         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3602         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3603         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3604         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3605         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3606         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3607         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3608         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3609         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3610         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3611         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3612         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3613         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3614         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3615         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3616         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3617         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3618         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3619         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3620         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3621         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3622         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3623         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3624         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3625         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3626         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3627         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3628         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3629         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3630         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3631         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3632
3633         estats->pause_frames_received_hi =
3634                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3635         estats->pause_frames_received_lo =
3636                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3637         ADD_64(estats->pause_frames_received_hi,
3638                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3639                estats->pause_frames_received_lo,
3640                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3641
3642         estats->pause_frames_sent_hi =
3643                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3644         estats->pause_frames_sent_lo =
3645                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3646         ADD_64(estats->pause_frames_sent_hi,
3647                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3648                estats->pause_frames_sent_lo,
3649                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3650 }
3651
3652 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3653 {
3654         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3655         struct nig_stats *old = &(bp->port.old_nig_stats);
3656         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3657         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3658         struct {
3659                 u32 lo;
3660                 u32 hi;
3661         } diff;
3662         u32 nig_timer_max;
3663
3664         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3665                 bnx2x_bmac_stats_update(bp);
3666
3667         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3668                 bnx2x_emac_stats_update(bp);
3669
3670         else { /* unreached */
3671                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3672                 return -1;
3673         }
3674
3675         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3676                       new->brb_discard - old->brb_discard);
3677         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3678                       new->brb_truncate - old->brb_truncate);
3679
3680         UPDATE_STAT64_NIG(egress_mac_pkt0,
3681                                         etherstatspkts1024octetsto1522octets);
3682         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3683
3684         memcpy(old, new, sizeof(struct nig_stats));
3685
3686         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3687                sizeof(struct mac_stx));
3688         estats->brb_drop_hi = pstats->brb_drop_hi;
3689         estats->brb_drop_lo = pstats->brb_drop_lo;
3690
3691         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3692
3693         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3694         if (nig_timer_max != estats->nig_timer_max) {
3695                 estats->nig_timer_max = nig_timer_max;
3696                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3697         }
3698
3699         return 0;
3700 }
3701
3702 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3703 {
3704         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3705         struct tstorm_per_port_stats *tport =
3706                                         &stats->tstorm_common.port_statistics;
3707         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3708         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3709         int i;
3710
3711         memset(&(fstats->total_bytes_received_hi), 0,
3712                sizeof(struct host_func_stats) - 2*sizeof(u32));
3713         estats->error_bytes_received_hi = 0;
3714         estats->error_bytes_received_lo = 0;
3715         estats->etherstatsoverrsizepkts_hi = 0;
3716         estats->etherstatsoverrsizepkts_lo = 0;
3717         estats->no_buff_discard_hi = 0;
3718         estats->no_buff_discard_lo = 0;
3719
3720         for_each_queue(bp, i) {
3721                 struct bnx2x_fastpath *fp = &bp->fp[i];
3722                 int cl_id = fp->cl_id;
3723                 struct tstorm_per_client_stats *tclient =
3724                                 &stats->tstorm_common.client_statistics[cl_id];
3725                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3726                 struct ustorm_per_client_stats *uclient =
3727                                 &stats->ustorm_common.client_statistics[cl_id];
3728                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3729                 struct xstorm_per_client_stats *xclient =
3730                                 &stats->xstorm_common.client_statistics[cl_id];
3731                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3732                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3733                 u32 diff;
3734
3735                 /* are storm stats valid? */
3736                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3737                                                         bp->stats_counter) {
3738                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3739                            "  xstorm counter (%d) != stats_counter (%d)\n",
3740                            i, xclient->stats_counter, bp->stats_counter);
3741                         return -1;
3742                 }
3743                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3744                                                         bp->stats_counter) {
3745                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3746                            "  tstorm counter (%d) != stats_counter (%d)\n",
3747                            i, tclient->stats_counter, bp->stats_counter);
3748                         return -2;
3749                 }
3750                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3751                                                         bp->stats_counter) {
3752                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3753                            "  ustorm counter (%d) != stats_counter (%d)\n",
3754                            i, uclient->stats_counter, bp->stats_counter);
3755                         return -4;
3756                 }
3757
3758                 qstats->total_bytes_received_hi =
3759                 qstats->valid_bytes_received_hi =
3760                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3761                 qstats->total_bytes_received_lo =
3762                 qstats->valid_bytes_received_lo =
3763                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3764
3765                 qstats->error_bytes_received_hi =
3766                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3767                 qstats->error_bytes_received_lo =
3768                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3769
3770                 ADD_64(qstats->total_bytes_received_hi,
3771                        qstats->error_bytes_received_hi,
3772                        qstats->total_bytes_received_lo,
3773                        qstats->error_bytes_received_lo);
3774
3775                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3776                                         total_unicast_packets_received);
3777                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3778                                         total_multicast_packets_received);
3779                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3780                                         total_broadcast_packets_received);
3781                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3782                                         etherstatsoverrsizepkts);
3783                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3784
3785                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3786                                         total_unicast_packets_received);
3787                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3788                                         total_multicast_packets_received);
3789                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3790                                         total_broadcast_packets_received);
3791                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3792                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3793                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3794
3795                 qstats->total_bytes_transmitted_hi =
3796                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3797                 qstats->total_bytes_transmitted_lo =
3798                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3799
3800                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3801                                         total_unicast_packets_transmitted);
3802                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3803                                         total_multicast_packets_transmitted);
3804                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3805                                         total_broadcast_packets_transmitted);
3806
3807                 old_tclient->checksum_discard = tclient->checksum_discard;
3808                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3809
3810                 ADD_64(fstats->total_bytes_received_hi,
3811                        qstats->total_bytes_received_hi,
3812                        fstats->total_bytes_received_lo,
3813                        qstats->total_bytes_received_lo);
3814                 ADD_64(fstats->total_bytes_transmitted_hi,
3815                        qstats->total_bytes_transmitted_hi,
3816                        fstats->total_bytes_transmitted_lo,
3817                        qstats->total_bytes_transmitted_lo);
3818                 ADD_64(fstats->total_unicast_packets_received_hi,
3819                        qstats->total_unicast_packets_received_hi,
3820                        fstats->total_unicast_packets_received_lo,
3821                        qstats->total_unicast_packets_received_lo);
3822                 ADD_64(fstats->total_multicast_packets_received_hi,
3823                        qstats->total_multicast_packets_received_hi,
3824                        fstats->total_multicast_packets_received_lo,
3825                        qstats->total_multicast_packets_received_lo);
3826                 ADD_64(fstats->total_broadcast_packets_received_hi,
3827                        qstats->total_broadcast_packets_received_hi,
3828                        fstats->total_broadcast_packets_received_lo,
3829                        qstats->total_broadcast_packets_received_lo);
3830                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3831                        qstats->total_unicast_packets_transmitted_hi,
3832                        fstats->total_unicast_packets_transmitted_lo,
3833                        qstats->total_unicast_packets_transmitted_lo);
3834                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3835                        qstats->total_multicast_packets_transmitted_hi,
3836                        fstats->total_multicast_packets_transmitted_lo,
3837                        qstats->total_multicast_packets_transmitted_lo);
3838                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3839                        qstats->total_broadcast_packets_transmitted_hi,
3840                        fstats->total_broadcast_packets_transmitted_lo,
3841                        qstats->total_broadcast_packets_transmitted_lo);
3842                 ADD_64(fstats->valid_bytes_received_hi,
3843                        qstats->valid_bytes_received_hi,
3844                        fstats->valid_bytes_received_lo,
3845                        qstats->valid_bytes_received_lo);
3846
3847                 ADD_64(estats->error_bytes_received_hi,
3848                        qstats->error_bytes_received_hi,
3849                        estats->error_bytes_received_lo,
3850                        qstats->error_bytes_received_lo);
3851                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3852                        qstats->etherstatsoverrsizepkts_hi,
3853                        estats->etherstatsoverrsizepkts_lo,
3854                        qstats->etherstatsoverrsizepkts_lo);
3855                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3856                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3857         }
3858
3859         ADD_64(fstats->total_bytes_received_hi,
3860                estats->rx_stat_ifhcinbadoctets_hi,
3861                fstats->total_bytes_received_lo,
3862                estats->rx_stat_ifhcinbadoctets_lo);
3863
3864         memcpy(estats, &(fstats->total_bytes_received_hi),
3865                sizeof(struct host_func_stats) - 2*sizeof(u32));
3866
3867         ADD_64(estats->etherstatsoverrsizepkts_hi,
3868                estats->rx_stat_dot3statsframestoolong_hi,
3869                estats->etherstatsoverrsizepkts_lo,
3870                estats->rx_stat_dot3statsframestoolong_lo);
3871         ADD_64(estats->error_bytes_received_hi,
3872                estats->rx_stat_ifhcinbadoctets_hi,
3873                estats->error_bytes_received_lo,
3874                estats->rx_stat_ifhcinbadoctets_lo);
3875
3876         if (bp->port.pmf) {
3877                 estats->mac_filter_discard =
3878                                 le32_to_cpu(tport->mac_filter_discard);
3879                 estats->xxoverflow_discard =
3880                                 le32_to_cpu(tport->xxoverflow_discard);
3881                 estats->brb_truncate_discard =
3882                                 le32_to_cpu(tport->brb_truncate_discard);
3883                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3884         }
3885
3886         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3887
3888         bp->stats_pending = 0;
3889
3890         return 0;
3891 }
3892
3893 static void bnx2x_net_stats_update(struct bnx2x *bp)
3894 {
3895         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3896         struct net_device_stats *nstats = &bp->dev->stats;
3897         int i;
3898
3899         nstats->rx_packets =
3900                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3901                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3902                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3903
3904         nstats->tx_packets =
3905                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3906                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3907                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3908
3909         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3910
3911         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3912
3913         nstats->rx_dropped = estats->mac_discard;
3914         for_each_queue(bp, i)
3915                 nstats->rx_dropped +=
3916                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3917
3918         nstats->tx_dropped = 0;
3919
3920         nstats->multicast =
3921                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3922
3923         nstats->collisions =
3924                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3925
3926         nstats->rx_length_errors =
3927                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3928                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3929         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3930                                  bnx2x_hilo(&estats->brb_truncate_hi);
3931         nstats->rx_crc_errors =
3932                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3933         nstats->rx_frame_errors =
3934                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3935         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3936         nstats->rx_missed_errors = estats->xxoverflow_discard;
3937
3938         nstats->rx_errors = nstats->rx_length_errors +
3939                             nstats->rx_over_errors +
3940                             nstats->rx_crc_errors +
3941                             nstats->rx_frame_errors +
3942                             nstats->rx_fifo_errors +
3943                             nstats->rx_missed_errors;
3944
3945         nstats->tx_aborted_errors =
3946                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3947                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3948         nstats->tx_carrier_errors =
3949                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3950         nstats->tx_fifo_errors = 0;
3951         nstats->tx_heartbeat_errors = 0;
3952         nstats->tx_window_errors = 0;
3953
3954         nstats->tx_errors = nstats->tx_aborted_errors +
3955                             nstats->tx_carrier_errors +
3956             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3957 }
3958
3959 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3960 {
3961         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3962         int i;
3963
3964         estats->driver_xoff = 0;
3965         estats->rx_err_discard_pkt = 0;
3966         estats->rx_skb_alloc_failed = 0;
3967         estats->hw_csum_err = 0;
3968         for_each_queue(bp, i) {
3969                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3970
3971                 estats->driver_xoff += qstats->driver_xoff;
3972                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3973                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3974                 estats->hw_csum_err += qstats->hw_csum_err;
3975         }
3976 }
3977
3978 static void bnx2x_stats_update(struct bnx2x *bp)
3979 {
3980         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3981
3982         if (*stats_comp != DMAE_COMP_VAL)
3983                 return;
3984
3985         if (bp->port.pmf)
3986                 bnx2x_hw_stats_update(bp);
3987
3988         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3989                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
3990                 bnx2x_panic();
3991                 return;
3992         }
3993
3994         bnx2x_net_stats_update(bp);
3995         bnx2x_drv_stats_update(bp);
3996
3997         if (bp->msglevel & NETIF_MSG_TIMER) {
3998                 struct tstorm_per_client_stats *old_tclient =
3999                                                         &bp->fp->old_tclient;
4000                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4001                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4002                 struct net_device_stats *nstats = &bp->dev->stats;
4003                 int i;
4004
4005                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4006                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4007                                   "  tx pkt (%lx)\n",
4008                        bnx2x_tx_avail(bp->fp),
4009                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4010                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4011                                   "  rx pkt (%lx)\n",
4012                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4013                              bp->fp->rx_comp_cons),
4014                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4015                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4016                                   "brb truncate %u\n",
4017                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4018                        qstats->driver_xoff,
4019                        estats->brb_drop_lo, estats->brb_truncate_lo);
4020                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4021                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4022                         "mac_discard %u  mac_filter_discard %u  "
4023                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4024                         "ttl0_discard %u\n",
4025                        le32_to_cpu(old_tclient->checksum_discard),
4026                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4027                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4028                        estats->mac_discard, estats->mac_filter_discard,
4029                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4030                        le32_to_cpu(old_tclient->ttl0_discard));
4031
4032                 for_each_queue(bp, i) {
4033                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4034                                bnx2x_fp(bp, i, tx_pkt),
4035                                bnx2x_fp(bp, i, rx_pkt),
4036                                bnx2x_fp(bp, i, rx_calls));
4037                 }
4038         }
4039
4040         bnx2x_hw_stats_post(bp);
4041         bnx2x_storm_stats_post(bp);
4042 }
4043
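/* Flush a final statistics snapshot via DMAE before stopping: copy the
   port stats to port_stx and, if a per-function stats address exists,
   chain a second command for the function stats.  Only the last
   command in the chain completes to stats_comp */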
4044 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4045 {
4046         struct dmae_command *dmae;
4047         u32 opcode;
4048         int loader_idx = PMF_DMAE_C(bp);
4049         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4050
4051         bp->executer_idx = 0;
4052
4053         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4054                   DMAE_CMD_C_ENABLE |
4055                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4056 #ifdef __BIG_ENDIAN
4057                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4058 #else
4059                   DMAE_CMD_ENDIANITY_DW_SWAP |
4060 #endif
4061                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4062                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4063
4064         if (bp->port.port_stx) {
4065
4066                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4067                 if (bp->func_stx)
4068                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4069                 else
4070                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4071                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4072                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4073                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4074                 dmae->dst_addr_hi = 0;
4075                 dmae->len = sizeof(struct host_port_stats) >> 2;
4076                 if (bp->func_stx) {
4077                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4078                         dmae->comp_addr_hi = 0;
4079                         dmae->comp_val = 1;
4080                 } else {
4081                         dmae->comp_addr_lo =
4082                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4083                         dmae->comp_addr_hi =
4084                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4085                         dmae->comp_val = DMAE_COMP_VAL;
4086
4087                         *stats_comp = 0;
4088                 }
4089         }
4090
4091         if (bp->func_stx) {
4092
4093                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4094                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4095                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4096                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4097                 dmae->dst_addr_lo = bp->func_stx >> 2;
4098                 dmae->dst_addr_hi = 0;
4099                 dmae->len = sizeof(struct host_func_stats) >> 2;
4100                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4101                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4102                 dmae->comp_val = DMAE_COMP_VAL;
4103
4104                 *stats_comp = 0;
4105         }
4106 }
4107
4108 static void bnx2x_stats_stop(struct bnx2x *bp)
4109 {
4110         int update = 0;
4111
4112         bnx2x_stats_comp(bp);
4113
4114         if (bp->port.pmf)
4115                 update = (bnx2x_hw_stats_update(bp) == 0);
4116
4117         update |= (bnx2x_storm_stats_update(bp) == 0);
4118
4119         if (update) {
4120                 bnx2x_net_stats_update(bp);
4121
4122                 if (bp->port.pmf)
4123                         bnx2x_port_stats_stop(bp);
4124
4125                 bnx2x_hw_stats_post(bp);
4126                 bnx2x_stats_comp(bp);
4127         }
4128 }
4129
4130 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4131 {
4132 }
4133
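/* Statistics state machine: bnx2x_stats_stm[state][event] gives the
   action to run and the next state to enter.  For example, a LINK_UP
   event in the DISABLED state runs bnx2x_stats_start() and moves the
   machine to STATS_STATE_ENABLED */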
4134 static const struct {
4135         void (*action)(struct bnx2x *bp);
4136         enum bnx2x_stats_state next_state;
4137 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4138 /* state        event   */
4139 {
4140 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4141 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4142 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4143 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4144 },
4145 {
4146 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4147 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4148 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4149 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4150 }
4151 };
4152
4153 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4154 {
4155         enum bnx2x_stats_state state = bp->stats_state;
4156
4157         bnx2x_stats_stm[state][event].action(bp);
4158         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4159
4160         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4161                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4162                    state, event, bp->stats_state);
4163 }
4164
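/* Periodic timer, re-armed with bp->current_interval on each run: in
   poll mode it services queue 0 by hand, and on managed (non-NOMCP)
   devices it exchanges a heartbeat sequence number with the MCP
   firmware before kicking a statistics update */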
4165 static void bnx2x_timer(unsigned long data)
4166 {
4167         struct bnx2x *bp = (struct bnx2x *) data;
4168
4169         if (!netif_running(bp->dev))
4170                 return;
4171
4172         if (atomic_read(&bp->intr_sem) != 0)
4173                 goto timer_restart;
4174
4175         if (poll) {
4176                 struct bnx2x_fastpath *fp = &bp->fp[0];
4178 
4179                 bnx2x_tx_int(fp, 1000);
4180                 bnx2x_rx_int(fp, 1000);
4181         }
4182
4183         if (!BP_NOMCP(bp)) {
4184                 int func = BP_FUNC(bp);
4185                 u32 drv_pulse;
4186                 u32 mcp_pulse;
4187
4188                 ++bp->fw_drv_pulse_wr_seq;
4189                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4190                 /* TBD - add SYSTEM_TIME */
4191                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4192                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4193
4194                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4195                              MCP_PULSE_SEQ_MASK);
4196                 /* The delta between driver pulse and mcp response
4197                  * should be 1 (before mcp response) or 0 (after mcp response)
4198                  */
4199                 if ((drv_pulse != mcp_pulse) &&
4200                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4201                         /* someone lost a heartbeat... */
4202                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4203                                   drv_pulse, mcp_pulse);
4204                 }
4205         }
4206
4207         if ((bp->state == BNX2X_STATE_OPEN) ||
4208             (bp->state == BNX2X_STATE_DISABLED))
4209                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4210
4211 timer_restart:
4212         mod_timer(&bp->timer, jiffies + bp->current_interval);
4213 }
4214
4215 /* end of Statistics */
4216
4217 /* nic init */
4218
4219 /*
4220  * nic init service functions
4221  */
4222
4223 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4224 {
4225         int port = BP_PORT(bp);
4226
4227         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4228                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4229                         sizeof(struct ustorm_status_block)/4);
4230         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4231                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4232                         sizeof(struct cstorm_status_block)/4);
4233 }
4234
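/* Publish the DMA address of this status block to the USTORM/CSTORM
   internal memories and start with host coalescing disabled on every
   index; bnx2x_update_coalesce() re-enables the Rx/Tx CQ indices when
   the coalescing ticks are non-zero */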
4235 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4236                           dma_addr_t mapping, int sb_id)
4237 {
4238         int port = BP_PORT(bp);
4239         int func = BP_FUNC(bp);
4240         int index;
4241         u64 section;
4242
4243         /* USTORM */
4244         section = ((u64)mapping) + offsetof(struct host_status_block,
4245                                             u_status_block);
4246         sb->u_status_block.status_block_id = sb_id;
4247
4248         REG_WR(bp, BAR_USTRORM_INTMEM +
4249                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4250         REG_WR(bp, BAR_USTRORM_INTMEM +
4251                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4252                U64_HI(section));
4253         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4254                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4255
4256         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4257                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4258                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4259
4260         /* CSTORM */
4261         section = ((u64)mapping) + offsetof(struct host_status_block,
4262                                             c_status_block);
4263         sb->c_status_block.status_block_id = sb_id;
4264
4265         REG_WR(bp, BAR_CSTRORM_INTMEM +
4266                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4267         REG_WR(bp, BAR_CSTRORM_INTMEM +
4268                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4269                U64_HI(section));
4270         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4271                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4272
4273         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4274                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4275                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4276
4277         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4278 }
4279
4280 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4281 {
4282         int func = BP_FUNC(bp);
4283
4284         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4285                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4286                         sizeof(struct ustorm_def_status_block)/4);
4287         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4288                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4289                         sizeof(struct cstorm_def_status_block)/4);
4290         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4291                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4292                         sizeof(struct xstorm_def_status_block)/4);
4293         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4294                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4295                         sizeof(struct tstorm_def_status_block)/4);
4296 }
4297
4298 static void bnx2x_init_def_sb(struct bnx2x *bp,
4299                               struct host_def_status_block *def_sb,
4300                               dma_addr_t mapping, int sb_id)
4301 {
4302         int port = BP_PORT(bp);
4303         int func = BP_FUNC(bp);
4304         int index, val, reg_offset;
4305         u64 section;
4306
4307         /* ATTN */
4308         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4309                                             atten_status_block);
4310         def_sb->atten_status_block.status_block_id = sb_id;
4311
4312         bp->attn_state = 0;
4313
4314         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4315                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4316
4317         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4318                 bp->attn_group[index].sig[0] = REG_RD(bp,
4319                                                      reg_offset + 0x10*index);
4320                 bp->attn_group[index].sig[1] = REG_RD(bp,
4321                                                reg_offset + 0x4 + 0x10*index);
4322                 bp->attn_group[index].sig[2] = REG_RD(bp,
4323                                                reg_offset + 0x8 + 0x10*index);
4324                 bp->attn_group[index].sig[3] = REG_RD(bp,
4325                                                reg_offset + 0xc + 0x10*index);
4326         }
4327
4328         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4329                              HC_REG_ATTN_MSG0_ADDR_L);
4330
4331         REG_WR(bp, reg_offset, U64_LO(section));
4332         REG_WR(bp, reg_offset + 4, U64_HI(section));
4333
4334         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4335
4336         val = REG_RD(bp, reg_offset);
4337         val |= sb_id;
4338         REG_WR(bp, reg_offset, val);
4339
4340         /* USTORM */
4341         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4342                                             u_def_status_block);
4343         def_sb->u_def_status_block.status_block_id = sb_id;
4344
4345         REG_WR(bp, BAR_USTRORM_INTMEM +
4346                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4347         REG_WR(bp, BAR_USTRORM_INTMEM +
4348                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4349                U64_HI(section));
4350         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4351                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4352
4353         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4354                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4355                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4356
4357         /* CSTORM */
4358         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4359                                             c_def_status_block);
4360         def_sb->c_def_status_block.status_block_id = sb_id;
4361
4362         REG_WR(bp, BAR_CSTRORM_INTMEM +
4363                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4364         REG_WR(bp, BAR_CSTRORM_INTMEM +
4365                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4366                U64_HI(section));
4367         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4368                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4369
4370         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4371                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4372                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4373
4374         /* TSTORM */
4375         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4376                                             t_def_status_block);
4377         def_sb->t_def_status_block.status_block_id = sb_id;
4378
4379         REG_WR(bp, BAR_TSTRORM_INTMEM +
4380                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4381         REG_WR(bp, BAR_TSTRORM_INTMEM +
4382                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4383                U64_HI(section));
4384         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4385                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4386
4387         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4388                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4389                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4390
4391         /* XSTORM */
4392         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4393                                             x_def_status_block);
4394         def_sb->x_def_status_block.status_block_id = sb_id;
4395
4396         REG_WR(bp, BAR_XSTRORM_INTMEM +
4397                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4398         REG_WR(bp, BAR_XSTRORM_INTMEM +
4399                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4400                U64_HI(section));
4401         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4402                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4403
4404         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4405                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4406                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4407
4408         bp->stats_pending = 0;
4409         bp->set_mac_pending = 0;
4410
4411         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4412 }
4413
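/* Program the status-block coalescing timeouts.  The timeout
   registers are written as ticks/12 (the hardware unit is presumably
   12 usec); a zero ticks value disables host coalescing on that
   index instead */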
4414 static void bnx2x_update_coalesce(struct bnx2x *bp)
4415 {
4416         int port = BP_PORT(bp);
4417         int i;
4418
4419         for_each_queue(bp, i) {
4420                 int sb_id = bp->fp[i].sb_id;
4421
4422                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4423                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4424                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4425                                                     U_SB_ETH_RX_CQ_INDEX),
4426                         bp->rx_ticks/12);
4427                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4428                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4429                                                      U_SB_ETH_RX_CQ_INDEX),
4430                          bp->rx_ticks ? 0 : 1);
4431
4432                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4433                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4434                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4435                                                     C_SB_ETH_TX_CQ_INDEX),
4436                         bp->tx_ticks/12);
4437                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4438                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4439                                                      C_SB_ETH_TX_CQ_INDEX),
4440                          bp->tx_ticks ? 0 : 1);
4441         }
4442 }
4443
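/* Release any skbs still parked in the first 'last' TPA bins; only
   bins in the BNX2X_TPA_START state hold a live DMA mapping that
   must be unmapped first */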
4444 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4445                                        struct bnx2x_fastpath *fp, int last)
4446 {
4447         int i;
4448
4449         for (i = 0; i < last; i++) {
4450                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4451                 struct sk_buff *skb = rx_buf->skb;
4452
4453                 if (skb == NULL) {
4454                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4455                         continue;
4456                 }
4457
4458                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4459                         pci_unmap_single(bp->pdev,
4460                                          pci_unmap_addr(rx_buf, mapping),
4461                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4462
4463                 dev_kfree_skb(skb);
4464                 rx_buf->skb = NULL;
4465         }
4466 }
4467
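/* Rx ring setup.  The last element(s) of each BD/SGE/CQE page are
   reserved as "next page" pointers chaining to the following page,
   which is why the init loops below index from the page boundaries
   (RX_SGE_CNT*i - 2, RX_DESC_CNT*i - 2, RCQ_DESC_CNT*i - 1) */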
4468 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4469 {
4470         int func = BP_FUNC(bp);
4471         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4472                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4473         u16 ring_prod, cqe_ring_prod;
4474         int i, j;
4475
4476         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4477         DP(NETIF_MSG_IFUP,
4478            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4479
4480         if (bp->flags & TPA_ENABLE_FLAG) {
4481
4482                 for_each_rx_queue(bp, j) {
4483                         struct bnx2x_fastpath *fp = &bp->fp[j];
4484
4485                         for (i = 0; i < max_agg_queues; i++) {
4486                                 fp->tpa_pool[i].skb =
4487                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4488                                 if (!fp->tpa_pool[i].skb) {
4489                                         BNX2X_ERR("Failed to allocate TPA "
4490                                                   "skb pool for queue[%d] - "
4491                                                   "disabling TPA on this "
4492                                                   "queue!\n", j);
4493                                         bnx2x_free_tpa_pool(bp, fp, i);
4494                                         fp->disable_tpa = 1;
4495                                         break;
4496                                 }
4497                                 /* this queue's pool, not bp->fp[0]'s */
4498                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4499                                                    mapping, 0);
4500                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4501                         }
4502                 }
4503         }
4504
4505         for_each_rx_queue(bp, j) {
4506                 struct bnx2x_fastpath *fp = &bp->fp[j];
4507
4508                 fp->rx_bd_cons = 0;
4509                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4510                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4511
4512                 /* "next page" elements initialization */
4513                 /* SGE ring */
4514                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4515                         struct eth_rx_sge *sge;
4516
4517                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4518                         sge->addr_hi =
4519                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4520                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4521                         sge->addr_lo =
4522                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4523                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4524                 }
4525
4526                 bnx2x_init_sge_ring_bit_mask(fp);
4527
4528                 /* RX BD ring */
4529                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4530                         struct eth_rx_bd *rx_bd;
4531
4532                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4533                         rx_bd->addr_hi =
4534                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4535                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4536                         rx_bd->addr_lo =
4537                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4538                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4539                 }
4540
4541                 /* CQ ring */
4542                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4543                         struct eth_rx_cqe_next_page *nextpg;
4544
4545                         nextpg = (struct eth_rx_cqe_next_page *)
4546                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4547                         nextpg->addr_hi =
4548                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4549                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4550                         nextpg->addr_lo =
4551                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4552                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4553                 }
4554
4555                 /* Allocate SGEs and initialize the ring elements */
4556                 for (i = 0, ring_prod = 0;
4557                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4558
4559                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4560                                 BNX2X_ERR("was only able to allocate "
4561                                           "%d rx sges\n", i);
4562                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4563                                 /* Cleanup already allocated elements */
4564                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4565                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4566                                 fp->disable_tpa = 1;
4567                                 ring_prod = 0;
4568                                 break;
4569                         }
4570                         ring_prod = NEXT_SGE_IDX(ring_prod);
4571                 }
4572                 fp->rx_sge_prod = ring_prod;
4573
4574                 /* Allocate BDs and initialize BD ring */
4575                 fp->rx_comp_cons = 0;
4576                 cqe_ring_prod = ring_prod = 0;
4577                 for (i = 0; i < bp->rx_ring_size; i++) {
4578                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4579                                 BNX2X_ERR("was only able to allocate "
4580                                           "%d rx skbs on queue[%d]\n", i, j);
4581                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4582                                 break;
4583                         }
4584                         ring_prod = NEXT_RX_IDX(ring_prod);
4585                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4586                         WARN_ON(ring_prod <= i);
4587                 }
4588
4589                 fp->rx_bd_prod = ring_prod;
4590                 /* must not have more available CQEs than BDs */
4591                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4592                                        cqe_ring_prod);
4593                 fp->rx_pkt = fp->rx_calls = 0;
4594
4595                 /* Warning!
4596                  * This will generate an interrupt (to the TSTORM);
4597                  * it must only be done after the chip is initialized.
4598                  */
4599                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4600                                      fp->rx_sge_prod);
4601                 if (j != 0)
4602                         continue;
4603
4604                 REG_WR(bp, BAR_USTRORM_INTMEM +
4605                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4606                        U64_LO(fp->rx_comp_mapping));
4607                 REG_WR(bp, BAR_USTRORM_INTMEM +
4608                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4609                        U64_HI(fp->rx_comp_mapping));
4610         }
4611 }
4612
4613 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4614 {
4615         int i, j;
4616
4617         for_each_tx_queue(bp, j) {
4618                 struct bnx2x_fastpath *fp = &bp->fp[j];
4619
4620                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4621                         struct eth_tx_bd *tx_bd =
4622                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4623
4624                         tx_bd->addr_hi =
4625                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4626                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4627                         tx_bd->addr_lo =
4628                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4629                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4630                 }
4631
4632                 fp->tx_pkt_prod = 0;
4633                 fp->tx_pkt_cons = 0;
4634                 fp->tx_bd_prod = 0;
4635                 fp->tx_bd_cons = 0;
4636                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4637                 fp->tx_pkt = 0;
4638         }
4639 }
4640
4641 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4642 {
4643         int func = BP_FUNC(bp);
4644
4645         spin_lock_init(&bp->spq_lock);
4646
4647         bp->spq_left = MAX_SPQ_PENDING;
4648         bp->spq_prod_idx = 0;
4649         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4650         bp->spq_prod_bd = bp->spq;
4651         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4652
4653         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4654                U64_LO(bp->spq_mapping));
4655         REG_WR(bp,
4656                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4657                U64_HI(bp->spq_mapping));
4658
4659         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4660                bp->spq_prod_idx);
4661 }
4662
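/* Fill the per-connection ETH context for each queue: the USTORM side
   describes the Rx BD/SGE rings and buffer sizes, the XSTORM side the
   Tx BD ring and doorbell data, and the CSTORM side the Tx completion
   status-block index */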
4663 static void bnx2x_init_context(struct bnx2x *bp)
4664 {
4665         int i;
4666
4667         for_each_queue(bp, i) {
4668                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4669                 struct bnx2x_fastpath *fp = &bp->fp[i];
4670                 u8 cl_id = fp->cl_id;
4671                 u8 sb_id = fp->sb_id;
4672
4673                 context->ustorm_st_context.common.sb_index_numbers =
4674                                                 BNX2X_RX_SB_INDEX_NUM;
4675                 context->ustorm_st_context.common.clientId = cl_id;
4676                 context->ustorm_st_context.common.status_block_id = sb_id;
4677                 context->ustorm_st_context.common.flags =
4678                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4679                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4680                 context->ustorm_st_context.common.statistics_counter_id =
4681                                                 cl_id;
4682                 context->ustorm_st_context.common.mc_alignment_log_size =
4683                                                 BNX2X_RX_ALIGN_SHIFT;
4684                 context->ustorm_st_context.common.bd_buff_size =
4685                                                 bp->rx_buf_size;
4686                 context->ustorm_st_context.common.bd_page_base_hi =
4687                                                 U64_HI(fp->rx_desc_mapping);
4688                 context->ustorm_st_context.common.bd_page_base_lo =
4689                                                 U64_LO(fp->rx_desc_mapping);
4690                 if (!fp->disable_tpa) {
4691                         context->ustorm_st_context.common.flags |=
4692                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4693                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4694                         context->ustorm_st_context.common.sge_buff_size =
4695                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4696                                          (u32)0xffff);
4697                         context->ustorm_st_context.common.sge_page_base_hi =
4698                                                 U64_HI(fp->rx_sge_mapping);
4699                         context->ustorm_st_context.common.sge_page_base_lo =
4700                                                 U64_LO(fp->rx_sge_mapping);
4701                 }
4702
4703                 context->ustorm_ag_context.cdu_usage =
4704                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4705                                                CDU_REGION_NUMBER_UCM_AG,
4706                                                ETH_CONNECTION_TYPE);
4707
4708                 context->xstorm_st_context.tx_bd_page_base_hi =
4709                                                 U64_HI(fp->tx_desc_mapping);
4710                 context->xstorm_st_context.tx_bd_page_base_lo =
4711                                                 U64_LO(fp->tx_desc_mapping);
4712                 context->xstorm_st_context.db_data_addr_hi =
4713                                                 U64_HI(fp->tx_prods_mapping);
4714                 context->xstorm_st_context.db_data_addr_lo =
4715                                                 U64_LO(fp->tx_prods_mapping);
4716                 context->xstorm_st_context.statistics_data = (cl_id |
4717                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4718                 context->cstorm_st_context.sb_index_number =
4719                                                 C_SB_ETH_TX_CQ_INDEX;
4720                 context->cstorm_st_context.status_block_id = sb_id;
4721
4722                 context->xstorm_ag_context.cdu_reserved =
4723                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4724                                                CDU_REGION_NUMBER_XCM_AG,
4725                                                ETH_CONNECTION_TYPE);
4726         }
4727 }
4728
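/* RSS indirection table: every slot is filled with a client id,
   cycling round-robin over the Rx queues; with RSS disabled the
   table is left untouched */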
4729 static void bnx2x_init_ind_table(struct bnx2x *bp)
4730 {
4731         int func = BP_FUNC(bp);
4732         int i;
4733
4734         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4735                 return;
4736
4737         DP(NETIF_MSG_IFUP,
4738            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4739         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4740                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4741                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4742                         bp->fp->cl_id + (i % bp->num_rx_queues));
4743 }
4744
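/* Per-client TSTORM configuration: MTU, statistics counter id and the
   optional VLAN-stripping and SGE-ring (TPA) flags, written once per
   queue under that queue's client id */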
4745 static void bnx2x_set_client_config(struct bnx2x *bp)
4746 {
4747         struct tstorm_eth_client_config tstorm_client = {0};
4748         int port = BP_PORT(bp);
4749         int i;
4750
4751         tstorm_client.mtu = bp->dev->mtu;
4752         tstorm_client.config_flags =
4753                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4754                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4755 #ifdef BCM_VLAN
4756         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4757                 tstorm_client.config_flags |=
4758                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4759                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4760         }
4761 #endif
4762
4763         if (bp->flags & TPA_ENABLE_FLAG) {
4764                 tstorm_client.max_sges_for_packet =
4765                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4766                 tstorm_client.max_sges_for_packet =
4767                         ((tstorm_client.max_sges_for_packet +
4768                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4769                         PAGES_PER_SGE_SHIFT;
4770
4771                 tstorm_client.config_flags |=
4772                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4773         }
4774
4775         for_each_queue(bp, i) {
4776                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4777
4778                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4779                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4780                        ((u32 *)&tstorm_client)[0]);
4781                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4782                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4783                        ((u32 *)&tstorm_client)[1]);
4784         }
4785
4786         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4787            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4788 }
4789
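/* Translate the driver rx_mode into TSTORM accept/drop masks; the
   mask (1 << BP_L_ID(bp)) addresses this function's leading client
   in each filter field */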
4790 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4791 {
4792         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4793         int mode = bp->rx_mode;
4794         int mask = (1 << BP_L_ID(bp));
4795         int func = BP_FUNC(bp);
4796         int i;
4797
4798         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4799
4800         switch (mode) {
4801         case BNX2X_RX_MODE_NONE: /* no Rx */
4802                 tstorm_mac_filter.ucast_drop_all = mask;
4803                 tstorm_mac_filter.mcast_drop_all = mask;
4804                 tstorm_mac_filter.bcast_drop_all = mask;
4805                 break;
4806
4807         case BNX2X_RX_MODE_NORMAL:
4808                 tstorm_mac_filter.bcast_accept_all = mask;
4809                 break;
4810
4811         case BNX2X_RX_MODE_ALLMULTI:
4812                 tstorm_mac_filter.mcast_accept_all = mask;
4813                 tstorm_mac_filter.bcast_accept_all = mask;
4814                 break;
4815
4816         case BNX2X_RX_MODE_PROMISC:
4817                 tstorm_mac_filter.ucast_accept_all = mask;
4818                 tstorm_mac_filter.mcast_accept_all = mask;
4819                 tstorm_mac_filter.bcast_accept_all = mask;
4820                 break;
4821
4822         default:
4823                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4824                 break;
4825         }
4826
4827         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4828                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4829                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4830                        ((u32 *)&tstorm_mac_filter)[i]);
4831
4832 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4833                    ((u32 *)&tstorm_mac_filter)[i]); */
4834         }
4835
4836         if (mode != BNX2X_RX_MODE_NONE)
4837                 bnx2x_set_client_config(bp);
4838 }
4839
4840 static void bnx2x_init_internal_common(struct bnx2x *bp)
4841 {
4842         int i;
4843
4844         if (bp->flags & TPA_ENABLE_FLAG) {
4845                 struct tstorm_eth_tpa_exist tpa = {0};
4846
4847                 tpa.tpa_exist = 1;
4848
4849                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4850                        ((u32 *)&tpa)[0]);
4851                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4852                        ((u32 *)&tpa)[1]);
4853         }
4854
4855         /* Zero this manually as its initialization is
4856            currently missing in the initTool */
4857         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4858                 REG_WR(bp, BAR_USTRORM_INTMEM +
4859                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4860 }
4861
4862 static void bnx2x_init_internal_port(struct bnx2x *bp)
4863 {
4864         int port = BP_PORT(bp);
4865
4866         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4867         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4868         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4869         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4870 }
4871
4872 /* Calculates the sum of vn_min_rates.
4873    It's needed for further normalization of the min_rates.
4874    Returns:
4875      the sum of vn_min_rates,
4876        or
4877      0 - if all the min_rates are 0.
4878      In the latter case the fairness algorithm should be deactivated.
4879      If not all min_rates are zero, then those that are will be set to 1.
4880  */
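/* Example: with per-VN min rates {0, X, Y, 0} (X, Y > 0), the zero
   entries are bumped to DEF_MIN_RATE before summing so every visible
   VN keeps a non-zero fairness weight; only when all of them are zero
   is vn_weight_sum forced back to 0 and fairness disabled */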
4881 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4882 {
4883         int all_zero = 1;
4884         int port = BP_PORT(bp);
4885         int vn;
4886
4887         bp->vn_weight_sum = 0;
4888         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4889                 int func = 2*vn + port;
4890                 u32 vn_cfg =
4891                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4892                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4893                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4894
4895                 /* Skip hidden vns */
4896                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4897                         continue;
4898
4899                 /* If min rate is zero - set it to 1 */
4900                 if (!vn_min_rate)
4901                         vn_min_rate = DEF_MIN_RATE;
4902                 else
4903                         all_zero = 0;
4904
4905                 bp->vn_weight_sum += vn_min_rate;
4906         }
4907
4908         /* ... only if all min rates are zeros - disable fairness */
4909         if (all_zero)
4910                 bp->vn_weight_sum = 0;
4911 }
4912
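/* Per-function internal RAM init: RSS and E1HOV configuration, the
   per-client statistics counters zeroed, the fw_stats DMA address
   published to the storms, dropless flow control thresholds (E1H)
   and the per-port rate-shaping/fairness context for multi-function
   mode */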
4913 static void bnx2x_init_internal_func(struct bnx2x *bp)
4914 {
4915         struct tstorm_eth_function_common_config tstorm_config = {0};
4916         struct stats_indication_flags stats_flags = {0};
4917         int port = BP_PORT(bp);
4918         int func = BP_FUNC(bp);
4919         int i, j;
4920         u32 offset;
4921         u16 max_agg_size;
4922
4923         if (is_multi(bp)) {
4924                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4925                 tstorm_config.rss_result_mask = MULTI_MASK;
4926         }
4927         if (IS_E1HMF(bp))
4928                 tstorm_config.config_flags |=
4929                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4930
4931         tstorm_config.leading_client_id = BP_L_ID(bp);
4932
4933         REG_WR(bp, BAR_TSTRORM_INTMEM +
4934                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4935                (*(u32 *)&tstorm_config));
4936
4937         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4938         bnx2x_set_storm_rx_mode(bp);
4939
4940         for_each_queue(bp, i) {
4941                 u8 cl_id = bp->fp[i].cl_id;
4942
4943                 /* reset xstorm per client statistics */
4944                 offset = BAR_XSTRORM_INTMEM +
4945                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4946                 for (j = 0;
4947                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4948                         REG_WR(bp, offset + j*4, 0);
4949
4950                 /* reset tstorm per client statistics */
4951                 offset = BAR_TSTRORM_INTMEM +
4952                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953                 for (j = 0;
4954                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4955                         REG_WR(bp, offset + j*4, 0);
4956
4957                 /* reset ustorm per client statistics */
4958                 offset = BAR_USTRORM_INTMEM +
4959                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4960                 for (j = 0;
4961                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4962                         REG_WR(bp, offset + j*4, 0);
4963         }
4964
4965         /* Init statistics related context */
4966         stats_flags.collect_eth = 1;
4967
4968         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4969                ((u32 *)&stats_flags)[0]);
4970         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4971                ((u32 *)&stats_flags)[1]);
4972
4973         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4974                ((u32 *)&stats_flags)[0]);
4975         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4976                ((u32 *)&stats_flags)[1]);
4977
4978         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4979                ((u32 *)&stats_flags)[0]);
4980         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4981                ((u32 *)&stats_flags)[1]);
4982
4983         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4984                ((u32 *)&stats_flags)[0]);
4985         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4986                ((u32 *)&stats_flags)[1]);
4987
4988         REG_WR(bp, BAR_XSTRORM_INTMEM +
4989                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4990                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4991         REG_WR(bp, BAR_XSTRORM_INTMEM +
4992                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4993                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4994
4995         REG_WR(bp, BAR_TSTRORM_INTMEM +
4996                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998         REG_WR(bp, BAR_TSTRORM_INTMEM +
4999                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001
5002         REG_WR(bp, BAR_USTRORM_INTMEM +
5003                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5004                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5005         REG_WR(bp, BAR_USTRORM_INTMEM +
5006                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5007                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5008
5009         if (CHIP_IS_E1H(bp)) {
5010                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5011                         IS_E1HMF(bp));
5012                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5013                         IS_E1HMF(bp));
5014                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5015                         IS_E1HMF(bp));
5016                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5017                         IS_E1HMF(bp));
5018
5019                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5020                          bp->e1hov);
5021         }
5022
5023         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5024         max_agg_size =
5025                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5026                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5027                     (u32)0xffff);
5028         for_each_rx_queue(bp, i) {
5029                 struct bnx2x_fastpath *fp = &bp->fp[i];
5030
5031                 REG_WR(bp, BAR_USTRORM_INTMEM +
5032                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5033                        U64_LO(fp->rx_comp_mapping));
5034                 REG_WR(bp, BAR_USTRORM_INTMEM +
5035                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5036                        U64_HI(fp->rx_comp_mapping));
5037
5038                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5039                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5040                          max_agg_size);
5041         }
5042
5043         /* dropless flow control */
5044         if (CHIP_IS_E1H(bp)) {
5045                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5046
5047                 rx_pause.bd_thr_low = 250;
5048                 rx_pause.cqe_thr_low = 250;
5049                 rx_pause.cos = 1;
5050                 rx_pause.sge_thr_low = 0;
5051                 rx_pause.bd_thr_high = 350;
5052                 rx_pause.cqe_thr_high = 350;
5053                 rx_pause.sge_thr_high = 0;
5054
5055                 for_each_rx_queue(bp, i) {
5056                         struct bnx2x_fastpath *fp = &bp->fp[i];
5057
5058                         if (!fp->disable_tpa) {
5059                                 rx_pause.sge_thr_low = 150;
5060                                 rx_pause.sge_thr_high = 250;
5061                         }
5062
5064                         offset = BAR_USTRORM_INTMEM +
5065                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5066                                                                    fp->cl_id);
5067                         for (j = 0;
5068                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5069                              j++)
5070                                 REG_WR(bp, offset + j*4,
5071                                        ((u32 *)&rx_pause)[j]);
5072                 }
5073         }
5074
5075         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5076
5077         /* Init rate shaping and fairness contexts */
5078         if (IS_E1HMF(bp)) {
5079                 int vn;
5080
5081                 /* During init there is no active link.
5082                    Until link is up, set link rate to 10Gbps */
5083                 bp->link_vars.line_speed = SPEED_10000;
5084                 bnx2x_init_port_minmax(bp);
5085
5086                 bnx2x_calc_vn_weight_sum(bp);
5087
5088                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5089                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5090
5091                 /* Enable rate shaping and fairness */
5092                 bp->cmng.flags.cmng_enables =
5093                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5094                 if (bp->vn_weight_sum)
5095                         bp->cmng.flags.cmng_enables |=
5096                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5097                 else
5098                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5099                            "  fairness will be disabled\n");
5100         } else {
5101                 /* rate shaping and fairness are disabled */
5102                 DP(NETIF_MSG_IFUP,
5103                    "single function mode  minmax will be disabled\n");
5104         }
5105
5106
5107         /* Store it to internal memory */
5108         if (bp->port.pmf)
5109                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5110                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5111                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5112                                ((u32 *)(&bp->cmng))[i]);
5113 }
5114
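/* The switch below falls through on purpose: a COMMON load also does
   the PORT and FUNCTION initialization, and a PORT load also does the
   FUNCTION part - hence the "no break" markers */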
5115 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5116 {
5117         switch (load_code) {
5118         case FW_MSG_CODE_DRV_LOAD_COMMON:
5119                 bnx2x_init_internal_common(bp);
5120                 /* no break */
5121
5122         case FW_MSG_CODE_DRV_LOAD_PORT:
5123                 bnx2x_init_internal_port(bp);
5124                 /* no break */
5125
5126         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5127                 bnx2x_init_internal_func(bp);
5128                 break;
5129
5130         default:
5131                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5132                 break;
5133         }
5134 }
5135
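/* Top-level NIC init: bring up the per-queue and default status
   blocks, coalescing, Rx/Tx/slowpath rings and contexts, then the
   internal memories and RSS table; interrupts are enabled only after
   all of this, since ring init already pokes the chip */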
5136 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5137 {
5138         int i;
5139
5140         for_each_queue(bp, i) {
5141                 struct bnx2x_fastpath *fp = &bp->fp[i];
5142
5143                 fp->bp = bp;
5144                 fp->state = BNX2X_FP_STATE_CLOSED;
5145                 fp->index = i;
5146                 fp->cl_id = BP_L_ID(bp) + i;
5147                 fp->sb_id = fp->cl_id;
5148                 DP(NETIF_MSG_IFUP,
5149                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5150                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5151                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5152                               fp->sb_id);
5153                 bnx2x_update_fpsb_idx(fp);
5154         }
5155
5156         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5157                           DEF_SB_ID);
5158         bnx2x_update_dsb_idx(bp);
5159         bnx2x_update_coalesce(bp);
5160         bnx2x_init_rx_rings(bp);
5161         bnx2x_init_tx_ring(bp);
5162         bnx2x_init_sp_ring(bp);
5163         bnx2x_init_context(bp);
5164         bnx2x_init_internal(bp, load_code);
5165         bnx2x_init_ind_table(bp);
5166         bnx2x_stats_init(bp);
5167
5168         /* At this point, we are ready for interrupts */
5169         atomic_set(&bp->intr_sem, 0);
5170
5171         /* flush all before enabling interrupts */
5172         mb();
5173         mmiowb();
5174
5175         bnx2x_int_enable(bp);
5176 }
5177
5178 /* end of nic init */
5179
5180 /*
5181  * gzip service functions
5182  */
5183
5184 static int bnx2x_gunzip_init(struct bnx2x *bp)
5185 {
5186         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5187                                               &bp->gunzip_mapping);
5188         if (bp->gunzip_buf  == NULL)
5189                 goto gunzip_nomem1;
5190
5191         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5192         if (bp->strm  == NULL)
5193                 goto gunzip_nomem2;
5194
5195         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5196                                       GFP_KERNEL);
5197         if (bp->strm->workspace == NULL)
5198                 goto gunzip_nomem3;
5199
5200         return 0;
5201
5202 gunzip_nomem3:
5203         kfree(bp->strm);
5204         bp->strm = NULL;
5205
5206 gunzip_nomem2:
5207         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5208                             bp->gunzip_mapping);
5209         bp->gunzip_buf = NULL;
5210
5211 gunzip_nomem1:
5212         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5213                " decompression\n", bp->dev->name);
5214         return -ENOMEM;
5215 }
5216
5217 static void bnx2x_gunzip_end(struct bnx2x *bp)
5218 {
5219         kfree(bp->strm->workspace);
5220
5221         kfree(bp->strm);
5222         bp->strm = NULL;
5223
5224         if (bp->gunzip_buf) {
5225                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5226                                     bp->gunzip_mapping);
5227                 bp->gunzip_buf = NULL;
5228         }
5229 }
5230
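/* Minimal gzip parsing: check the magic and method bytes, skip the
   fixed 10-byte header plus an optional NUL-terminated original file
   name (FNAME flag), then hand the raw deflate stream to zlib with
   negative windowBits so no zlib/gzip framing is expected */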
5231 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5232 {
5233         int n, rc;
5234
5235         /* check gzip header */
5236         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5237                 return -EINVAL;
5238
5239         n = 10;
5240
5241 #define FNAME                           0x8
5242
5243         if (zbuf[3] & FNAME)
5244                 while ((zbuf[n++] != 0) && (n < len));
5245
5246         bp->strm->next_in = zbuf + n;
5247         bp->strm->avail_in = len - n;
5248         bp->strm->next_out = bp->gunzip_buf;
5249         bp->strm->avail_out = FW_BUF_SIZE;
5250
5251         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5252         if (rc != Z_OK)
5253                 return rc;
5254
5255         rc = zlib_inflate(bp->strm, Z_FINISH);
5256         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5257                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5258                        bp->dev->name, bp->strm->msg);
5259
5260         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5261         if (bp->gunzip_outlen & 0x3)
5262                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5263                                     " gunzip_outlen (%d) not aligned\n",
5264                        bp->dev->name, bp->gunzip_outlen);
5265         bp->gunzip_outlen >>= 2;
5266
5267         zlib_inflateEnd(bp->strm);
5268
5269         if (rc == Z_STREAM_END)
5270                 return 0;
5271
5272         return rc;
5273 }
5274
5275 /* nic load/unload */
5276
5277 /*
5278  * General service functions
5279  */
5280
5281 /* send a NIG loopback debug packet */
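/* The third word of each write carries control flags (0x20 = SOP,
   0x10 = EOP), so the two writes below form a single 0x10-byte frame -
   the size bnx2x_int_mem_test() polls for in the NIG octet counter */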
5282 static void bnx2x_lb_pckt(struct bnx2x *bp)
5283 {
5284         u32 wb_write[3];
5285
5286         /* Ethernet source and destination addresses */
5287         wb_write[0] = 0x55555555;
5288         wb_write[1] = 0x55555555;
5289         wb_write[2] = 0x20;             /* SOP */
5290         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5291
5292         /* NON-IP protocol */
5293         wb_write[0] = 0x09000000;
5294         wb_write[1] = 0x55555555;
5295         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5296         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5297 }
5298
5299 /* some of the internal memories are not directly readable
5300  * from the driver; to test them we send debug packets and
5301  * check the packet counters they update
5302  */
5303 static int bnx2x_int_mem_test(struct bnx2x *bp)
5304 {
5305         int factor;
5306         int count, i;
5307         u32 val = 0;
5308
5309         if (CHIP_REV_IS_FPGA(bp))
5310                 factor = 120;
5311         else if (CHIP_REV_IS_EMUL(bp))
5312                 factor = 200;
5313         else
5314                 factor = 1;
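        /* 'factor' stretches the polling budgets and sleeps below,
         * since emulation and FPGA platforms run far slower than
         * real silicon
         */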
5315
5316         DP(NETIF_MSG_HW, "start part1\n");
5317
5318         /* Disable inputs of parser neighbor blocks */
5319         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5320         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5321         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5322         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5323
5324         /*  Write 0 to parser credits for CFC search request */
5325         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5326
5327         /* send Ethernet packet */
5328         bnx2x_lb_pckt(bp);
5329
5330         /* TODO: should the NIG statistics be reset here? */
5331         /* Wait until NIG register shows 1 packet of size 0x10 */
5332         count = 1000 * factor;
5333         while (count) {
5334
5335                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5336                 val = *bnx2x_sp(bp, wb_data[0]);
5337                 if (val == 0x10)
5338                         break;
5339
5340                 msleep(10);
5341                 count--;
5342         }
5343         if (val != 0x10) {
5344                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5345                 return -1;
5346         }
5347
5348         /* Wait until PRS register shows 1 packet */
5349         count = 1000 * factor;
5350         while (count) {
5351                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5352                 if (val == 1)
5353                         break;
5354
5355                 msleep(10);
5356                 count--;
5357         }
5358         if (val != 0x1) {
5359                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5360                 return -2;
5361         }
5362
5363         /* Reset and init BRB, PRS */
5364         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5365         msleep(50);
5366         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5367         msleep(50);
5368         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5369         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5370
5371         DP(NETIF_MSG_HW, "part2\n");
5372
5373         /* Disable inputs of parser neighbor blocks */
5374         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5375         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5376         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5377         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5378
5379         /* Write 0 to parser credits for CFC search request */
5380         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5381
5382         /* send 10 Ethernet packets */
5383         for (i = 0; i < 10; i++)
5384                 bnx2x_lb_pckt(bp);
5385
5386         /* Wait until NIG register shows 10 + 1
5387            packets with a total size of 11*0x10 = 0xb0 */
5388         count = 1000 * factor;
5389         while (count) {
5390
5391                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5392                 val = *bnx2x_sp(bp, wb_data[0]);
5393                 if (val == 0xb0)
5394                         break;
5395
5396                 msleep(10);
5397                 count--;
5398         }
5399         if (val != 0xb0) {
5400                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5401                 return -3;
5402         }
5403
5404         /* Wait until PRS register shows 2 packets */
5405         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5406         if (val != 2)
5407                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5408
5409         /* Write 1 to parser credits for CFC search request */
5410         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5411
5412         /* Wait until PRS register shows 3 packets */
5413         msleep(10 * factor);
5414         /* the PRS packet counter should now show 3 */
5415         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5416         if (val != 3)
5417                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5418
5419         /* clear NIG EOP FIFO */
5420         for (i = 0; i < 11; i++)
5421                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5422         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5423         if (val != 1) {
5424                 BNX2X_ERR("clear of NIG failed\n");
5425                 return -4;
5426         }
5427
5428         /* Reset and init BRB, PRS, NIG */
5429         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5430         msleep(50);
5431         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5432         msleep(50);
5433         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5434         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5435 #ifndef BCM_ISCSI
5436         /* set NIC mode */
5437         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5438 #endif
5439
5440         /* Enable inputs of parser neighbor blocks */
5441         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5442         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5443         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5444         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5445
5446         DP(NETIF_MSG_HW, "done\n");
5447
5448         return 0; /* OK */
5449 }
5450
5451 static void enable_blocks_attention(struct bnx2x *bp)
5452 {
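        /* writing 0 to a block's INT_MASK register unmasks all of its
         * attention bits; the few non-zero writes below deliberately
         * keep some bits masked
         */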
5453         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5454         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5455         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5456         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5457         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5458         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5459         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5460         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5461         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5462 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5463 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5464         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5465         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5466         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5467 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5468 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5469         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5470         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5471         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5472         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5473 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5474 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5475         if (CHIP_REV_IS_FPGA(bp))
5476                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5477         else
5478                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5479         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5480         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5481         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5482 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5483 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5484         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5485         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5486 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5487         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5488 }
5489
5490
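/* assert reset on the common blocks; judging by the usage throughout
 * this file, writing 1s to RESET_REG_x_CLEAR puts the corresponding
 * blocks into reset, while writing 1s to RESET_REG_x_SET (as done in
 * bnx2x_init_common() below) releases them
 */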
5491 static void bnx2x_reset_common(struct bnx2x *bp)
5492 {
5493         /* reset_common */
5494         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5495                0xd3ffff7f);
5496         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5497 }
5498
5499 static int bnx2x_init_common(struct bnx2x *bp)
5500 {
5501         u32 val, i;
5502
5503         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5504
5505         bnx2x_reset_common(bp);
5506         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5507         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5508
5509         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5510         if (CHIP_IS_E1H(bp))
5511                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5512
5513         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5514         msleep(30);
5515         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5516
5517         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5518         if (CHIP_IS_E1(bp)) {
5519                 /* enable HW interrupt from PXP on USDM overflow
5520                    bit 16 on INT_MASK_0 */
5521                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5522         }
5523
5524         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5525         bnx2x_init_pxp(bp);
5526
5527 #ifdef __BIG_ENDIAN
5528         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5529         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5530         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5531         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5532         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5533         /* make sure this value is 0 */
5534         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5535
5536 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5537         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5538         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5539         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5540         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5541 #endif
5542
5543         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5544 #ifdef BCM_ISCSI
5545         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5546         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5547         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5548 #endif
5549
5550         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5551                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5552
5553         /* let the HW do its magic ... */
5554         msleep(100);
5555         /* finish PXP init */
5556         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5557         if (val != 1) {
5558                 BNX2X_ERR("PXP2 CFG failed\n");
5559                 return -EBUSY;
5560         }
5561         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5562         if (val != 1) {
5563                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5564                 return -EBUSY;
5565         }
5566
5567         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5568         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5569
5570         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5571
5572         /* clean the DMAE memory */
5573         bp->dmae_ready = 1;
5574         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5575
5576         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5577         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5578         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5579         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5580
5581         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5582         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5583         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5584         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5585
5586         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5587         /* soft reset pulse */
5588         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5589         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5590
5591 #ifdef BCM_ISCSI
5592         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5593 #endif
5594
5595         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5596         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5597         if (!CHIP_REV_IS_SLOW(bp)) {
5598                 /* enable hw interrupt from doorbell Q */
5599                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5600         }
5601
5602         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5603         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5604         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5605         /* set NIC mode */
5606         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5607         if (CHIP_IS_E1H(bp))
5608                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5609
5610         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5611         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5612         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5613         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5614
5615         if (CHIP_IS_E1H(bp)) {
5616                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5617                                 STORM_INTMEM_SIZE_E1H/2);
5618                 bnx2x_init_fill(bp,
5619                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5620                                 0, STORM_INTMEM_SIZE_E1H/2);
5621                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5622                                 STORM_INTMEM_SIZE_E1H/2);
5623                 bnx2x_init_fill(bp,
5624                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5625                                 0, STORM_INTMEM_SIZE_E1H/2);
5626                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5627                                 STORM_INTMEM_SIZE_E1H/2);
5628                 bnx2x_init_fill(bp,
5629                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5630                                 0, STORM_INTMEM_SIZE_E1H/2);
5631                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5632                                 STORM_INTMEM_SIZE_E1H/2);
5633                 bnx2x_init_fill(bp,
5634                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5635                                 0, STORM_INTMEM_SIZE_E1H/2);
5636         } else { /* E1 */
5637                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5638                                 STORM_INTMEM_SIZE_E1);
5639                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5640                                 STORM_INTMEM_SIZE_E1);
5641                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5642                                 STORM_INTMEM_SIZE_E1);
5643                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5644                                 STORM_INTMEM_SIZE_E1);
5645         }
5646
5647         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5648         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5649         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5650         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5651
5652         /* sync semi rtc */
5653         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5654                0x80000000);
5655         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5656                0x80000000);
5657
5658         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5659         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5660         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5661
5662         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5663         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5664                 REG_WR(bp, i, 0xc0cac01a);
5665                 /* TODO: replace with something meaningful */
5666         }
5667         bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5668         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5669
5670         if (sizeof(union cdu_context) != 1024)
5671                 /* we currently assume that a context is 1024 bytes */
5672                 printk(KERN_ALERT PFX "please adjust the size of"
5673                        " cdu_context (%ld)\n", (long)sizeof(union cdu_context));
5674
5675         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5676         val = (4 << 24) + (0 << 12) + 1024;
5677         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5678         if (CHIP_IS_E1(bp)) {
5679                 /* !!! fix pxp client credit until excel update */
5680                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5681                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5682         }
5683
5684         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5685         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5686         /* enable context validation interrupt from CFC */
5687         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5688
5689         /* set the thresholds to prevent CFC/CDU race */
5690         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5691
5692         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5693         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5694
5695         /* PXPCS COMMON comes here */
5696         /* Reset PCIE errors for debug */
5697         REG_WR(bp, 0x2814, 0xffffffff);
5698         REG_WR(bp, 0x3820, 0xffffffff);
5699
5700         /* EMAC0 COMMON comes here */
5701         /* EMAC1 COMMON comes here */
5702         /* DBU COMMON comes here */
5703         /* DBG COMMON comes here */
5704
5705         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5706         if (CHIP_IS_E1H(bp)) {
5707                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5708                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5709         }
5710
5711         if (CHIP_REV_IS_SLOW(bp))
5712                 msleep(200);
5713
5714         /* finish CFC init */
5715         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5716         if (val != 1) {
5717                 BNX2X_ERR("CFC LL_INIT failed\n");
5718                 return -EBUSY;
5719         }
5720         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5721         if (val != 1) {
5722                 BNX2X_ERR("CFC AC_INIT failed\n");
5723                 return -EBUSY;
5724         }
5725         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5726         if (val != 1) {
5727                 BNX2X_ERR("CFC CAM_INIT failed\n");
5728                 return -EBUSY;
5729         }
5730         REG_WR(bp, CFC_REG_DEBUG0, 0);
5731
5732         /* read NIG statistic
5733            to see if this is our first up since powerup */
5734         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5735         val = *bnx2x_sp(bp, wb_data[0]);
5736
5737         /* do internal memory self test */
5738         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5739                 BNX2X_ERR("internal mem self test failed\n");
5740                 return -EBUSY;
5741         }
5742
5743         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5744         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5745         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5746         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5747                 bp->port.need_hw_lock = 1;
5748                 break;
5749
5750         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5751                 /* Fan failure is indicated by SPIO 5 */
5752                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5753                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5754
5755                 /* set to active low mode */
5756                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5757                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5758                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5759                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5760
5761                 /* enable interrupt to signal the IGU */
5762                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5763                 val |= (1 << MISC_REGISTERS_SPIO_5);
5764                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5765                 break;
5766
5767         default:
5768                 break;
5769         }
5770
5771         /* clear PXP2 attentions */
5772         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5773
5774         enable_blocks_attention(bp);
5775
5776         if (!BP_NOMCP(bp)) {
5777                 bnx2x_acquire_phy_lock(bp);
5778                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5779                 bnx2x_release_phy_lock(bp);
5780         } else
5781                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5782
5783         return 0;
5784 }
5785
5786 static int bnx2x_init_port(struct bnx2x *bp)
5787 {
5788         int port = BP_PORT(bp);
5789         u32 low, high;
5790         u32 val;
5791
5792         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5793
5794         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5795
5796         /* Port PXP comes here */
5797         /* Port PXP2 comes here */
5798 #ifdef BCM_ISCSI
5799         /* Port0  1
5800          * Port1  385 */
5801         i++;
5802         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5803         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5804         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5805         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5806
5807         /* Port0  2
5808          * Port1  386 */
5809         i++;
5810         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5811         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5812         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5813         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5814
5815         /* Port0  3
5816          * Port1  387 */
5817         i++;
5818         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5819         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5820         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5821         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5822 #endif
5823         /* Port CMs come here */
5824         bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5825                              (port ? XCM_PORT1_END : XCM_PORT0_END));
5826
5827         /* Port QM comes here */
5828 #ifdef BCM_ISCSI
5829         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5830         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5831
5832         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5833                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5834 #endif
5835         /* Port DQ comes here */
5836
5837         bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5838                              (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5839         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5840                 /* no pause for emulation and FPGA */
5841                 low = 0;
5842                 high = 513;
5843         } else {
5844                 if (IS_E1HMF(bp))
5845                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5846                 else if (bp->dev->mtu > 4096) {
5847                         if (bp->flags & ONE_PORT_FLAG)
5848                                 low = 160;
5849                         else {
5850                                 val = bp->dev->mtu;
5851                                 /* (24*1024 + val*4)/256 */
5852                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5853                         }
5854                 } else
5855                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5856                 high = low + 56;        /* 14*1024/256 */
5857         }
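        /* e.g. for mtu 9000 in the normal multi-port case this gives
         * low = 96 + 9000/64 + 1 = 237 and high = 237 + 56 = 293,
         * both in units of 256 bytes
         */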
5858         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5859         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5860
5861
5862         /* Port PRS comes here */
5863         /* Port TSDM comes here */
5864         /* Port CSDM comes here */
5865         /* Port USDM comes here */
5866         /* Port XSDM comes here */
5867
5868         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5869                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5870         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5871                              port ? USEM_PORT1_END : USEM_PORT0_END);
5872         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5873                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5874         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5875                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5876
5877         /* Port UPB comes here */
5878         /* Port XPB comes here */
5879
5880         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5881                              port ? PBF_PORT1_END : PBF_PORT0_END);
5882
5883         /* configure PBF to work without PAUSE mtu 9000 */
5884         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5885
5886         /* update threshold */
5887         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5888         /* update init credit */
5889         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5890
5891         /* probe changes */
5892         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5893         msleep(5);
5894         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5895
5896 #ifdef BCM_ISCSI
5897         /* tell the searcher where the T2 table is */
5898         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5899
5900         wb_write[0] = U64_LO(bp->t2_mapping);
5901         wb_write[1] = U64_HI(bp->t2_mapping);
5902         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5903         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5904         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5905         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5906
5907         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5908         /* Port SRCH comes here */
5909 #endif
5910         /* Port CDU comes here */
5911         /* Port CFC comes here */
5912
5913         if (CHIP_IS_E1(bp)) {
5914                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5915                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5916         }
5917         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5918                              port ? HC_PORT1_END : HC_PORT0_END);
5919
5920         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5921                                     MISC_AEU_PORT0_START,
5922                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5923         /* init aeu_mask_attn_func_0/1:
5924          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5925          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5926          *             bits 4-7 are used for "per vn group attention" */
5927         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5928                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5929
5930         /* Port PXPCS comes here */
5931         /* Port EMAC0 comes here */
5932         /* Port EMAC1 comes here */
5933         /* Port DBU comes here */
5934         /* Port DBG comes here */
5935
5936         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5937                              port ? NIG_PORT1_END : NIG_PORT0_END);
5938
5939         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5940
5941         if (CHIP_IS_E1H(bp)) {
5942                 /* 0x2 disable e1hov, 0x1 enable */
5943                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5944                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5945
5946                 /* support pause requests from USDM, TSDM and BRB */
5947                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5948
5949                 {
5950                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5951                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5952                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5953                 }
5954         }
5955
5956         /* Port MCP comes here */
5957         /* Port DMAE comes here */
5958
5959         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5960         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5961                 {
5962                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5963
5964                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5965                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5966
5967                 /* The GPIO should be swapped if the swap register is
5968                    set and active */
5969                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5970                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5971
5972                 /* Select function upon port-swap configuration */
5973                 if (port == 0) {
5974                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5975                         aeu_gpio_mask = (swap_val && swap_override) ?
5976                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5977                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5978                 } else {
5979                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5980                         aeu_gpio_mask = (swap_val && swap_override) ?
5981                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5982                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5983                 }
5984                 val = REG_RD(bp, offset);
5985                 /* add GPIO3 to group */
5986                 val |= aeu_gpio_mask;
5987                 REG_WR(bp, offset, val);
5988                 }
5989                 break;
5990
5991         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5992                 /* add SPIO 5 to group 0 */
5993                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5994                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5995                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5996                 break;
5997
5998         default:
5999                 break;
6000         }
6001
6002         bnx2x__link_reset(bp);
6003
6004         return 0;
6005 }
6006
6007 #define ILT_PER_FUNC            (768/2)
6008 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6009 /* the phys address is shifted right 12 bits and a valid bit (1)
6010    is added at bit position 53;
6011    then, since this is a wide register(TM),
6012    we split it into two 32 bit writes
6013  */
6014 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6015 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
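/* e.g. a DMA address of 0x0000001234567000 yields
 * ONCHIP_ADDR1() == 0x01234567 (bits 43:12 of the address) and
 * ONCHIP_ADDR2() == 0x00100000 (the valid bit at position 20;
 * bits 63:44 of the address, here zero, occupy the low bits)
 */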
6016 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6017 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
6018
6019 #define CNIC_ILT_LINES          0
6020
6021 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6022 {
6023         int reg;
6024
6025         if (CHIP_IS_E1H(bp))
6026                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6027         else /* E1 */
6028                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6029
6030         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6031 }
6032
6033 static int bnx2x_init_func(struct bnx2x *bp)
6034 {
6035         int port = BP_PORT(bp);
6036         int func = BP_FUNC(bp);
6037         u32 addr, val;
6038         int i;
6039
6040         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6041
6042         /* set MSI reconfigure capability */
6043         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6044         val = REG_RD(bp, addr);
6045         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6046         REG_WR(bp, addr, val);
6047
6048         i = FUNC_ILT_BASE(func);
6049
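        /* each function owns ILT_PER_FUNC (768/2) consecutive ILT lines;
         * the first one is pointed at the CDU context memory here
         */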
6050         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6051         if (CHIP_IS_E1H(bp)) {
6052                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6053                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6054         } else /* E1 */
6055                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6056                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6057
6058
6059         if (CHIP_IS_E1H(bp)) {
6060                 for (i = 0; i < 9; i++)
6061                         bnx2x_init_block(bp,
6062                                          cm_start[func][i], cm_end[func][i]);
6063
6064                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6065                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6066         }
6067
6068         /* HC init per function */
6069         if (CHIP_IS_E1H(bp)) {
6070                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6071
6072                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6073                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6074         }
6075         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6076
6077         /* Reset PCIE errors for debug */
6078         REG_WR(bp, 0x2114, 0xffffffff);
6079         REG_WR(bp, 0x2120, 0xffffffff);
6080
6081         return 0;
6082 }
6083
6084 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6085 {
6086         int i, rc = 0;
6087
6088         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6089            BP_FUNC(bp), load_code);
6090
6091         bp->dmae_ready = 0;
6092         mutex_init(&bp->dmae_mutex);
6093         bnx2x_gunzip_init(bp);
6094
6095         switch (load_code) {
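        /* the cases below fall through on purpose: a COMMON load also
         * runs the PORT and FUNCTION init stages, and a PORT load also
         * runs the FUNCTION stage
         */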
6096         case FW_MSG_CODE_DRV_LOAD_COMMON:
6097                 rc = bnx2x_init_common(bp);
6098                 if (rc)
6099                         goto init_hw_err;
6100                 /* no break */
6101
6102         case FW_MSG_CODE_DRV_LOAD_PORT:
6103                 bp->dmae_ready = 1;
6104                 rc = bnx2x_init_port(bp);
6105                 if (rc)
6106                         goto init_hw_err;
6107                 /* no break */
6108
6109         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6110                 bp->dmae_ready = 1;
6111                 rc = bnx2x_init_func(bp);
6112                 if (rc)
6113                         goto init_hw_err;
6114                 break;
6115
6116         default:
6117                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6118                 break;
6119         }
6120
6121         if (!BP_NOMCP(bp)) {
6122                 int func = BP_FUNC(bp);
6123
6124                 bp->fw_drv_pulse_wr_seq =
6125                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6126                                  DRV_PULSE_SEQ_MASK);
6127                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6128                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6129                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6130         } else
6131                 bp->func_stx = 0;
6132
6133         /* this needs to be done before gunzip end */
6134         bnx2x_zero_def_sb(bp);
6135         for_each_queue(bp, i)
6136                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6137
6138 init_hw_err:
6139         bnx2x_gunzip_end(bp);
6140
6141         return rc;
6142 }
6143
6144 /* send the MCP a request, block until there is a reply */
6145 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6146 {
6147         int func = BP_FUNC(bp);
6148         u32 seq = ++bp->fw_seq;
6149         u32 rc = 0;
6150         u32 cnt = 1;
6151         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6152
6153         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6154         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6155
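        /* the FW acknowledges a command by echoing its sequence number
         * in fw_mb_header; poll until the echoed sequence matches ours
         * or the retry budget is exhausted
         */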
6156         do {
6157                 /* let the FW do its magic ... */
6158                 msleep(delay);
6159
6160                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6161
6162                 /* Give the FW up to 2 seconds (200 * 10ms) */
6163         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6164
6165         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6166            cnt*delay, rc, seq);
6167
6168         /* is this a reply to our command? */
6169         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6170                 rc &= FW_MSG_CODE_MASK;
6171
6172         } else {
6173                 /* FW BUG! */
6174                 BNX2X_ERR("FW failed to respond!\n");
6175                 bnx2x_fw_dump(bp);
6176                 rc = 0;
6177         }
6178
6179         return rc;
6180 }
6181
6182 static void bnx2x_free_mem(struct bnx2x *bp)
6183 {
6184
6185 #define BNX2X_PCI_FREE(x, y, size) \
6186         do { \
6187                 if (x) { \
6188                         pci_free_consistent(bp->pdev, size, x, y); \
6189                         x = NULL; \
6190                         y = 0; \
6191                 } \
6192         } while (0)
6193
6194 #define BNX2X_FREE(x) \
6195         do { \
6196                 if (x) { \
6197                         vfree(x); \
6198                         x = NULL; \
6199                 } \
6200         } while (0)
6201
6202         int i;
6203
6204         /* fastpath */
6205         /* Common */
6206         for_each_queue(bp, i) {
6207
6208                 /* status blocks */
6209                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6210                                bnx2x_fp(bp, i, status_blk_mapping),
6211                                sizeof(struct host_status_block) +
6212                                sizeof(struct eth_tx_db_data));
6213         }
6214         /* Rx */
6215         for_each_rx_queue(bp, i) {
6216
6217                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6218                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6219                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6220                                bnx2x_fp(bp, i, rx_desc_mapping),
6221                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6222
6223                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6224                                bnx2x_fp(bp, i, rx_comp_mapping),
6225                                sizeof(struct eth_fast_path_rx_cqe) *
6226                                NUM_RCQ_BD);
6227
6228                 /* SGE ring */
6229                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6230                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6231                                bnx2x_fp(bp, i, rx_sge_mapping),
6232                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6233         }
6234         /* Tx */
6235         for_each_tx_queue(bp, i) {
6236
6237                 /* fastpath tx rings: tx_buf tx_desc */
6238                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6239                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6240                                bnx2x_fp(bp, i, tx_desc_mapping),
6241                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
6242         }
6243         /* end of fastpath */
6244
6245         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6246                        sizeof(struct host_def_status_block));
6247
6248         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6249                        sizeof(struct bnx2x_slowpath));
6250
6251 #ifdef BCM_ISCSI
6252         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6253         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6254         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6255         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6256 #endif
6257         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6258
6259 #undef BNX2X_PCI_FREE
6260 #undef BNX2X_FREE
6261 }
6262
6263 static int bnx2x_alloc_mem(struct bnx2x *bp)
6264 {
6265
6266 #define BNX2X_PCI_ALLOC(x, y, size) \
6267         do { \
6268                 x = pci_alloc_consistent(bp->pdev, size, y); \
6269                 if (x == NULL) \
6270                         goto alloc_mem_err; \
6271                 memset(x, 0, size); \
6272         } while (0)
6273
6274 #define BNX2X_ALLOC(x, size) \
6275         do { \
6276                 x = vmalloc(size); \
6277                 if (x == NULL) \
6278                         goto alloc_mem_err; \
6279                 memset(x, 0, size); \
6280         } while (0)
6281
6282         int i;
6283
6284         /* fastpath */
6285         /* Common */
6286         for_each_queue(bp, i) {
6287                 bnx2x_fp(bp, i, bp) = bp;
6288
6289                 /* status blocks */
6290                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6291                                 &bnx2x_fp(bp, i, status_blk_mapping),
6292                                 sizeof(struct host_status_block) +
6293                                 sizeof(struct eth_tx_db_data));
6294         }
6295         /* Rx */
6296         for_each_rx_queue(bp, i) {
6297
6298                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6299                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6300                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6301                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6302                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6303                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6304
6305                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6306                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6307                                 sizeof(struct eth_fast_path_rx_cqe) *
6308                                 NUM_RCQ_BD);
6309
6310                 /* SGE ring */
6311                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6312                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6313                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6314                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6315                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6316         }
6317         /* Tx */
6318         for_each_tx_queue(bp, i) {
6319
6320                 bnx2x_fp(bp, i, hw_tx_prods) =
6321                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6322
6323                 bnx2x_fp(bp, i, tx_prods_mapping) =
6324                                 bnx2x_fp(bp, i, status_blk_mapping) +
6325                                 sizeof(struct host_status_block);
6326
6327                 /* fastpath tx rings: tx_buf tx_desc */
6328                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6329                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6330                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6331                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6332                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6333         }
6334         /* end of fastpath */
6335
6336         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6337                         sizeof(struct host_def_status_block));
6338
6339         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6340                         sizeof(struct bnx2x_slowpath));
6341
6342 #ifdef BCM_ISCSI
6343         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6344
6345         /* Initialize T1 */
6346         for (i = 0; i < 64*1024; i += 64) {
6347                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6348                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6349         }
6350
6351         /* allocate searcher T2 table;
6352            we allocate 1/4 of alloc num for T2
6353            (which is not entered into the ILT) */
6354         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6355
6356         /* Initialize T2 */
6357         for (i = 0; i < 16*1024; i += 64)
6358                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6359
6360         /* now fixup the last line in the block to point to the next block */
6361         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6362
6363         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6364         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6365
6366         /* QM queues (128*MAX_CONN) */
6367         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6368 #endif
6369
6370         /* Slow path ring */
6371         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6372
6373         return 0;
6374
6375 alloc_mem_err:
6376         bnx2x_free_mem(bp);
6377         return -ENOMEM;
6378
6379 #undef BNX2X_PCI_ALLOC
6380 #undef BNX2X_ALLOC
6381 }
6382
6383 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6384 {
6385         int i;
6386
6387         for_each_tx_queue(bp, i) {
6388                 struct bnx2x_fastpath *fp = &bp->fp[i];
6389
6390                 u16 bd_cons = fp->tx_bd_cons;
6391                 u16 sw_prod = fp->tx_pkt_prod;
6392                 u16 sw_cons = fp->tx_pkt_cons;
6393
6394                 while (sw_cons != sw_prod) {
6395                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6396                         sw_cons++;
6397                 }
6398         }
6399 }
6400
6401 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6402 {
6403         int i, j;
6404
6405         for_each_rx_queue(bp, j) {
6406                 struct bnx2x_fastpath *fp = &bp->fp[j];
6407
6408                 for (i = 0; i < NUM_RX_BD; i++) {
6409                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6410                         struct sk_buff *skb = rx_buf->skb;
6411
6412                         if (skb == NULL)
6413                                 continue;
6414
6415                         pci_unmap_single(bp->pdev,
6416                                          pci_unmap_addr(rx_buf, mapping),
6417                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6418
6419                         rx_buf->skb = NULL;
6420                         dev_kfree_skb(skb);
6421                 }
6422                 if (!fp->disable_tpa)
6423                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6424                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6425                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6426         }
6427 }
6428
6429 static void bnx2x_free_skbs(struct bnx2x *bp)
6430 {
6431         bnx2x_free_tx_skbs(bp);
6432         bnx2x_free_rx_skbs(bp);
6433 }
6434
6435 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6436 {
6437         int i, offset = 1;
6438
6439         free_irq(bp->msix_table[0].vector, bp->dev);
6440         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6441            bp->msix_table[0].vector);
6442
6443         for_each_queue(bp, i) {
6444                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6445                    "state %x\n", i, bp->msix_table[i + offset].vector,
6446                    bnx2x_fp(bp, i, state));
6447
6448                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6449         }
6450 }
6451
6452 static void bnx2x_free_irq(struct bnx2x *bp)
6453 {
6454         if (bp->flags & USING_MSIX_FLAG) {
6455                 bnx2x_free_msix_irqs(bp);
6456                 pci_disable_msix(bp->pdev);
6457                 bp->flags &= ~USING_MSIX_FLAG;
6458
6459         } else if (bp->flags & USING_MSI_FLAG) {
6460                 free_irq(bp->pdev->irq, bp->dev);
6461                 pci_disable_msi(bp->pdev);
6462                 bp->flags &= ~USING_MSI_FLAG;
6463
6464         } else
6465                 free_irq(bp->pdev->irq, bp->dev);
6466 }
6467
6468 static int bnx2x_enable_msix(struct bnx2x *bp)
6469 {
6470         int i, rc, offset = 1;
6471         int igu_vec = 0;
6472
6473         bp->msix_table[0].entry = igu_vec;
6474         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6475
6476         for_each_queue(bp, i) {
6477                 igu_vec = BP_L_ID(bp) + offset + i;
6478                 bp->msix_table[i + offset].entry = igu_vec;
6479                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6480                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6481         }
6482
6483         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6484                              BNX2X_NUM_QUEUES(bp) + offset);
6485         if (rc) {
6486                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6487                 return rc;
6488         }
6489
6490         bp->flags |= USING_MSIX_FLAG;
6491
6492         return 0;
6493 }
6494
6495 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6496 {
6497         int i, rc, offset = 1;
6498
6499         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6500                          bp->dev->name, bp->dev);
6501         if (rc) {
6502                 BNX2X_ERR("request sp irq failed\n");
6503                 return -EBUSY;
6504         }
6505
6506         for_each_queue(bp, i) {
6507                 struct bnx2x_fastpath *fp = &bp->fp[i];
6508
6509                 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6510                 rc = request_irq(bp->msix_table[i + offset].vector,
6511                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6512                 if (rc) {
6513                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6514                         bnx2x_free_msix_irqs(bp);
6515                         return -EBUSY;
6516                 }
6517
6518                 fp->state = BNX2X_FP_STATE_IRQ;
6519         }
6520
6521         i = BNX2X_NUM_QUEUES(bp);
6522         if (is_multi(bp))
6523                 printk(KERN_INFO PFX
6524                        "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6525                        bp->dev->name, bp->msix_table[0].vector,
6526                        bp->msix_table[offset].vector,
6527                        bp->msix_table[offset + i - 1].vector);
6528         else
6529                 printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6530                        bp->dev->name, bp->msix_table[0].vector,
6531                        bp->msix_table[offset + i - 1].vector);
6532
6533         return 0;
6534 }
6535
6536 static int bnx2x_enable_msi(struct bnx2x *bp)
6537 {
6538         int rc;
6539
6540         rc = pci_enable_msi(bp->pdev);
6541         if (rc) {
6542                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6543                 return -1;
6544         }
6545         bp->flags |= USING_MSI_FLAG;
6546
6547         return 0;
6548 }
6549
6550 static int bnx2x_req_irq(struct bnx2x *bp)
6551 {
6552         unsigned long flags;
6553         int rc;
6554
6555         if (bp->flags & USING_MSI_FLAG)
6556                 flags = 0;
6557         else
6558                 flags = IRQF_SHARED;
6559
6560         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6561                          bp->dev->name, bp->dev);
6562         if (!rc)
6563                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6564
6565         return rc;
6566 }
6567
6568 static void bnx2x_napi_enable(struct bnx2x *bp)
6569 {
6570         int i;
6571
6572         for_each_rx_queue(bp, i)
6573                 napi_enable(&bnx2x_fp(bp, i, napi));
6574 }
6575
6576 static void bnx2x_napi_disable(struct bnx2x *bp)
6577 {
6578         int i;
6579
6580         for_each_rx_queue(bp, i)
6581                 napi_disable(&bnx2x_fp(bp, i, napi));
6582 }
6583
6584 static void bnx2x_netif_start(struct bnx2x *bp)
6585 {
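        /* intr_sem tracks outstanding interrupt-disable requests; only
         * the call that brings it back down to zero re-enables NAPI,
         * HW interrupts and (when the device is open) the TX queues
         */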
6586         if (atomic_dec_and_test(&bp->intr_sem)) {
6587                 if (netif_running(bp->dev)) {
6588                         bnx2x_napi_enable(bp);
6589                         bnx2x_int_enable(bp);
6590                         if (bp->state == BNX2X_STATE_OPEN)
6591                                 netif_tx_wake_all_queues(bp->dev);
6592                 }
6593         }
6594 }
6595
6596 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6597 {
6598         bnx2x_int_disable_sync(bp, disable_hw);
6599         bnx2x_napi_disable(bp);
6600         if (netif_running(bp->dev)) {
6601                 netif_tx_disable(bp->dev);
6602                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6603         }
6604 }
6605
6606 /*
6607  * Init service functions
6608  */
6609
6610 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6611 {
6612         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6613         int port = BP_PORT(bp);
6614
6615         /* CAM allocation
6616          * unicasts 0-31:port0 32-63:port1
6617          * multicast 64-127:port0 128-191:port1
6618          */
6619         config->hdr.length = 2;
6620         config->hdr.offset = port ? 32 : 0;
6621         config->hdr.client_id = bp->fp->cl_id;
6622         config->hdr.reserved1 = 0;
6623
6624         /* primary MAC */
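        /* each CAM entry holds the MAC as three 16-bit words with the
         * earlier address octet in the high byte; e.g. on a little-endian
         * host MAC 00:11:22:33:44:55 becomes msb 0x0011, middle 0x2233,
         * lsb 0x4455 after the swab16()
         */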
6625         config->config_table[0].cam_entry.msb_mac_addr =
6626                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6627         config->config_table[0].cam_entry.middle_mac_addr =
6628                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6629         config->config_table[0].cam_entry.lsb_mac_addr =
6630                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6631         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6632         if (set)
6633                 config->config_table[0].target_table_entry.flags = 0;
6634         else
6635                 CAM_INVALIDATE(config->config_table[0]);
6636         config->config_table[0].target_table_entry.client_id = 0;
6637         config->config_table[0].target_table_entry.vlan_id = 0;
6638
6639         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6640            (set ? "setting" : "clearing"),
6641            config->config_table[0].cam_entry.msb_mac_addr,
6642            config->config_table[0].cam_entry.middle_mac_addr,
6643            config->config_table[0].cam_entry.lsb_mac_addr);
6644
6645         /* broadcast */
6646         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6647         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6648         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6649         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6650         if (set)
6651                 config->config_table[1].target_table_entry.flags =
6652                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6653         else
6654                 CAM_INVALIDATE(config->config_table[1]);
6655         config->config_table[1].target_table_entry.client_id = 0;
6656         config->config_table[1].target_table_entry.vlan_id = 0;
6657
6658         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6659                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6660                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6661 }
6662
6663 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6664 {
6665         struct mac_configuration_cmd_e1h *config =
6666                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6667
6668         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6669                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6670                 return;
6671         }
6672
6673         /* CAM allocation for E1H
6674          * unicasts: by func number
6675          * multicast: 20+FUNC*20, 20 each
6676          */
6677         config->hdr.length = 1;
6678         config->hdr.offset = BP_FUNC(bp);
6679         config->hdr.client_id = bp->fp->cl_id;
6680         config->hdr.reserved1 = 0;
6681
6682         /* primary MAC */
6683         config->config_table[0].msb_mac_addr =
6684                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6685         config->config_table[0].middle_mac_addr =
6686                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6687         config->config_table[0].lsb_mac_addr =
6688                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6689         config->config_table[0].client_id = BP_L_ID(bp);
6690         config->config_table[0].vlan_id = 0;
6691         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6692         if (set)
6693                 config->config_table[0].flags = BP_PORT(bp);
6694         else
6695                 config->config_table[0].flags =
6696                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6697
6698         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6699            (set ? "setting" : "clearing"),
6700            config->config_table[0].msb_mac_addr,
6701            config->config_table[0].middle_mac_addr,
6702            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6703
6704         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6705                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6706                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6707 }
6708
6709 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6710                              int *state_p, int poll)
6711 {
6712         /* can take a while if any port is running */
6713         int cnt = 5000;
6714
6715         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6716            poll ? "polling" : "waiting", state, idx);
6717
6718         might_sleep();
6719         while (cnt--) {
6720                 if (poll) {
6721                         bnx2x_rx_int(bp->fp, 10);
6722                         /* if index is different from 0
6723                          * the reply for some commands will
6724                          * be on the non-default queue
6725                          */
6726                         if (idx)
6727                                 bnx2x_rx_int(&bp->fp[idx], 10);
6728                 }
6729
6730                 mb(); /* state is changed by bnx2x_sp_event() */
6731                 if (*state_p == state) {
6732 #ifdef BNX2X_STOP_ON_ERROR
6733                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6734 #endif
6735                         return 0;
6736                 }
6737
6738                 msleep(1);
6739         }
6740
6741         /* timeout! */
6742         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6743                   poll ? "polling" : "waiting", state, idx);
6744 #ifdef BNX2X_STOP_ON_ERROR
6745         bnx2x_panic();
6746 #endif
6747
6748         return -EBUSY;
6749 }
6750
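/* Usage note (illustrative): callers pass poll == 1 when interrupts are
 * already disabled (e.g. on the unload path, after bnx2x_netif_stop()),
 * so the completion must be reaped by servicing the RX ring directly via
 * bnx2x_rx_int(); setup paths pass poll == 0 and rely on the interrupt
 * path (bnx2x_sp_event()) to advance *state_p.
 */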
6751 static int bnx2x_setup_leading(struct bnx2x *bp)
6752 {
6753         int rc;
6754
6755         /* reset IGU state */
6756         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6757
6758         /* SETUP ramrod */
6759         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6760
6761         /* Wait for completion */
6762         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6763
6764         return rc;
6765 }
6766
6767 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6768 {
6769         struct bnx2x_fastpath *fp = &bp->fp[index];
6770
6771         /* reset IGU state */
6772         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6773
6774         /* SETUP ramrod */
6775         fp->state = BNX2X_FP_STATE_OPENING;
6776         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6777                       fp->cl_id, 0);
6778
6779         /* Wait for completion */
6780         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6781                                  &(fp->state), 0);
6782 }
6783
6784 static int bnx2x_poll(struct napi_struct *napi, int budget);
6785
6786 static void bnx2x_set_int_mode(struct bnx2x *bp)
6787 {
6788         int num_queues;
6789
6790         switch (int_mode) {
6791         case INT_MODE_INTx:
6792         case INT_MODE_MSI:
6793                 num_queues = 1;
6794                 bp->num_rx_queues = num_queues;
6795                 bp->num_tx_queues = num_queues;
6796                 DP(NETIF_MSG_IFUP,
6797                    "set number of queues to %d\n", num_queues);
6798                 break;
6799
6800         case INT_MODE_MSIX:
6801         default:
6802                 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6803                         num_queues = min_t(u32, num_online_cpus(),
6804                                            BNX2X_MAX_QUEUES(bp));
6805                 else
6806                         num_queues = 1;
6807                 bp->num_rx_queues = num_queues;
6808                 bp->num_tx_queues = num_queues;
6809                 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6810                    "  number of tx queues to %d\n",
6811                    bp->num_rx_queues, bp->num_tx_queues);
6812                 /* if we can't use MSI-X we only need one fp,
6813                  * so try to enable MSI-X with the requested number of fp's
6814                  * and fall back to MSI or legacy INTx with one fp
6815                  */
6816                 if (bnx2x_enable_msix(bp)) {
6817                         /* failed to enable MSI-X */
6818                         num_queues = 1;
6819                         bp->num_rx_queues = num_queues;
6820                         bp->num_tx_queues = num_queues;
6821                         if (bp->multi_mode)
6822                                 BNX2X_ERR("Multi requested but failed to "
6823                                           "enable MSI-X  set number of "
6824                                           "queues to %d\n", num_queues);
6825                 }
6826                 break;
6827         }
6828         bp->dev->real_num_tx_queues = bp->num_tx_queues;
6829 }
6830
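/* Worked example for bnx2x_set_int_mode(), illustrative only: with
 * multi_mode == ETH_RSS_MODE_REGULAR on an 8-CPU host where
 * BNX2X_MAX_QUEUES(bp) is assumed to be at least 8, the MSI-X case
 * requests min(8, BNX2X_MAX_QUEUES(bp)) = 8 rx/tx queue pairs; if
 * bnx2x_enable_msix() fails, the driver falls back to a single queue
 * pair served by MSI or legacy INTx.
 */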
6831 static void bnx2x_set_rx_mode(struct net_device *dev);
6832
6833 /* must be called with rtnl_lock */
6834 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6835 {
6836         u32 load_code;
6837         int i, rc = 0;
6838 #ifdef BNX2X_STOP_ON_ERROR
6839         DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6840         if (unlikely(bp->panic))
6841                 return -EPERM;
6842 #endif
6843
6844         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6845
6846         bnx2x_set_int_mode(bp);
6847
6848         if (bnx2x_alloc_mem(bp))
6849                 return -ENOMEM;
6850
6851         for_each_rx_queue(bp, i)
6852                 bnx2x_fp(bp, i, disable_tpa) =
6853                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6854
6855         for_each_rx_queue(bp, i)
6856                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6857                                bnx2x_poll, 128);
6858
6859 #ifdef BNX2X_STOP_ON_ERROR
6860         for_each_rx_queue(bp, i) {
6861                 struct bnx2x_fastpath *fp = &bp->fp[i];
6862
6863                 fp->poll_no_work = 0;
6864                 fp->poll_calls = 0;
6865                 fp->poll_max_calls = 0;
6866                 fp->poll_complete = 0;
6867                 fp->poll_exit = 0;
6868         }
6869 #endif
6870         bnx2x_napi_enable(bp);
6871
6872         if (bp->flags & USING_MSIX_FLAG) {
6873                 rc = bnx2x_req_msix_irqs(bp);
6874                 if (rc) {
6875                         pci_disable_msix(bp->pdev);
6876                         goto load_error1;
6877                 }
6878         } else {
6879                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6880                         bnx2x_enable_msi(bp);
6881                 bnx2x_ack_int(bp);
6882                 rc = bnx2x_req_irq(bp);
6883                 if (rc) {
6884                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6885                         if (bp->flags & USING_MSI_FLAG)
6886                                 pci_disable_msi(bp->pdev);
6887                         goto load_error1;
6888                 }
6889                 if (bp->flags & USING_MSI_FLAG) {
6890                         bp->dev->irq = bp->pdev->irq;
6891                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6892                                bp->dev->name, bp->pdev->irq);
6893                 }
6894         }
6895
6896         /* Send LOAD_REQUEST command to MCP.
6897            The MCP replies with the type of LOAD command:
6898            if this is the first port to be initialized,
6899            the common blocks should be initialized as well; otherwise not
6900         */
6901         if (!BP_NOMCP(bp)) {
6902                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6903                 if (!load_code) {
6904                         BNX2X_ERR("MCP response failure, aborting\n");
6905                         rc = -EBUSY;
6906                         goto load_error2;
6907                 }
6908                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6909                         rc = -EBUSY; /* other port in diagnostic mode */
6910                         goto load_error2;
6911                 }
6912
6913         } else {
6914                 int port = BP_PORT(bp);
6915
6916                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
6917                    load_count[0], load_count[1], load_count[2]);
6918                 load_count[0]++;
6919                 load_count[1 + port]++;
6920                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
6921                    load_count[0], load_count[1], load_count[2]);
6922                 if (load_count[0] == 1)
6923                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6924                 else if (load_count[1 + port] == 1)
6925                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6926                 else
6927                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6928         }
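        /* The no-MCP bookkeeping above means: the first function loaded
         * on the device gets LOAD_COMMON, the first function on each port
         * gets LOAD_PORT, and any later function on an already-initialized
         * port gets LOAD_FUNCTION.
         */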
6929
6930         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6931             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6932                 bp->port.pmf = 1;
6933         else
6934                 bp->port.pmf = 0;
6935         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6936
6937         /* Initialize HW */
6938         rc = bnx2x_init_hw(bp, load_code);
6939         if (rc) {
6940                 BNX2X_ERR("HW init failed, aborting\n");
6941                 goto load_error2;
6942         }
6943
6944         /* Setup NIC internals and enable interrupts */
6945         bnx2x_nic_init(bp, load_code);
6946
6947         /* Send LOAD_DONE command to MCP */
6948         if (!BP_NOMCP(bp)) {
6949                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6950                 if (!load_code) {
6951                         BNX2X_ERR("MCP response failure, aborting\n");
6952                         rc = -EBUSY;
6953                         goto load_error3;
6954                 }
6955         }
6956
6957         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6958
6959         rc = bnx2x_setup_leading(bp);
6960         if (rc) {
6961                 BNX2X_ERR("Setup leading failed!\n");
6962                 goto load_error3;
6963         }
6964
6965         if (CHIP_IS_E1H(bp))
6966                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6967                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6968                         bp->state = BNX2X_STATE_DISABLED;
6969                 }
6970
6971         if (bp->state == BNX2X_STATE_OPEN)
6972                 for_each_nondefault_queue(bp, i) {
6973                         rc = bnx2x_setup_multi(bp, i);
6974                         if (rc)
6975                                 goto load_error3;
6976                 }
6977
6978         if (CHIP_IS_E1(bp))
6979                 bnx2x_set_mac_addr_e1(bp, 1);
6980         else
6981                 bnx2x_set_mac_addr_e1h(bp, 1);
6982
6983         if (bp->port.pmf)
6984                 bnx2x_initial_phy_init(bp, load_mode);
6985
6986         /* Start fast path */
6987         switch (load_mode) {
6988         case LOAD_NORMAL:
6989                 /* Tx queues should only be re-enabled */
6990                 netif_tx_wake_all_queues(bp->dev);
6991                 /* Initialize the receive filter. */
6992                 bnx2x_set_rx_mode(bp->dev);
6993                 break;
6994
6995         case LOAD_OPEN:
6996                 netif_tx_start_all_queues(bp->dev);
6997                 /* Initialize the receive filter. */
6998                 bnx2x_set_rx_mode(bp->dev);
6999                 break;
7000
7001         case LOAD_DIAG:
7002                 /* Initialize the receive filter. */
7003                 bnx2x_set_rx_mode(bp->dev);
7004                 bp->state = BNX2X_STATE_DIAG;
7005                 break;
7006
7007         default:
7008                 break;
7009         }
7010
7011         if (!bp->port.pmf)
7012                 bnx2x__link_status_update(bp);
7013
7014         /* start the timer */
7015         mod_timer(&bp->timer, jiffies + bp->current_interval);
7016
7017
7018         return 0;
7019
7020 load_error3:
7021         bnx2x_int_disable_sync(bp, 1);
7022         if (!BP_NOMCP(bp)) {
7023                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7024                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7025         }
7026         bp->port.pmf = 0;
7027         /* Free SKBs, SGEs, TPA pool and driver internals */
7028         bnx2x_free_skbs(bp);
7029         for_each_rx_queue(bp, i)
7030                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7031 load_error2:
7032         /* Release IRQs */
7033         bnx2x_free_irq(bp);
7034 load_error1:
7035         bnx2x_napi_disable(bp);
7036         for_each_rx_queue(bp, i)
7037                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7038         bnx2x_free_mem(bp);
7039
7040         return rc;
7041 }
7042
7043 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7044 {
7045         struct bnx2x_fastpath *fp = &bp->fp[index];
7046         int rc;
7047
7048         /* halt the connection */
7049         fp->state = BNX2X_FP_STATE_HALTING;
7050         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7051
7052         /* Wait for completion */
7053         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7054                                &(fp->state), 1);
7055         if (rc) /* timeout */
7056                 return rc;
7057
7058         /* delete cfc entry */
7059         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7060
7061         /* Wait for completion */
7062         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7063                                &(fp->state), 1);
7064         return rc;
7065 }
7066
7067 static int bnx2x_stop_leading(struct bnx2x *bp)
7068 {
7069         __le16 dsb_sp_prod_idx;
7070         /* if the other port is handling traffic,
7071            this can take a lot of time */
7072         int cnt = 500;
7073         int rc;
7074
7075         might_sleep();
7076
7077         /* Send HALT ramrod */
7078         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7079         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7080
7081         /* Wait for completion */
7082         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7083                                &(bp->fp[0].state), 1);
7084         if (rc) /* timeout */
7085                 return rc;
7086
7087         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7088
7089         /* Send PORT_DELETE ramrod */
7090         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7091
7092         /* Wait for completion to arrive on the default status block;
7093            we are going to reset the chip anyway,
7094            so there is not much to do if this times out
7095          */
7096         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7097                 if (!cnt) {
7098                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7099                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7100                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7101 #ifdef BNX2X_STOP_ON_ERROR
7102                         bnx2x_panic();
7103 #endif
7104                         rc = -EBUSY;
7105                         break;
7106                 }
7107                 cnt--;
7108                 msleep(1);
7109                 rmb(); /* Refresh the dsb_sp_prod */
7110         }
7111         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7112         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7113
7114         return rc;
7115 }
7116
7117 static void bnx2x_reset_func(struct bnx2x *bp)
7118 {
7119         int port = BP_PORT(bp);
7120         int func = BP_FUNC(bp);
7121         int base, i;
7122
7123         /* Configure IGU */
7124         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7125         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7126
7127         /* Clear ILT */
7128         base = FUNC_ILT_BASE(func);
7129         for (i = base; i < base + ILT_PER_FUNC; i++)
7130                 bnx2x_ilt_wr(bp, i, 0);
7131 }
7132
7133 static void bnx2x_reset_port(struct bnx2x *bp)
7134 {
7135         int port = BP_PORT(bp);
7136         u32 val;
7137
7138         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7139
7140         /* Do not rcv packets to BRB */
7141         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7142         /* Do not direct rcv packets that are not for MCP to the BRB */
7143         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7144                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7145
7146         /* Configure AEU */
7147         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7148
7149         msleep(100);
7150         /* Check for BRB port occupancy */
7151         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7152         if (val)
7153                 DP(NETIF_MSG_IFDOWN,
7154                    "BRB1 is not empty  %d blocks are occupied\n", val);
7155
7156         /* TODO: Close Doorbell port? */
7157 }
7158
7159 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7160 {
7161         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7162            BP_FUNC(bp), reset_code);
7163
7164         switch (reset_code) {
7165         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7166                 bnx2x_reset_port(bp);
7167                 bnx2x_reset_func(bp);
7168                 bnx2x_reset_common(bp);
7169                 break;
7170
7171         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7172                 bnx2x_reset_port(bp);
7173                 bnx2x_reset_func(bp);
7174                 break;
7175
7176         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7177                 bnx2x_reset_func(bp);
7178                 break;
7179
7180         default:
7181                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7182                 break;
7183         }
7184 }
7185
7186 /* must be called with rtnl_lock */
7187 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7188 {
7189         int port = BP_PORT(bp);
7190         u32 reset_code = 0;
7191         int i, cnt, rc;
7192
7193         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7194
7195         bp->rx_mode = BNX2X_RX_MODE_NONE;
7196         bnx2x_set_storm_rx_mode(bp);
7197
7198         bnx2x_netif_stop(bp, 1);
7199
7200         del_timer_sync(&bp->timer);
7201         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7202                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7203         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7204
7205         /* Release IRQs */
7206         bnx2x_free_irq(bp);
7207
7208         /* Wait until tx fastpath tasks complete */
7209         for_each_tx_queue(bp, i) {
7210                 struct bnx2x_fastpath *fp = &bp->fp[i];
7211
7212                 cnt = 1000;
7213                 smp_mb();
7214                 while (bnx2x_has_tx_work_unload(fp)) {
7215
7216                         bnx2x_tx_int(fp, 1000);
7217                         if (!cnt) {
7218                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7219                                           i);
7220 #ifdef BNX2X_STOP_ON_ERROR
7221                                 bnx2x_panic();
7222                                 return -EBUSY;
7223 #else
7224                                 break;
7225 #endif
7226                         }
7227                         cnt--;
7228                         msleep(1);
7229                         smp_mb();
7230                 }
7231         }
7232         /* Give HW time to discard old tx messages */
7233         msleep(1);
7234
7235         if (CHIP_IS_E1(bp)) {
7236                 struct mac_configuration_cmd *config =
7237                                                 bnx2x_sp(bp, mcast_config);
7238
7239                 bnx2x_set_mac_addr_e1(bp, 0);
7240
7241                 for (i = 0; i < config->hdr.length; i++)
7242                         CAM_INVALIDATE(config->config_table[i]);
7243
7244                 config->hdr.length = i;
7245                 if (CHIP_REV_IS_SLOW(bp))
7246                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7247                 else
7248                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7249                 config->hdr.client_id = bp->fp->cl_id;
7250                 config->hdr.reserved1 = 0;
7251
7252                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7253                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7254                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7255
7256         } else { /* E1H */
7257                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7258
7259                 bnx2x_set_mac_addr_e1h(bp, 0);
7260
7261                 for (i = 0; i < MC_HASH_SIZE; i++)
7262                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7263         }
7264
7265         if (unload_mode == UNLOAD_NORMAL)
7266                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7267
7268         else if (bp->flags & NO_WOL_FLAG) {
7269                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7270                 if (CHIP_IS_E1H(bp))
7271                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7272
7273         } else if (bp->wol) {
7274                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7275                 u8 *mac_addr = bp->dev->dev_addr;
7276                 u32 val;
7277                 /* The MAC address is written to entries 1-4 to
7278                    preserve entry 0, which is used by the PMF */
7279                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7280
7281                 val = (mac_addr[0] << 8) | mac_addr[1];
7282                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7283
7284                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7285                       (mac_addr[4] << 8) | mac_addr[5];
7286                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
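                /* Worked example for the two writes above: with E1HVN 0,
                 * entry = (0 + 1)*8 = 8, so MAC 00:11:22:33:44:55 is
                 * programmed as 0x0011 at MAC_MATCH + 8 and 0x22334455 at
                 * MAC_MATCH + 12, leaving entry 0 for the PMF.
                 */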
7287
7288                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7289
7290         } else
7291                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7292
7293         /* Close multi and leading connections
7294            Completions for ramrods are collected in a synchronous way */
7295         for_each_nondefault_queue(bp, i)
7296                 if (bnx2x_stop_multi(bp, i))
7297                         goto unload_error;
7298
7299         rc = bnx2x_stop_leading(bp);
7300         if (rc) {
7301                 BNX2X_ERR("Stop leading failed!\n");
7302 #ifdef BNX2X_STOP_ON_ERROR
7303                 return -EBUSY;
7304 #else
7305                 goto unload_error;
7306 #endif
7307         }
7308
7309 unload_error:
7310         if (!BP_NOMCP(bp))
7311                 reset_code = bnx2x_fw_command(bp, reset_code);
7312         else {
7313                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7314                    load_count[0], load_count[1], load_count[2]);
7315                 load_count[0]--;
7316                 load_count[1 + port]--;
7317                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7318                    load_count[0], load_count[1], load_count[2]);
7319                 if (load_count[0] == 0)
7320                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7321                 else if (load_count[1 + port] == 0)
7322                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7323                 else
7324                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7325         }
7326
7327         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7328             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7329                 bnx2x__link_reset(bp);
7330
7331         /* Reset the chip */
7332         bnx2x_reset_chip(bp, reset_code);
7333
7334         /* Report UNLOAD_DONE to MCP */
7335         if (!BP_NOMCP(bp))
7336                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7337
7338         bp->port.pmf = 0;
7339
7340         /* Free SKBs, SGEs, TPA pool and driver internals */
7341         bnx2x_free_skbs(bp);
7342         for_each_rx_queue(bp, i)
7343                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7344         for_each_rx_queue(bp, i)
7345                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7346         bnx2x_free_mem(bp);
7347
7348         bp->state = BNX2X_STATE_CLOSED;
7349
7350         netif_carrier_off(bp->dev);
7351
7352         return 0;
7353 }
7354
7355 static void bnx2x_reset_task(struct work_struct *work)
7356 {
7357         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7358
7359 #ifdef BNX2X_STOP_ON_ERROR
7360         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7361                   " so reset not done to allow debug dump,\n"
7362          KERN_ERR " you will need to reboot when done\n");
7363         return;
7364 #endif
7365
7366         rtnl_lock();
7367
7368         if (!netif_running(bp->dev))
7369                 goto reset_task_exit;
7370
7371         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7372         bnx2x_nic_load(bp, LOAD_NORMAL);
7373
7374 reset_task_exit:
7375         rtnl_unlock();
7376 }
7377
7378 /* end of nic load/unload */
7379
7380 /* ethtool_ops */
7381
7382 /*
7383  * Init service functions
7384  */
7385
7386 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7387 {
7388         switch (func) {
7389         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7390         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7391         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7392         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7393         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7394         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7395         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7396         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7397         default:
7398                 BNX2X_ERR("Unsupported function index: %d\n", func);
7399                 return (u32)(-1);
7400         }
7401 }
7402
7403 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7404 {
7405         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7406
7407         /* Flush all outstanding writes */
7408         mmiowb();
7409
7410         /* Pretend to be function 0 */
7411         REG_WR(bp, reg, 0);
7412         /* Flush the GRC transaction (in the chip) */
7413         new_val = REG_RD(bp, reg);
7414         if (new_val != 0) {
7415                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7416                           new_val);
7417                 BUG();
7418         }
7419
7420         /* From now on we are in the "like-E1" mode */
7421         bnx2x_int_disable(bp);
7422
7423         /* Flush all outstanding writes */
7424         mmiowb();
7425
7426         /* Restore the original function settings */
7427         REG_WR(bp, reg, orig_func);
7428         new_val = REG_RD(bp, reg);
7429         if (new_val != orig_func) {
7430                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7431                           orig_func, new_val);
7432                 BUG();
7433         }
7434 }
7435
7436 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7437 {
7438         if (CHIP_IS_E1H(bp))
7439                 bnx2x_undi_int_disable_e1h(bp, func);
7440         else
7441                 bnx2x_int_disable(bp);
7442 }
7443
7444 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7445 {
7446         u32 val;
7447
7448         /* Check if there is any driver already loaded */
7449         val = REG_RD(bp, MISC_REG_UNPREPARED);
7450         if (val == 0x1) {
7451                 /* Check if it is the UNDI driver
7452                  * UNDI driver initializes CID offset for normal bell to 0x7
7453                  */
7454                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7455                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7456                 if (val == 0x7) {
7457                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7458                         /* save our func */
7459                         int func = BP_FUNC(bp);
7460                         u32 swap_en;
7461                         u32 swap_val;
7462
7463                         /* clear the UNDI indication */
7464                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7465
7466                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7467
7468                         /* try to unload UNDI on port 0 */
7469                         bp->func = 0;
7470                         bp->fw_seq =
7471                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7472                                 DRV_MSG_SEQ_NUMBER_MASK);
7473                         reset_code = bnx2x_fw_command(bp, reset_code);
7474
7475                         /* if UNDI is loaded on the other port */
7476                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7477
7478                                 /* send "DONE" for previous unload */
7479                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7480
7481                                 /* unload UNDI on port 1 */
7482                                 bp->func = 1;
7483                                 bp->fw_seq =
7484                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7485                                         DRV_MSG_SEQ_NUMBER_MASK);
7486                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7487
7488                                 bnx2x_fw_command(bp, reset_code);
7489                         }
7490
7491                         /* now it's safe to release the lock */
7492                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7493
7494                         bnx2x_undi_int_disable(bp, func);
7495
7496                         /* close input traffic and wait for it */
7497                         /* Do not rcv packets to BRB */
7498                         REG_WR(bp,
7499                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7500                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7501                         /* Do not direct rcv packets that are not for MCP to
7502                          * the BRB */
7503                         REG_WR(bp,
7504                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7505                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7506                         /* clear AEU */
7507                         REG_WR(bp,
7508                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7509                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7510                         msleep(10);
7511
7512                         /* save NIG port swap info */
7513                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7514                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7515                         /* reset device */
7516                         REG_WR(bp,
7517                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7518                                0xd3ffffff);
7519                         REG_WR(bp,
7520                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7521                                0x1403);
7522                         /* take the NIG out of reset and restore swap values */
7523                         REG_WR(bp,
7524                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7525                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7526                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7527                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7528
7529                         /* send unload done to the MCP */
7530                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7531
7532                         /* restore our func and fw_seq */
7533                         bp->func = func;
7534                         bp->fw_seq =
7535                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7536                                 DRV_MSG_SEQ_NUMBER_MASK);
7537
7538                 } else
7539                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7540         }
7541 }
7542
7543 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7544 {
7545         u32 val, val2, val3, val4, id;
7546         u16 pmc;
7547
7548         /* Get the chip revision id and number. */
7549         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7550         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7551         id = ((val & 0xffff) << 16);
7552         val = REG_RD(bp, MISC_REG_CHIP_REV);
7553         id |= ((val & 0xf) << 12);
7554         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7555         id |= ((val & 0xff) << 4);
7556         val = REG_RD(bp, MISC_REG_BOND_ID);
7557         id |= (val & 0xf);
7558         bp->common.chip_id = id;
7559         bp->link_params.chip_id = bp->common.chip_id;
7560         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
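        /* e.g. a BCM57710 (chip num 0x164e) at rev Ax with metal 0 and
         * bond_id 0 composes to chip_id 0x164e0000 */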
7561
7562         val = (REG_RD(bp, 0x2874) & 0x55);
7563         if ((bp->common.chip_id & 0x1) ||
7564             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7565                 bp->flags |= ONE_PORT_FLAG;
7566                 BNX2X_DEV_INFO("single port device\n");
7567         }
7568
7569         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7570         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7571                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7572         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7573                        bp->common.flash_size, bp->common.flash_size);
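        /* Worked example: assuming NVRAM_1MB_SIZE is 0x20000 (one megabit
         * in bytes), a cfg4 size field of 3 gives 0x20000 << 3 = 0x100000,
         * i.e. a 1 MB flash */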
7574
7575         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7576         bp->link_params.shmem_base = bp->common.shmem_base;
7577         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7578
7579         if (!bp->common.shmem_base ||
7580             (bp->common.shmem_base < 0xA0000) ||
7581             (bp->common.shmem_base >= 0xC0000)) {
7582                 BNX2X_DEV_INFO("MCP not active\n");
7583                 bp->flags |= NO_MCP_FLAG;
7584                 return;
7585         }
7586
7587         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7588         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7589                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7590                 BNX2X_ERR("BAD MCP validity signature\n");
7591
7592         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7593         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7594
7595         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7596                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7597                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7598
7599         bp->link_params.feature_config_flags = 0;
7600         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7601         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7602                 bp->link_params.feature_config_flags |=
7603                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7604         else
7605                 bp->link_params.feature_config_flags &=
7606                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7607
7608         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7609         bp->common.bc_ver = val;
7610         BNX2X_DEV_INFO("bc_ver %X\n", val);
7611         if (val < BNX2X_BC_VER) {
7612                 /* for now only warn;
7613                  * later we might need to enforce this */
7614                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7615                           " please upgrade BC\n", BNX2X_BC_VER, val);
7616         }
7617
7618         if (BP_E1HVN(bp) == 0) {
7619                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7620                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7621         } else {
7622                 /* no WOL capability for E1HVN != 0 */
7623                 bp->flags |= NO_WOL_FLAG;
7624         }
7625         BNX2X_DEV_INFO("%sWoL capable\n",
7626                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7627
7628         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7629         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7630         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7631         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7632
7633         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7634                val, val2, val3, val4);
7635 }
7636
7637 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7638                                                     u32 switch_cfg)
7639 {
7640         int port = BP_PORT(bp);
7641         u32 ext_phy_type;
7642
7643         switch (switch_cfg) {
7644         case SWITCH_CFG_1G:
7645                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7646
7647                 ext_phy_type =
7648                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7649                 switch (ext_phy_type) {
7650                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7651                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7652                                        ext_phy_type);
7653
7654                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7655                                                SUPPORTED_10baseT_Full |
7656                                                SUPPORTED_100baseT_Half |
7657                                                SUPPORTED_100baseT_Full |
7658                                                SUPPORTED_1000baseT_Full |
7659                                                SUPPORTED_2500baseX_Full |
7660                                                SUPPORTED_TP |
7661                                                SUPPORTED_FIBRE |
7662                                                SUPPORTED_Autoneg |
7663                                                SUPPORTED_Pause |
7664                                                SUPPORTED_Asym_Pause);
7665                         break;
7666
7667                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7668                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7669                                        ext_phy_type);
7670
7671                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7672                                                SUPPORTED_10baseT_Full |
7673                                                SUPPORTED_100baseT_Half |
7674                                                SUPPORTED_100baseT_Full |
7675                                                SUPPORTED_1000baseT_Full |
7676                                                SUPPORTED_TP |
7677                                                SUPPORTED_FIBRE |
7678                                                SUPPORTED_Autoneg |
7679                                                SUPPORTED_Pause |
7680                                                SUPPORTED_Asym_Pause);
7681                         break;
7682
7683                 default:
7684                         BNX2X_ERR("NVRAM config error. "
7685                                   "BAD SerDes ext_phy_config 0x%x\n",
7686                                   bp->link_params.ext_phy_config);
7687                         return;
7688                 }
7689
7690                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7691                                            port*0x10);
7692                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7693                 break;
7694
7695         case SWITCH_CFG_10G:
7696                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7697
7698                 ext_phy_type =
7699                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7700                 switch (ext_phy_type) {
7701                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7702                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7703                                        ext_phy_type);
7704
7705                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7706                                                SUPPORTED_10baseT_Full |
7707                                                SUPPORTED_100baseT_Half |
7708                                                SUPPORTED_100baseT_Full |
7709                                                SUPPORTED_1000baseT_Full |
7710                                                SUPPORTED_2500baseX_Full |
7711                                                SUPPORTED_10000baseT_Full |
7712                                                SUPPORTED_TP |
7713                                                SUPPORTED_FIBRE |
7714                                                SUPPORTED_Autoneg |
7715                                                SUPPORTED_Pause |
7716                                                SUPPORTED_Asym_Pause);
7717                         break;
7718
7719                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7720                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7721                                        ext_phy_type);
7722
7723                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7724                                                SUPPORTED_1000baseT_Full |
7725                                                SUPPORTED_FIBRE |
7726                                                SUPPORTED_Autoneg |
7727                                                SUPPORTED_Pause |
7728                                                SUPPORTED_Asym_Pause);
7729                         break;
7730
7731                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7732                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7733                                        ext_phy_type);
7734
7735                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7736                                                SUPPORTED_2500baseX_Full |
7737                                                SUPPORTED_1000baseT_Full |
7738                                                SUPPORTED_FIBRE |
7739                                                SUPPORTED_Autoneg |
7740                                                SUPPORTED_Pause |
7741                                                SUPPORTED_Asym_Pause);
7742                         break;
7743
7744                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7745                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7746                                        ext_phy_type);
7747
7748                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7749                                                SUPPORTED_FIBRE |
7750                                                SUPPORTED_Pause |
7751                                                SUPPORTED_Asym_Pause);
7752                         break;
7753
7754                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7755                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7756                                        ext_phy_type);
7757
7758                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7759                                                SUPPORTED_1000baseT_Full |
7760                                                SUPPORTED_FIBRE |
7761                                                SUPPORTED_Pause |
7762                                                SUPPORTED_Asym_Pause);
7763                         break;
7764
7765                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7766                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7767                                        ext_phy_type);
7768
7769                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7770                                                SUPPORTED_1000baseT_Full |
7771                                                SUPPORTED_Autoneg |
7772                                                SUPPORTED_FIBRE |
7773                                                SUPPORTED_Pause |
7774                                                SUPPORTED_Asym_Pause);
7775                         break;
7776
7777                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7778                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7779                                        ext_phy_type);
7780
7781                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7782                                                SUPPORTED_TP |
7783                                                SUPPORTED_Autoneg |
7784                                                SUPPORTED_Pause |
7785                                                SUPPORTED_Asym_Pause);
7786                         break;
7787
7788                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7789                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7790                                        ext_phy_type);
7791
7792                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7793                                                SUPPORTED_10baseT_Full |
7794                                                SUPPORTED_100baseT_Half |
7795                                                SUPPORTED_100baseT_Full |
7796                                                SUPPORTED_1000baseT_Full |
7797                                                SUPPORTED_10000baseT_Full |
7798                                                SUPPORTED_TP |
7799                                                SUPPORTED_Autoneg |
7800                                                SUPPORTED_Pause |
7801                                                SUPPORTED_Asym_Pause);
7802                         break;
7803
7804                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7805                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7806                                   bp->link_params.ext_phy_config);
7807                         break;
7808
7809                 default:
7810                         BNX2X_ERR("NVRAM config error. "
7811                                   "BAD XGXS ext_phy_config 0x%x\n",
7812                                   bp->link_params.ext_phy_config);
7813                         return;
7814                 }
7815
7816                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7817                                            port*0x18);
7818                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7819
7820                 break;
7821
7822         default:
7823                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7824                           bp->port.link_config);
7825                 return;
7826         }
7827         bp->link_params.phy_addr = bp->port.phy_addr;
7828
7829         /* mask what we support according to speed_cap_mask */
7830         if (!(bp->link_params.speed_cap_mask &
7831                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7832                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7833
7834         if (!(bp->link_params.speed_cap_mask &
7835                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7836                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7837
7838         if (!(bp->link_params.speed_cap_mask &
7839                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7840                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7841
7842         if (!(bp->link_params.speed_cap_mask &
7843                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7844                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7845
7846         if (!(bp->link_params.speed_cap_mask &
7847                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7848                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7849                                         SUPPORTED_1000baseT_Full);
7850
7851         if (!(bp->link_params.speed_cap_mask &
7852                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7853                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7854
7855         if (!(bp->link_params.speed_cap_mask &
7856                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7857                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7858
7859         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7860 }
7861
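/* Worked example for the speed_cap_mask filtering at the end of
 * bnx2x_link_settings_supported(), illustrative only: a mask advertising
 * only PORT_HW_CFG_SPEED_CAPABILITY_D0_10G clears every other speed bit,
 * leaving SUPPORTED_10000baseT_Full plus whatever non-speed bits
 * (TP/FIBRE/Autoneg/Pause) the PHY type contributed.
 */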
7862 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7863 {
7864         bp->link_params.req_duplex = DUPLEX_FULL;
7865
7866         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7867         case PORT_FEATURE_LINK_SPEED_AUTO:
7868                 if (bp->port.supported & SUPPORTED_Autoneg) {
7869                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7870                         bp->port.advertising = bp->port.supported;
7871                 } else {
7872                         u32 ext_phy_type =
7873                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7874
7875                         if ((ext_phy_type ==
7876                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7877                             (ext_phy_type ==
7878                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7879                                 /* force 10G, no AN */
7880                                 bp->link_params.req_line_speed = SPEED_10000;
7881                                 bp->port.advertising =
7882                                                 (ADVERTISED_10000baseT_Full |
7883                                                  ADVERTISED_FIBRE);
7884                                 break;
7885                         }
7886                         BNX2X_ERR("NVRAM config error. "
7887                                   "Invalid link_config 0x%x"
7888                                   "  Autoneg not supported\n",
7889                                   bp->port.link_config);
7890                         return;
7891                 }
7892                 break;
7893
7894         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7895                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7896                         bp->link_params.req_line_speed = SPEED_10;
7897                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7898                                                 ADVERTISED_TP);
7899                 } else {
7900                         BNX2X_ERR("NVRAM config error. "
7901                                   "Invalid link_config 0x%x"
7902                                   "  speed_cap_mask 0x%x\n",
7903                                   bp->port.link_config,
7904                                   bp->link_params.speed_cap_mask);
7905                         return;
7906                 }
7907                 break;
7908
7909         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7910                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7911                         bp->link_params.req_line_speed = SPEED_10;
7912                         bp->link_params.req_duplex = DUPLEX_HALF;
7913                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7914                                                 ADVERTISED_TP);
7915                 } else {
7916                         BNX2X_ERR("NVRAM config error. "
7917                                   "Invalid link_config 0x%x"
7918                                   "  speed_cap_mask 0x%x\n",
7919                                   bp->port.link_config,
7920                                   bp->link_params.speed_cap_mask);
7921                         return;
7922                 }
7923                 break;
7924
7925         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7926                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7927                         bp->link_params.req_line_speed = SPEED_100;
7928                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7929                                                 ADVERTISED_TP);
7930                 } else {
7931                         BNX2X_ERR("NVRAM config error. "
7932                                   "Invalid link_config 0x%x"
7933                                   "  speed_cap_mask 0x%x\n",
7934                                   bp->port.link_config,
7935                                   bp->link_params.speed_cap_mask);
7936                         return;
7937                 }
7938                 break;
7939
7940         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7941                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7942                         bp->link_params.req_line_speed = SPEED_100;
7943                         bp->link_params.req_duplex = DUPLEX_HALF;
7944                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7945                                                 ADVERTISED_TP);
7946                 } else {
7947                         BNX2X_ERR("NVRAM config error. "
7948                                   "Invalid link_config 0x%x"
7949                                   "  speed_cap_mask 0x%x\n",
7950                                   bp->port.link_config,
7951                                   bp->link_params.speed_cap_mask);
7952                         return;
7953                 }
7954                 break;
7955
7956         case PORT_FEATURE_LINK_SPEED_1G:
7957                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7958                         bp->link_params.req_line_speed = SPEED_1000;
7959                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7960                                                 ADVERTISED_TP);
7961                 } else {
7962                         BNX2X_ERR("NVRAM config error. "
7963                                   "Invalid link_config 0x%x"
7964                                   "  speed_cap_mask 0x%x\n",
7965                                   bp->port.link_config,
7966                                   bp->link_params.speed_cap_mask);
7967                         return;
7968                 }
7969                 break;
7970
7971         case PORT_FEATURE_LINK_SPEED_2_5G:
7972                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7973                         bp->link_params.req_line_speed = SPEED_2500;
7974                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7975                                                 ADVERTISED_TP);
7976                 } else {
7977                         BNX2X_ERR("NVRAM config error. "
7978                                   "Invalid link_config 0x%x"
7979                                   "  speed_cap_mask 0x%x\n",
7980                                   bp->port.link_config,
7981                                   bp->link_params.speed_cap_mask);
7982                         return;
7983                 }
7984                 break;
7985
7986         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7987         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7988         case PORT_FEATURE_LINK_SPEED_10G_KR:
7989                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7990                         bp->link_params.req_line_speed = SPEED_10000;
7991                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7992                                                 ADVERTISED_FIBRE);
7993                 } else {
7994                         BNX2X_ERR("NVRAM config error. "
7995                                   "Invalid link_config 0x%x"
7996                                   "  speed_cap_mask 0x%x\n",
7997                                   bp->port.link_config,
7998                                   bp->link_params.speed_cap_mask);
7999                         return;
8000                 }
8001                 break;
8002
8003         default:
8004                 BNX2X_ERR("NVRAM config error. "
8005                           "BAD link speed link_config 0x%x\n",
8006                           bp->port.link_config);
8007                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8008                 bp->port.advertising = bp->port.supported;
8009                 break;
8010         }
8011
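        /* The requested flow control comes straight from the NVRAM link
         * config; BNX2X_FLOW_CTRL_AUTO is only meaningful when autoneg is
         * supported, hence the fallback to NONE below.
         */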
8012         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8013                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8014         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8015             !(bp->port.supported & SUPPORTED_Autoneg))
8016                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8017
8018         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8019                        "  advertising 0x%x\n",
8020                        bp->link_params.req_line_speed,
8021                        bp->link_params.req_duplex,
8022                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8023 }
8024
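/* Read the per-port link parameters (lane config, external PHY config,
 * speed capability mask and link config) from the shared memory maintained
 * by the MCP, then derive the supported/requested link settings and the
 * port MAC address from them.
 */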
8025 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8026 {
8027         int port = BP_PORT(bp);
8028         u32 val, val2;
8029         u32 config;
8030         u16 i;
8031
8032         bp->link_params.bp = bp;
8033         bp->link_params.port = port;
8034
8035         bp->link_params.lane_config =
8036                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8037         bp->link_params.ext_phy_config =
8038                 SHMEM_RD(bp,
8039                          dev_info.port_hw_config[port].external_phy_config);
8040         bp->link_params.speed_cap_mask =
8041                 SHMEM_RD(bp,
8042                          dev_info.port_hw_config[port].speed_capability_mask);
8043
8044         bp->port.link_config =
8045                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8046
8047         /* Get the rx and tx XGXS config for all 4 lanes */
8048         for (i = 0; i < 2; i++) {
8049                 val = SHMEM_RD(bp,
8050                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8051                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8052                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8053
8054                 val = SHMEM_RD(bp,
8055                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8056                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8057                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8058         }
8059
8060         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8061         if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8062                 bp->link_params.feature_config_flags |=
8063                                 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8064         else
8065                 bp->link_params.feature_config_flags &=
8066                                 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8067
8068         /* If the device is capable of WoL, set the default state according
8069          * to the HW
8070          */
8071         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8072                    (config & PORT_FEATURE_WOL_ENABLED));
8073
8074         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8075                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8076                        bp->link_params.lane_config,
8077                        bp->link_params.ext_phy_config,
8078                        bp->link_params.speed_cap_mask, bp->port.link_config);
8079
8080         bp->link_params.switch_cfg = (bp->port.link_config &
8081                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8082         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8083
8084         bnx2x_link_settings_requested(bp);
8085
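        /* The MAC address is stored in shmem as two words: mac_upper holds
         * bytes 0-1 in its low 16 bits and mac_lower holds bytes 2-5, so
         * e.g. mac_upper = 0x0000aabb, mac_lower = 0xccddeeff gives
         * aa:bb:cc:dd:ee:ff.
         */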
8086         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8087         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8088         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8089         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8090         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8091         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8092         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8093         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8094         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8095         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8096 }
8097
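/* On E1H parts, multi-function (MF) mode is detected from the per-function
 * outer-VLAN (E1HOV) tag in shmem: a tag other than the default value means
 * the function runs in MF mode and e1hov carries its outer VLAN ID.
 */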
8098 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8099 {
8100         int func = BP_FUNC(bp);
8101         u32 val, val2;
8102         int rc = 0;
8103
8104         bnx2x_get_common_hwinfo(bp);
8105
8106         bp->e1hov = 0;
8107         bp->e1hmf = 0;
8108         if (CHIP_IS_E1H(bp)) {
8109                 bp->mf_config =
8110                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8111
8112                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8113                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8114                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8115
8116                         bp->e1hov = val;
8117                         bp->e1hmf = 1;
8118                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
8119                                        "(0x%04x)\n",
8120                                        func, bp->e1hov, bp->e1hov);
8121                 } else {
8122                         BNX2X_DEV_INFO("single function mode\n");
8123                         if (BP_E1HVN(bp)) {
8124                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8125                                           "  aborting\n", func);
8126                                 rc = -EPERM;
8127                         }
8128                 }
8129         }
8130
8131         if (!BP_NOMCP(bp)) {
8132                 bnx2x_get_port_hwinfo(bp);
8133
8134                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8135                               DRV_MSG_SEQ_NUMBER_MASK);
8136                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8137         }
8138
8139         if (IS_E1HMF(bp)) {
8140                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8141                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8142                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8143                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8144                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8145                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8146                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8147                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8148                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8149                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8150                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8151                                ETH_ALEN);
8152                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8153                                ETH_ALEN);
8154                 }
8155
8156                 return rc;
8157         }
8158
8159         if (BP_NOMCP(bp)) {
8160                 /* only supposed to happen on emulation/FPGA */
8161                 BNX2X_ERR("warning: random MAC workaround active\n");
8162                 random_ether_addr(bp->dev->dev_addr);
8163                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8164         }
8165
8166         return rc;
8167 }
8168
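/* One-time driver-private initialization: read the HW info, undo any state
 * a previous UNDI driver left behind, and set the default ring sizes,
 * coalescing parameters and the periodic timer.
 */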
8169 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8170 {
8171         int func = BP_FUNC(bp);
8172         int timer_interval;
8173         int rc;
8174
8175         /* Disable interrupt handling until HW is initialized */
8176         atomic_set(&bp->intr_sem, 1);
8177
8178         mutex_init(&bp->port.phy_mutex);
8179
8180         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8181         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8182
8183         rc = bnx2x_get_hwinfo(bp);
8184
8185         /* need to reset chip if undi was active */
8186         if (!BP_NOMCP(bp))
8187                 bnx2x_undi_unload(bp);
8188
8189         if (CHIP_REV_IS_FPGA(bp))
8190                 printk(KERN_ERR PFX "FPGA detected\n");
8191
8192         if (BP_NOMCP(bp) && (func == 0))
8193                 printk(KERN_ERR PFX
8194                        "MCP disabled, must load devices in order!\n");
8195
8196         /* Set multi queue mode */
8197         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8198             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8199                 printk(KERN_ERR PFX
8200               "Multi queue disabled since the requested int_mode is not MSI-X\n");
8201                 multi_mode = ETH_RSS_MODE_DISABLED;
8202         }
8203         bp->multi_mode = multi_mode;
8204
8205
8206         /* Set TPA flags */
8207         if (disable_tpa) {
8208                 bp->flags &= ~TPA_ENABLE_FLAG;
8209                 bp->dev->features &= ~NETIF_F_LRO;
8210         } else {
8211                 bp->flags |= TPA_ENABLE_FLAG;
8212                 bp->dev->features |= NETIF_F_LRO;
8213         }
8214
8215         bp->mrrs = mrrs;
8216
8217         bp->tx_ring_size = MAX_TX_AVAIL;
8218         bp->rx_ring_size = MAX_RX_AVAIL;
8219
8220         bp->rx_csum = 1;
8221
8222         bp->tx_ticks = 50;
8223         bp->rx_ticks = 25;
8224
8225         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8226         bp->current_interval = (poll ? poll : timer_interval);
8227
8228         init_timer(&bp->timer);
8229         bp->timer.expires = jiffies + bp->current_interval;
8230         bp->timer.data = (unsigned long) bp;
8231         bp->timer.function = bnx2x_timer;
8232
8233         return rc;
8234 }
8235
8236 /*
8237  * ethtool service functions
8238  */
8239
8240 /* All ethtool functions called with rtnl_lock */
8241
8242 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8243 {
8244         struct bnx2x *bp = netdev_priv(dev);
8245
8246         cmd->supported = bp->port.supported;
8247         cmd->advertising = bp->port.advertising;
8248
8249         if (netif_carrier_ok(dev)) {
8250                 cmd->speed = bp->link_vars.line_speed;
8251                 cmd->duplex = bp->link_vars.duplex;
8252         } else {
8253                 cmd->speed = bp->link_params.req_line_speed;
8254                 cmd->duplex = bp->link_params.req_duplex;
8255         }
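        /* In multi-function mode the reported speed is capped by the
         * per-function maximum bandwidth, configured in mf_config in units
         * of 100 Mbps (e.g. a MAX_BW field of 25 caps the speed at
         * 2500 Mbps).
         */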
8256         if (IS_E1HMF(bp)) {
8257                 u16 vn_max_rate;
8258
8259                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8260                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8261                 if (vn_max_rate < cmd->speed)
8262                         cmd->speed = vn_max_rate;
8263         }
8264
8265         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8266                 u32 ext_phy_type =
8267                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8268
8269                 switch (ext_phy_type) {
8270                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8271                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8272                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8273                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8274                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8275                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8276                         cmd->port = PORT_FIBRE;
8277                         break;
8278
8279                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8280                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8281                         cmd->port = PORT_TP;
8282                         break;
8283
8284                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8285                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8286                                   bp->link_params.ext_phy_config);
8287                         break;
8288
8289                 default:
8290                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8291                            bp->link_params.ext_phy_config);
8292                         break;
8293                 }
8294         } else
8295                 cmd->port = PORT_TP;
8296
8297         cmd->phy_address = bp->port.phy_addr;
8298         cmd->transceiver = XCVR_INTERNAL;
8299
8300         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8301                 cmd->autoneg = AUTONEG_ENABLE;
8302         else
8303                 cmd->autoneg = AUTONEG_DISABLE;
8304
8305         cmd->maxtxpkt = 0;
8306         cmd->maxrxpkt = 0;
8307
8308         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8309            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8310            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8311            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8312            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8313            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8314            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8315
8316         return 0;
8317 }
8318
8319 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8320 {
8321         struct bnx2x *bp = netdev_priv(dev);
8322         u32 advertising;
8323
8324         if (IS_E1HMF(bp))
8325                 return 0;
8326
8327         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8328            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8329            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8330            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8331            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8332            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8333            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8334
8335         if (cmd->autoneg == AUTONEG_ENABLE) {
8336                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8337                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8338                         return -EINVAL;
8339                 }
8340
8341                 /* mask the requested advertised modes with those supported */
8342                 cmd->advertising &= bp->port.supported;
8343
8344                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8345                 bp->link_params.req_duplex = DUPLEX_FULL;
8346                 bp->port.advertising |= (ADVERTISED_Autoneg |
8347                                          cmd->advertising);
8348
8349         } else { /* forced speed */
8350                 /* advertise the requested speed and duplex if supported */
8351                 switch (cmd->speed) {
8352                 case SPEED_10:
8353                         if (cmd->duplex == DUPLEX_FULL) {
8354                                 if (!(bp->port.supported &
8355                                       SUPPORTED_10baseT_Full)) {
8356                                         DP(NETIF_MSG_LINK,
8357                                            "10M full not supported\n");
8358                                         return -EINVAL;
8359                                 }
8360
8361                                 advertising = (ADVERTISED_10baseT_Full |
8362                                                ADVERTISED_TP);
8363                         } else {
8364                                 if (!(bp->port.supported &
8365                                       SUPPORTED_10baseT_Half)) {
8366                                         DP(NETIF_MSG_LINK,
8367                                            "10M half not supported\n");
8368                                         return -EINVAL;
8369                                 }
8370
8371                                 advertising = (ADVERTISED_10baseT_Half |
8372                                                ADVERTISED_TP);
8373                         }
8374                         break;
8375
8376                 case SPEED_100:
8377                         if (cmd->duplex == DUPLEX_FULL) {
8378                                 if (!(bp->port.supported &
8379                                                 SUPPORTED_100baseT_Full)) {
8380                                         DP(NETIF_MSG_LINK,
8381                                            "100M full not supported\n");
8382                                         return -EINVAL;
8383                                 }
8384
8385                                 advertising = (ADVERTISED_100baseT_Full |
8386                                                ADVERTISED_TP);
8387                         } else {
8388                                 if (!(bp->port.supported &
8389                                                 SUPPORTED_100baseT_Half)) {
8390                                         DP(NETIF_MSG_LINK,
8391                                            "100M half not supported\n");
8392                                         return -EINVAL;
8393                                 }
8394
8395                                 advertising = (ADVERTISED_100baseT_Half |
8396                                                ADVERTISED_TP);
8397                         }
8398                         break;
8399
8400                 case SPEED_1000:
8401                         if (cmd->duplex != DUPLEX_FULL) {
8402                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8403                                 return -EINVAL;
8404                         }
8405
8406                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8407                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8408                                 return -EINVAL;
8409                         }
8410
8411                         advertising = (ADVERTISED_1000baseT_Full |
8412                                        ADVERTISED_TP);
8413                         break;
8414
8415                 case SPEED_2500:
8416                         if (cmd->duplex != DUPLEX_FULL) {
8417                                 DP(NETIF_MSG_LINK,
8418                                    "2.5G half not supported\n");
8419                                 return -EINVAL;
8420                         }
8421
8422                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8423                                 DP(NETIF_MSG_LINK,
8424                                    "2.5G full not supported\n");
8425                                 return -EINVAL;
8426                         }
8427
8428                         advertising = (ADVERTISED_2500baseX_Full |
8429                                        ADVERTISED_TP);
8430                         break;
8431
8432                 case SPEED_10000:
8433                         if (cmd->duplex != DUPLEX_FULL) {
8434                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8435                                 return -EINVAL;
8436                         }
8437
8438                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8439                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8440                                 return -EINVAL;
8441                         }
8442
8443                         advertising = (ADVERTISED_10000baseT_Full |
8444                                        ADVERTISED_FIBRE);
8445                         break;
8446
8447                 default:
8448                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8449                         return -EINVAL;
8450                 }
8451
8452                 bp->link_params.req_line_speed = cmd->speed;
8453                 bp->link_params.req_duplex = cmd->duplex;
8454                 bp->port.advertising = advertising;
8455         }
8456
8457         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8458            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8459            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8460            bp->port.advertising);
8461
8462         if (netif_running(dev)) {
8463                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8464                 bnx2x_link_set(bp);
8465         }
8466
8467         return 0;
8468 }
8469
8470 #define PHY_FW_VER_LEN                  10
8471
8472 static void bnx2x_get_drvinfo(struct net_device *dev,
8473                               struct ethtool_drvinfo *info)
8474 {
8475         struct bnx2x *bp = netdev_priv(dev);
8476         u8 phy_fw_ver[PHY_FW_VER_LEN];
8477
8478         strcpy(info->driver, DRV_MODULE_NAME);
8479         strcpy(info->version, DRV_MODULE_VERSION);
8480
8481         phy_fw_ver[0] = '\0';
8482         if (bp->port.pmf) {
8483                 bnx2x_acquire_phy_lock(bp);
8484                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8485                                              (bp->state != BNX2X_STATE_CLOSED),
8486                                              phy_fw_ver, PHY_FW_VER_LEN);
8487                 bnx2x_release_phy_lock(bp);
8488         }
8489
8490         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8491                  (bp->common.bc_ver & 0xff0000) >> 16,
8492                  (bp->common.bc_ver & 0xff00) >> 8,
8493                  (bp->common.bc_ver & 0xff),
8494                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8495         strcpy(info->bus_info, pci_name(bp->pdev));
8496         info->n_stats = BNX2X_NUM_STATS;
8497         info->testinfo_len = BNX2X_NUM_TESTS;
8498         info->eedump_len = bp->common.flash_size;
8499         info->regdump_len = 0;
8500 }
8501
8502 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8503 {
8504         struct bnx2x *bp = netdev_priv(dev);
8505
8506         if (bp->flags & NO_WOL_FLAG) {
8507                 wol->supported = 0;
8508                 wol->wolopts = 0;
8509         } else {
8510                 wol->supported = WAKE_MAGIC;
8511                 if (bp->wol)
8512                         wol->wolopts = WAKE_MAGIC;
8513                 else
8514                         wol->wolopts = 0;
8515         }
8516         memset(&wol->sopass, 0, sizeof(wol->sopass));
8517 }
8518
8519 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8520 {
8521         struct bnx2x *bp = netdev_priv(dev);
8522
8523         if (wol->wolopts & ~WAKE_MAGIC)
8524                 return -EINVAL;
8525
8526         if (wol->wolopts & WAKE_MAGIC) {
8527                 if (bp->flags & NO_WOL_FLAG)
8528                         return -EINVAL;
8529
8530                 bp->wol = 1;
8531         } else
8532                 bp->wol = 0;
8533
8534         return 0;
8535 }
8536
8537 static u32 bnx2x_get_msglevel(struct net_device *dev)
8538 {
8539         struct bnx2x *bp = netdev_priv(dev);
8540
8541         return bp->msglevel;
8542 }
8543
8544 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8545 {
8546         struct bnx2x *bp = netdev_priv(dev);
8547
8548         if (capable(CAP_NET_ADMIN))
8549                 bp->msglevel = level;
8550 }
8551
8552 static int bnx2x_nway_reset(struct net_device *dev)
8553 {
8554         struct bnx2x *bp = netdev_priv(dev);
8555
8556         if (!bp->port.pmf)
8557                 return 0;
8558
8559         if (netif_running(dev)) {
8560                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8561                 bnx2x_link_set(bp);
8562         }
8563
8564         return 0;
8565 }
8566
8567 static int bnx2x_get_eeprom_len(struct net_device *dev)
8568 {
8569         struct bnx2x *bp = netdev_priv(dev);
8570
8571         return bp->common.flash_size;
8572 }
8573
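/* NVRAM access is arbitrated between the ports in software: a port requests
 * the lock by setting its REQ bit in MCPR_NVM_SW_ARB and then polls until
 * the matching ARB bit shows it owns the interface (or until the timeout,
 * stretched 100x on emulation/FPGA, expires).
 */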
8574 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8575 {
8576         int port = BP_PORT(bp);
8577         int count, i;
8578         u32 val = 0;
8579
8580         /* adjust timeout for emulation/FPGA */
8581         count = NVRAM_TIMEOUT_COUNT;
8582         if (CHIP_REV_IS_SLOW(bp))
8583                 count *= 100;
8584
8585         /* request access to nvram interface */
8586         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8587                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8588
8589         for (i = 0; i < count*10; i++) {
8590                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8591                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8592                         break;
8593
8594                 udelay(5);
8595         }
8596
8597         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8598                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8599                 return -EBUSY;
8600         }
8601
8602         return 0;
8603 }
8604
8605 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8606 {
8607         int port = BP_PORT(bp);
8608         int count, i;
8609         u32 val = 0;
8610
8611         /* adjust timeout for emulation/FPGA */
8612         count = NVRAM_TIMEOUT_COUNT;
8613         if (CHIP_REV_IS_SLOW(bp))
8614                 count *= 100;
8615
8616         /* relinquish nvram interface */
8617         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8618                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8619
8620         for (i = 0; i < count*10; i++) {
8621                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8622                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8623                         break;
8624
8625                 udelay(5);
8626         }
8627
8628         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8629                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8630                 return -EBUSY;
8631         }
8632
8633         return 0;
8634 }
8635
8636 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8637 {
8638         u32 val;
8639
8640         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8641
8642         /* enable both bits, even on read */
8643         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8644                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8645                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8646 }
8647
8648 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8649 {
8650         u32 val;
8651
8652         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8653
8654         /* disable both bits, even after read */
8655         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8656                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8657                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8658 }
8659
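/* Read one dword from NVRAM: program the address, kick a DOIT command and
 * poll the DONE bit; the data register is sampled only once DONE is set.
 */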
8660 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8661                                   u32 cmd_flags)
8662 {
8663         int count, i, rc;
8664         u32 val;
8665
8666         /* build the command word */
8667         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8668
8669         /* need to clear DONE bit separately */
8670         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8671
8672         /* address of the NVRAM to read from */
8673         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8674                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8675
8676         /* issue a read command */
8677         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8678
8679         /* adjust timeout for emulation/FPGA */
8680         count = NVRAM_TIMEOUT_COUNT;
8681         if (CHIP_REV_IS_SLOW(bp))
8682                 count *= 100;
8683
8684         /* wait for completion */
8685         *ret_val = 0;
8686         rc = -EBUSY;
8687         for (i = 0; i < count; i++) {
8688                 udelay(5);
8689                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8690
8691                 if (val & MCPR_NVM_COMMAND_DONE) {
8692                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8693                         /* we read nvram data in cpu order,
8694                          * but ethtool sees it as an array of bytes;
8695                          * converting to big-endian does the work */
8696                         *ret_val = cpu_to_be32(val);
8697                         rc = 0;
8698                         break;
8699                 }
8700         }
8701
8702         return rc;
8703 }
8704
8705 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8706                             int buf_size)
8707 {
8708         int rc;
8709         u32 cmd_flags;
8710         __be32 val;
8711
8712         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8713                 DP(BNX2X_MSG_NVM,
8714                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8715                    offset, buf_size);
8716                 return -EINVAL;
8717         }
8718
8719         if (offset + buf_size > bp->common.flash_size) {
8720                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8721                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8722                    offset, buf_size, bp->common.flash_size);
8723                 return -EINVAL;
8724         }
8725
8726         /* request access to nvram interface */
8727         rc = bnx2x_acquire_nvram_lock(bp);
8728         if (rc)
8729                 return rc;
8730
8731         /* enable access to nvram interface */
8732         bnx2x_enable_nvram_access(bp);
8733
8734         /* read the first word(s) */
8735         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8736         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8737                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8738                 memcpy(ret_buf, &val, 4);
8739
8740                 /* advance to the next dword */
8741                 offset += sizeof(u32);
8742                 ret_buf += sizeof(u32);
8743                 buf_size -= sizeof(u32);
8744                 cmd_flags = 0;
8745         }
8746
8747         if (rc == 0) {
8748                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8749                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8750                 memcpy(ret_buf, &val, 4);
8751         }
8752
8753         /* disable access to nvram interface */
8754         bnx2x_disable_nvram_access(bp);
8755         bnx2x_release_nvram_lock(bp);
8756
8757         return rc;
8758 }
8759
8760 static int bnx2x_get_eeprom(struct net_device *dev,
8761                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8762 {
8763         struct bnx2x *bp = netdev_priv(dev);
8764         int rc;
8765
8766         if (!netif_running(dev))
8767                 return -EAGAIN;
8768
8769         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8770            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8771            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8772            eeprom->len, eeprom->len);
8773
8774         /* parameters already validated in ethtool_get_eeprom */
8775
8776         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8777
8778         return rc;
8779 }
8780
8781 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8782                                    u32 cmd_flags)
8783 {
8784         int count, i, rc;
8785
8786         /* build the command word */
8787         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8788
8789         /* need to clear DONE bit separately */
8790         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8791
8792         /* write the data */
8793         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8794
8795         /* address of the NVRAM to write to */
8796         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8797                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8798
8799         /* issue the write command */
8800         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8801
8802         /* adjust timeout for emulation/FPGA */
8803         count = NVRAM_TIMEOUT_COUNT;
8804         if (CHIP_REV_IS_SLOW(bp))
8805                 count *= 100;
8806
8807         /* wait for completion */
8808         rc = -EBUSY;
8809         for (i = 0; i < count; i++) {
8810                 udelay(5);
8811                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8812                 if (val & MCPR_NVM_COMMAND_DONE) {
8813                         rc = 0;
8814                         break;
8815                 }
8816         }
8817
8818         return rc;
8819 }
8820
8821 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8822
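/* A single-byte write is done as a read-modify-write of the dword that
 * contains the byte.  E.g. for offset 0x1002: align_offset = 0x1000 and
 * BYTE_OFFSET = 16, so the byte replaces bits 23:16 of the dword image
 * read back from NVRAM.
 */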
8823 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8824                               int buf_size)
8825 {
8826         int rc;
8827         u32 cmd_flags;
8828         u32 align_offset;
8829         __be32 val;
8830
8831         if (offset + buf_size > bp->common.flash_size) {
8832                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8833                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8834                    offset, buf_size, bp->common.flash_size);
8835                 return -EINVAL;
8836         }
8837
8838         /* request access to nvram interface */
8839         rc = bnx2x_acquire_nvram_lock(bp);
8840         if (rc)
8841                 return rc;
8842
8843         /* enable access to nvram interface */
8844         bnx2x_enable_nvram_access(bp);
8845
8846         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8847         align_offset = (offset & ~0x03);
8848         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8849
8850         if (rc == 0) {
8851                 val &= ~(0xff << BYTE_OFFSET(offset));
8852                 val |= (*data_buf << BYTE_OFFSET(offset));
8853
8854                 /* nvram data is returned as an array of bytes;
8855                  * convert it back to cpu order */
8856                 val = be32_to_cpu(val);
8857
8858                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8859                                              cmd_flags);
8860         }
8861
8862         /* disable access to nvram interface */
8863         bnx2x_disable_nvram_access(bp);
8864         bnx2x_release_nvram_lock(bp);
8865
8866         return rc;
8867 }
8868
8869 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8870                              int buf_size)
8871 {
8872         int rc;
8873         u32 cmd_flags;
8874         u32 val;
8875         u32 written_so_far;
8876
8877         if (buf_size == 1)      /* single-byte write from ethtool */
8878                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8879
8880         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8881                 DP(BNX2X_MSG_NVM,
8882                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8883                    offset, buf_size);
8884                 return -EINVAL;
8885         }
8886
8887         if (offset + buf_size > bp->common.flash_size) {
8888                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8889                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8890                    offset, buf_size, bp->common.flash_size);
8891                 return -EINVAL;
8892         }
8893
8894         /* request access to nvram interface */
8895         rc = bnx2x_acquire_nvram_lock(bp);
8896         if (rc)
8897                 return rc;
8898
8899         /* enable access to nvram interface */
8900         bnx2x_enable_nvram_access(bp);
8901
8902         written_so_far = 0;
8903         cmd_flags = MCPR_NVM_COMMAND_FIRST;
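        /* Each NVRAM page must be bracketed by FIRST/LAST command flags:
         * LAST is set on the final dword of the buffer and on the last
         * dword of a page, FIRST on the first dword of the next page.
         */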
8904         while ((written_so_far < buf_size) && (rc == 0)) {
8905                 if (written_so_far == (buf_size - sizeof(u32)))
8906                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8907                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8908                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8909                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8910                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8911
8912                 memcpy(&val, data_buf, 4);
8913
8914                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8915
8916                 /* advance to the next dword */
8917                 offset += sizeof(u32);
8918                 data_buf += sizeof(u32);
8919                 written_so_far += sizeof(u32);
8920                 cmd_flags = 0;
8921         }
8922
8923         /* disable access to nvram interface */
8924         bnx2x_disable_nvram_access(bp);
8925         bnx2x_release_nvram_lock(bp);
8926
8927         return rc;
8928 }
8929
8930 static int bnx2x_set_eeprom(struct net_device *dev,
8931                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8932 {
8933         struct bnx2x *bp = netdev_priv(dev);
8934         int rc;
8935
8936         if (!netif_running(dev))
8937                 return -EAGAIN;
8938
8939         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8940            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8941            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8942            eeprom->len, eeprom->len);
8943
8944         /* parameters already validated in ethtool_set_eeprom */
8945
8946         /* If the magic number is "PHY" in ASCII (0x00504859), upgrade the PHY FW */
8947         if (eeprom->magic == 0x00504859)
8948                 if (bp->port.pmf) {
8949
8950                         bnx2x_acquire_phy_lock(bp);
8951                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8952                                              bp->link_params.ext_phy_config,
8953                                              (bp->state != BNX2X_STATE_CLOSED),
8954                                              eebuf, eeprom->len);
8955                         if ((bp->state == BNX2X_STATE_OPEN) ||
8956                             (bp->state == BNX2X_STATE_DISABLED)) {
8957                                 rc |= bnx2x_link_reset(&bp->link_params,
8958                                                        &bp->link_vars, 1);
8959                                 rc |= bnx2x_phy_init(&bp->link_params,
8960                                                      &bp->link_vars);
8961                         }
8962                         bnx2x_release_phy_lock(bp);
8963
8964                 } else /* Only the PMF can access the PHY */
8965                         return -EINVAL;
8966         else
8967                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8968
8969         return rc;
8970 }
8971
8972 static int bnx2x_get_coalesce(struct net_device *dev,
8973                               struct ethtool_coalesce *coal)
8974 {
8975         struct bnx2x *bp = netdev_priv(dev);
8976
8977         memset(coal, 0, sizeof(struct ethtool_coalesce));
8978
8979         coal->rx_coalesce_usecs = bp->rx_ticks;
8980         coal->tx_coalesce_usecs = bp->tx_ticks;
8981
8982         return 0;
8983 }
8984
8985 static int bnx2x_set_coalesce(struct net_device *dev,
8986                               struct ethtool_coalesce *coal)
8987 {
8988         struct bnx2x *bp = netdev_priv(dev);
8989
8990         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8991         if (bp->rx_ticks > 3000)
8992                 bp->rx_ticks = 3000;
8993
8994         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8995         if (bp->tx_ticks > 0x3000)
8996                 bp->tx_ticks = 0x3000;
8997
8998         if (netif_running(dev))
8999                 bnx2x_update_coalesce(bp);
9000
9001         return 0;
9002 }
9003
9004 static void bnx2x_get_ringparam(struct net_device *dev,
9005                                 struct ethtool_ringparam *ering)
9006 {
9007         struct bnx2x *bp = netdev_priv(dev);
9008
9009         ering->rx_max_pending = MAX_RX_AVAIL;
9010         ering->rx_mini_max_pending = 0;
9011         ering->rx_jumbo_max_pending = 0;
9012
9013         ering->rx_pending = bp->rx_ring_size;
9014         ering->rx_mini_pending = 0;
9015         ering->rx_jumbo_pending = 0;
9016
9017         ering->tx_max_pending = MAX_TX_AVAIL;
9018         ering->tx_pending = bp->tx_ring_size;
9019 }
9020
9021 static int bnx2x_set_ringparam(struct net_device *dev,
9022                                struct ethtool_ringparam *ering)
9023 {
9024         struct bnx2x *bp = netdev_priv(dev);
9025         int rc = 0;
9026
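        /* The TX ring must be able to hold at least one maximally-fragmented
         * packet; MAX_SKB_FRAGS + 4 presumably leaves headroom for the
         * start/parse BDs and ring bookkeeping.
         */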
9027         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9028             (ering->tx_pending > MAX_TX_AVAIL) ||
9029             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9030                 return -EINVAL;
9031
9032         bp->rx_ring_size = ering->rx_pending;
9033         bp->tx_ring_size = ering->tx_pending;
9034
9035         if (netif_running(dev)) {
9036                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9037                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9038         }
9039
9040         return rc;
9041 }
9042
9043 static void bnx2x_get_pauseparam(struct net_device *dev,
9044                                  struct ethtool_pauseparam *epause)
9045 {
9046         struct bnx2x *bp = netdev_priv(dev);
9047
9048         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9049                            BNX2X_FLOW_CTRL_AUTO) &&
9050                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9051
9052         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9053                             BNX2X_FLOW_CTRL_RX);
9054         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9055                             BNX2X_FLOW_CTRL_TX);
9056
9057         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9058            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9059            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9060 }
9061
9062 static int bnx2x_set_pauseparam(struct net_device *dev,
9063                                 struct ethtool_pauseparam *epause)
9064 {
9065         struct bnx2x *bp = netdev_priv(dev);
9066
9067         if (IS_E1HMF(bp))
9068                 return 0;
9069
9070         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9071            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9072            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9073
9074         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9075
9076         if (epause->rx_pause)
9077                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9078
9079         if (epause->tx_pause)
9080                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9081
9082         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9083                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9084
9085         if (epause->autoneg) {
9086                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9087                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9088                         return -EINVAL;
9089                 }
9090
9091                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9092                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9093         }
9094
9095         DP(NETIF_MSG_LINK,
9096            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9097
9098         if (netif_running(dev)) {
9099                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9100                 bnx2x_link_set(bp);
9101         }
9102
9103         return 0;
9104 }
9105
9106 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9107 {
9108         struct bnx2x *bp = netdev_priv(dev);
9109         int changed = 0;
9110         int rc = 0;
9111
9112         /* TPA requires Rx CSUM offloading */
9113         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9114                 if (!(dev->features & NETIF_F_LRO)) {
9115                         dev->features |= NETIF_F_LRO;
9116                         bp->flags |= TPA_ENABLE_FLAG;
9117                         changed = 1;
9118                 }
9119
9120         } else if (dev->features & NETIF_F_LRO) {
9121                 dev->features &= ~NETIF_F_LRO;
9122                 bp->flags &= ~TPA_ENABLE_FLAG;
9123                 changed = 1;
9124         }
9125
9126         if (changed && netif_running(dev)) {
9127                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9128                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9129         }
9130
9131         return rc;
9132 }
9133
9134 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9135 {
9136         struct bnx2x *bp = netdev_priv(dev);
9137
9138         return bp->rx_csum;
9139 }
9140
9141 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9142 {
9143         struct bnx2x *bp = netdev_priv(dev);
9144         int rc = 0;
9145
9146         bp->rx_csum = data;
9147
9148         /* Disable TPA when Rx CSUM is disabled; otherwise all
9149            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9150         if (!data) {
9151                 u32 flags = ethtool_op_get_flags(dev);
9152
9153                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9154         }
9155
9156         return rc;
9157 }
9158
9159 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9160 {
9161         if (data) {
9162                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9163                 dev->features |= NETIF_F_TSO6;
9164         } else {
9165                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9166                 dev->features &= ~NETIF_F_TSO6;
9167         }
9168
9169         return 0;
9170 }
9171
9172 static const struct {
9173         char string[ETH_GSTRING_LEN];
9174 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9175         { "register_test (offline)" },
9176         { "memory_test (offline)" },
9177         { "loopback_test (offline)" },
9178         { "nvram_test (online)" },
9179         { "interrupt_test (online)" },
9180         { "link_test (online)" },
9181         { "idle check (online)" }
9182 };
9183
9184 static int bnx2x_self_test_count(struct net_device *dev)
9185 {
9186         return BNX2X_NUM_TESTS;
9187 }
9188
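/* Idle-state register test: for each table entry the per-port register
 * lives at offset0 + port * offset1.  Every register is written with 0 and
 * then 0xffffffff, read back and compared under its mask, and finally
 * restored to its original value.
 */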
9189 static int bnx2x_test_registers(struct bnx2x *bp)
9190 {
9191         int idx, i, rc = -ENODEV;
9192         u32 wr_val = 0;
9193         int port = BP_PORT(bp);
9194         static const struct {
9195                 u32  offset0;
9196                 u32  offset1;
9197                 u32  mask;
9198         } reg_tbl[] = {
9199 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9200                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9201                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9202                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9203                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9204                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9205                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9206                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9207                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9208                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9209 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9210                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9211                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9212                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9213                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9214                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9215                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9216                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9217                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
9218                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9219 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9220                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9221                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9222                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9223                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9224                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9225                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9226                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9227                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9228                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9229 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9230                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9231                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9232                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9233                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9234                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9235                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9236                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9237
9238                 { 0xffffffff, 0, 0x00000000 }
9239         };
9240
9241         if (!netif_running(bp->dev))
9242                 return rc;
9243
9244         /* Run the test twice:
9245            first writing 0x00000000, then writing 0xffffffff */
9246         for (idx = 0; idx < 2; idx++) {
9247
9248                 switch (idx) {
9249                 case 0:
9250                         wr_val = 0;
9251                         break;
9252                 case 1:
9253                         wr_val = 0xffffffff;
9254                         break;
9255                 }
9256
9257                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9258                         u32 offset, mask, save_val, val;
9259
9260                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9261                         mask = reg_tbl[i].mask;
9262
9263                         save_val = REG_RD(bp, offset);
9264
9265                         REG_WR(bp, offset, wr_val);
9266                         val = REG_RD(bp, offset);
9267
9268                         /* Restore the original register's value */
9269                         REG_WR(bp, offset, save_val);
9270
9271                         /* verify that the value reads back as expected */
9272                         if ((val & mask) != (wr_val & mask))
9273                                 goto test_reg_exit;
9274                 }
9275         }
9276
9277         rc = 0;
9278
9279 test_reg_exit:
9280         return rc;
9281 }
9282
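/* Memory test: read every word of the listed internal memories (the reads
 * themselves exercise the parity logic), then check the parity status
 * registers, ignoring the bits masked as expected for E1/E1H.
 */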
9283 static int bnx2x_test_memory(struct bnx2x *bp)
9284 {
9285         int i, j, rc = -ENODEV;
9286         u32 val;
9287         static const struct {
9288                 u32 offset;
9289                 int size;
9290         } mem_tbl[] = {
9291                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9292                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9293                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9294                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9295                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9296                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9297                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9298
9299                 { 0xffffffff, 0 }
9300         };
9301         static const struct {
9302                 char *name;
9303                 u32 offset;
9304                 u32 e1_mask;
9305                 u32 e1h_mask;
9306         } prty_tbl[] = {
9307                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9308                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9309                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9310                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9311                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9312                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9313
9314                 { NULL, 0xffffffff, 0, 0 }
9315         };
9316
9317         if (!netif_running(bp->dev))
9318                 return rc;
9319
9320         /* Go through all the memories */
9321         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9322                 for (j = 0; j < mem_tbl[i].size; j++)
9323                         REG_RD(bp, mem_tbl[i].offset + j*4);
9324
9325         /* Check the parity status */
9326         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9327                 val = REG_RD(bp, prty_tbl[i].offset);
9328                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9329                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9330                         DP(NETIF_MSG_HW,
9331                            "%s is 0x%x\n", prty_tbl[i].name, val);
9332                         goto test_mem_exit;
9333                 }
9334         }
9335
9336         rc = 0;
9337
9338 test_mem_exit:
9339         return rc;
9340 }
9341
9342 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9343 {
9344         int cnt = 1000;
9345
9346         if (link_up)
9347                 while (bnx2x_link_test(bp) && cnt--)
9348                         msleep(10);
9349 }
9350
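/* Single-packet loopback: build a frame addressed to our own MAC with an
 * index-pattern payload, post it on fastpath 0, then poll the TX and RX
 * consumer indices and verify the completion flags, length and payload of
 * the received packet.
 */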
9351 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9352 {
9353         unsigned int pkt_size, num_pkts, i;
9354         struct sk_buff *skb;
9355         unsigned char *packet;
9356         struct bnx2x_fastpath *fp = &bp->fp[0];
9357         u16 tx_start_idx, tx_idx;
9358         u16 rx_start_idx, rx_idx;
9359         u16 pkt_prod;
9360         struct sw_tx_bd *tx_buf;
9361         struct eth_tx_bd *tx_bd;
9362         dma_addr_t mapping;
9363         union eth_rx_cqe *cqe;
9364         u8 cqe_fp_flags;
9365         struct sw_rx_bd *rx_buf;
9366         u16 len;
9367         int rc = -ENODEV;
9368
9369         /* check the loopback mode */
9370         switch (loopback_mode) {
9371         case BNX2X_PHY_LOOPBACK:
9372                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9373                         return -EINVAL;
9374                 break;
9375         case BNX2X_MAC_LOOPBACK:
9376                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9377                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9378                 break;
9379         default:
9380                 return -EINVAL;
9381         }
9382
9383         /* prepare the loopback packet */
9384         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9385                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9386         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9387         if (!skb) {
9388                 rc = -ENOMEM;
9389                 goto test_loopback_exit;
9390         }
9391         packet = skb_put(skb, pkt_size);
9392         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9393         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9394         for (i = ETH_HLEN; i < pkt_size; i++)
9395                 packet[i] = (unsigned char) (i & 0xff);
9396
9397         /* send the loopback packet */
9398         num_pkts = 0;
9399         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9400         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9401
9402         pkt_prod = fp->tx_pkt_prod++;
9403         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9404         tx_buf->first_bd = fp->tx_bd_prod;
9405         tx_buf->skb = skb;
9406
9407         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9408         mapping = pci_map_single(bp->pdev, skb->data,
9409                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9410         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9411         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9412         tx_bd->nbd = cpu_to_le16(1);
9413         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9414         tx_bd->vlan = cpu_to_le16(pkt_prod);
9415         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9416                                        ETH_TX_BD_FLAGS_END_BD);
9417         tx_bd->general_data = ((UNICAST_ADDRESS <<
9418                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9419
9420         wmb();
9421
9422         le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9423         mb(); /* FW restriction: must not reorder writing nbd and packets */
9424         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9425         DOORBELL(bp, fp->index, 0);
9426
9427         mmiowb();
9428
9429         num_pkts++;
9430         fp->tx_bd_prod++;
9431         bp->dev->trans_start = jiffies;
9432
9433         udelay(100);
9434
9435         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9436         if (tx_idx != tx_start_idx + num_pkts)
9437                 goto test_loopback_exit;
9438
9439         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9440         if (rx_idx != rx_start_idx + num_pkts)
9441                 goto test_loopback_exit;
9442
9443         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9444         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9445         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9446                 goto test_loopback_rx_exit;
9447
9448         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9449         if (len != pkt_size)
9450                 goto test_loopback_rx_exit;
9451
9452         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9453         skb = rx_buf->skb;
9454         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9455         for (i = ETH_HLEN; i < pkt_size; i++)
9456                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9457                         goto test_loopback_rx_exit;
9458
9459         rc = 0;
9460
9461 test_loopback_rx_exit:
9462
9463         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9464         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9465         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9466         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9467
9468         /* Update producers */
9469         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9470                              fp->rx_sge_prod);
9471
9472 test_loopback_exit:
9473         bp->link_params.loopback_mode = LOOPBACK_NONE;
9474
9475         return rc;
9476 }
9477
9478 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9479 {
9480         int rc = 0, res;
9481
9482         if (!netif_running(bp->dev))
9483                 return BNX2X_LOOPBACK_FAILED;
9484
9485         bnx2x_netif_stop(bp, 1);
9486         bnx2x_acquire_phy_lock(bp);
9487
9488         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9489         if (res) {
9490                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
9491                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9492         }
9493
9494         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9495         if (res) {
9496                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
9497                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9498         }
9499
9500         bnx2x_release_phy_lock(bp);
9501         bnx2x_netif_start(bp);
9502
9503         return rc;
9504 }
9505
9506 #define CRC32_RESIDUAL                  0xdebb20e3
9507
9508 static int bnx2x_test_nvram(struct bnx2x *bp)
9509 {
9510         static const struct {
9511                 int offset;
9512                 int size;
9513         } nvram_tbl[] = {
9514                 {     0,  0x14 }, /* bootstrap */
9515                 {  0x14,  0xec }, /* dir */
9516                 { 0x100, 0x350 }, /* manuf_info */
9517                 { 0x450,  0xf0 }, /* feature_info */
9518                 { 0x640,  0x64 }, /* upgrade_key_info */
9519                 { 0x6a4,  0x64 },
9520                 { 0x708,  0x70 }, /* manuf_key_info */
9521                 { 0x778,  0x70 },
9522                 {     0,     0 }
9523         };
9524         __be32 buf[0x350 / 4];
9525         u8 *data = (u8 *)buf;
9526         int i, rc;
9527         u32 magic, csum;
9528
9529         rc = bnx2x_nvram_read(bp, 0, data, 4);
9530         if (rc) {
9531                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9532                 goto test_nvram_exit;
9533         }
9534
9535         magic = be32_to_cpu(buf[0]);
9536         if (magic != 0x669955aa) {
9537                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9538                 rc = -ENODEV;
9539                 goto test_nvram_exit;
9540         }
9541
9542         for (i = 0; nvram_tbl[i].size; i++) {
9543
9544                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9545                                       nvram_tbl[i].size);
9546                 if (rc) {
9547                         DP(NETIF_MSG_PROBE,
9548                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9549                         goto test_nvram_exit;
9550                 }
9551
9552                 csum = ether_crc_le(nvram_tbl[i].size, data);
9553                 if (csum != CRC32_RESIDUAL) {
9554                         DP(NETIF_MSG_PROBE,
9555                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9556                         rc = -ENODEV;
9557                         goto test_nvram_exit;
9558                 }
9559         }
9560
9561 test_nvram_exit:
9562         return rc;
9563 }
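
/*
 * A user-space sketch (not driver code) of the residual property the check
 * above relies on.  Assuming ether_crc_le() is a reflected CRC-32
 * (polynomial 0xedb88320, initial value ~0, no final XOR), running it over
 * a region that ends with its own stored CRC (complemented, little-endian)
 * yields the constant 0xdebb20e3 for intact data.  Payload bytes below are
 * hypothetical:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_le_raw(const uint8_t *buf, size_t len)
{
        uint32_t crc = ~0u;

        while (len--) {
                int bit;

                crc ^= *buf++;
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
        }
        return crc;     /* no final XOR, matching the assumed semantics */
}

int main(void)
{
        uint8_t buf[16];        /* 12 payload bytes + 4 CRC bytes */
        uint32_t crc;
        int i;

        memcpy(buf, "nvram sample", 12);
        crc = ~crc32_le_raw(buf, 12);   /* stored form is the complement */
        for (i = 0; i < 4; i++)
                buf[12 + i] = crc >> (8 * i);   /* append little-endian */

        printf("residual 0x%08x (expect 0xdebb20e3)\n",
               crc32_le_raw(buf, 16));
        return 0;
}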
9564
9565 static int bnx2x_test_intr(struct bnx2x *bp)
9566 {
9567         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9568         int i, rc;
9569
9570         if (!netif_running(bp->dev))
9571                 return -ENODEV;
9572
9573         config->hdr.length = 0;
9574         if (CHIP_IS_E1(bp))
9575                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9576         else
9577                 config->hdr.offset = BP_FUNC(bp);
9578         config->hdr.client_id = bp->fp->cl_id;
9579         config->hdr.reserved1 = 0;
9580
9581         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9582                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9583                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9584         if (rc == 0) {
9585                 bp->set_mac_pending++;
9586                 for (i = 0; i < 10; i++) {
9587                         if (!bp->set_mac_pending)
9588                                 break;
9589                         msleep_interruptible(10);
9590                 }
9591                 if (i == 10)
9592                         rc = -ENODEV;
9593         }
9594
9595         return rc;
9596 }
9597
9598 static void bnx2x_self_test(struct net_device *dev,
9599                             struct ethtool_test *etest, u64 *buf)
9600 {
9601         struct bnx2x *bp = netdev_priv(dev);
9602
9603         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9604
9605         if (!netif_running(dev))
9606                 return;
9607
9608         /* offline tests are not supported in MF mode */
9609         if (IS_E1HMF(bp))
9610                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9611
9612         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9613                 u8 link_up;
9614
9615                 link_up = bp->link_vars.link_up;
9616                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9617                 bnx2x_nic_load(bp, LOAD_DIAG);
9618                 /* wait until link state is restored */
9619                 bnx2x_wait_for_link(bp, link_up);
9620
9621                 if (bnx2x_test_registers(bp) != 0) {
9622                         buf[0] = 1;
9623                         etest->flags |= ETH_TEST_FL_FAILED;
9624                 }
9625                 if (bnx2x_test_memory(bp) != 0) {
9626                         buf[1] = 1;
9627                         etest->flags |= ETH_TEST_FL_FAILED;
9628                 }
9629                 buf[2] = bnx2x_test_loopback(bp, link_up);
9630                 if (buf[2] != 0)
9631                         etest->flags |= ETH_TEST_FL_FAILED;
9632
9633                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9634                 bnx2x_nic_load(bp, LOAD_NORMAL);
9635                 /* wait until link state is restored */
9636                 bnx2x_wait_for_link(bp, link_up);
9637         }
9638         if (bnx2x_test_nvram(bp) != 0) {
9639                 buf[3] = 1;
9640                 etest->flags |= ETH_TEST_FL_FAILED;
9641         }
9642         if (bnx2x_test_intr(bp) != 0) {
9643                 buf[4] = 1;
9644                 etest->flags |= ETH_TEST_FL_FAILED;
9645         }
9646         if (bp->port.pmf)
9647                 if (bnx2x_link_test(bp) != 0) {
9648                         buf[5] = 1;
9649                         etest->flags |= ETH_TEST_FL_FAILED;
9650                 }
9651
9652 #ifdef BNX2X_EXTRA_DEBUG
9653         bnx2x_panic_dump(bp);
9654 #endif
9655 }
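
/*
 * A user-space sketch (not driver code) of how the handler above is
 * reached: the ETHTOOL_TEST ioctl passes a struct ethtool_test followed by
 * one u64 result slot per test - six here, matching buf[0]..buf[5] above.
 * The interface name "eth0" is a placeholder.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        __u64 buf[(sizeof(struct ethtool_test) + 7) / 8 + 6];
        struct ethtool_test *test = (struct ethtool_test *)buf;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0), i;

        memset(buf, 0, sizeof(buf));
        test->cmd = ETHTOOL_TEST;
        test->flags = ETH_TEST_FL_OFFLINE;      /* also run offline tests */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)test;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                for (i = 0; i < 6; i++)
                        printf("test %d: %llu\n", i,
                               (unsigned long long)test->data[i]);
        close(fd);
        return 0;
}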
9656
9657 static const struct {
9658         long offset;
9659         int size;
9660         u8 string[ETH_GSTRING_LEN];
9661 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9662 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9663         { Q_STATS_OFFSET32(error_bytes_received_hi),
9664                                                 8, "[%d]: rx_error_bytes" },
9665         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9666                                                 8, "[%d]: rx_ucast_packets" },
9667         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9668                                                 8, "[%d]: rx_mcast_packets" },
9669         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9670                                                 8, "[%d]: rx_bcast_packets" },
9671         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9672         { Q_STATS_OFFSET32(rx_err_discard_pkt),
9673                                          4, "[%d]: rx_phy_ip_err_discards"},
9674         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9675                                          4, "[%d]: rx_skb_alloc_discard" },
9676         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9677
9678 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9679         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9680                                                         8, "[%d]: tx_packets" }
9681 };
9682
9683 static const struct {
9684         long offset;
9685         int size;
9686         u32 flags;
9687 #define STATS_FLAGS_PORT                1
9688 #define STATS_FLAGS_FUNC                2
9689 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9690         u8 string[ETH_GSTRING_LEN];
9691 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9692 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9693                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9694         { STATS_OFFSET32(error_bytes_received_hi),
9695                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9696         { STATS_OFFSET32(total_unicast_packets_received_hi),
9697                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9698         { STATS_OFFSET32(total_multicast_packets_received_hi),
9699                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9700         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9701                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9702         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9703                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9704         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9705                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9706         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9707                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9708         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9709                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9710 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9711                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9712         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9713                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9714         { STATS_OFFSET32(no_buff_discard_hi),
9715                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9716         { STATS_OFFSET32(mac_filter_discard),
9717                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9718         { STATS_OFFSET32(xxoverflow_discard),
9719                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9720         { STATS_OFFSET32(brb_drop_hi),
9721                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9722         { STATS_OFFSET32(brb_truncate_hi),
9723                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9724         { STATS_OFFSET32(pause_frames_received_hi),
9725                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9726         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9727                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9728         { STATS_OFFSET32(nig_timer_max),
9729                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9730 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9731                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9732         { STATS_OFFSET32(rx_skb_alloc_failed),
9733                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9734         { STATS_OFFSET32(hw_csum_err),
9735                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9736
9737         { STATS_OFFSET32(total_bytes_transmitted_hi),
9738                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9739         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9740                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9741         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9742                                 8, STATS_FLAGS_BOTH, "tx_packets" },
9743         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9744                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9745         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9746                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9747         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9748                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9749         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9750                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9751 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9752                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9753         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9754                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9755         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9756                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9757         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9758                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9759         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9760                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9761         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9762                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9763         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9764                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9765         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9766                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9767         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9768                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9769         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9770                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9771 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9772                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9773         { STATS_OFFSET32(pause_frames_sent_hi),
9774                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9775 };
9776
9777 #define IS_PORT_STAT(i) \
9778         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9779 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9780 #define IS_E1HMF_MODE_STAT(bp) \
9781                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9782
9783 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9784 {
9785         struct bnx2x *bp = netdev_priv(dev);
9786         int i, j, k;
9787
9788         switch (stringset) {
9789         case ETH_SS_STATS:
9790                 if (is_multi(bp)) {
9791                         k = 0;
9792                         for_each_queue(bp, i) {
9793                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9794                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9795                                                 bnx2x_q_stats_arr[j].string, i);
9796                                 k += BNX2X_NUM_Q_STATS;
9797                         }
9798                         if (IS_E1HMF_MODE_STAT(bp))
9799                                 break;
9800                         for (j = 0; j < BNX2X_NUM_STATS; j++)
9801                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9802                                        bnx2x_stats_arr[j].string);
9803                 } else {
9804                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9805                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9806                                         continue;
9807                                 strcpy(buf + j*ETH_GSTRING_LEN,
9808                                        bnx2x_stats_arr[i].string);
9809                                 j++;
9810                         }
9811                 }
9812                 break;
9813
9814         case ETH_SS_TEST:
9815                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9816                 break;
9817         }
9818 }
9819
9820 static int bnx2x_get_stats_count(struct net_device *dev)
9821 {
9822         struct bnx2x *bp = netdev_priv(dev);
9823         int i, num_stats;
9824
9825         if (is_multi(bp)) {
9826                 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9827                 if (!IS_E1HMF_MODE_STAT(bp))
9828                         num_stats += BNX2X_NUM_STATS;
9829         } else {
9830                 if (IS_E1HMF_MODE_STAT(bp)) {
9831                         num_stats = 0;
9832                         for (i = 0; i < BNX2X_NUM_STATS; i++)
9833                                 if (IS_FUNC_STAT(i))
9834                                         num_stats++;
9835                 } else
9836                         num_stats = BNX2X_NUM_STATS;
9837         }
9838
9839         return num_stats;
9840 }
9841
9842 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9843                                     struct ethtool_stats *stats, u64 *buf)
9844 {
9845         struct bnx2x *bp = netdev_priv(dev);
9846         u32 *hw_stats, *offset;
9847         int i, j, k;
9848
9849         if (is_multi(bp)) {
9850                 k = 0;
9851                 for_each_queue(bp, i) {
9852                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9853                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9854                                 if (bnx2x_q_stats_arr[j].size == 0) {
9855                                         /* skip this counter */
9856                                         buf[k + j] = 0;
9857                                         continue;
9858                                 }
9859                                 offset = (hw_stats +
9860                                           bnx2x_q_stats_arr[j].offset);
9861                                 if (bnx2x_q_stats_arr[j].size == 4) {
9862                                         /* 4-byte counter */
9863                                         buf[k + j] = (u64) *offset;
9864                                         continue;
9865                                 }
9866                                 /* 8-byte counter */
9867                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9868                         }
9869                         k += BNX2X_NUM_Q_STATS;
9870                 }
9871                 if (IS_E1HMF_MODE_STAT(bp))
9872                         return;
9873                 hw_stats = (u32 *)&bp->eth_stats;
9874                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9875                         if (bnx2x_stats_arr[j].size == 0) {
9876                                 /* skip this counter */
9877                                 buf[k + j] = 0;
9878                                 continue;
9879                         }
9880                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
9881                         if (bnx2x_stats_arr[j].size == 4) {
9882                                 /* 4-byte counter */
9883                                 buf[k + j] = (u64) *offset;
9884                                 continue;
9885                         }
9886                         /* 8-byte counter */
9887                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
9888                 }
9889         } else {
9890                 hw_stats = (u32 *)&bp->eth_stats;
9891                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9892                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9893                                 continue;
9894                         if (bnx2x_stats_arr[i].size == 0) {
9895                                 /* skip this counter */
9896                                 buf[j] = 0;
9897                                 j++;
9898                                 continue;
9899                         }
9900                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
9901                         if (bnx2x_stats_arr[i].size == 4) {
9902                                 /* 4-byte counter */
9903                                 buf[j] = (u64) *offset;
9904                                 j++;
9905                                 continue;
9906                         }
9907                         /* 8-byte counter */
9908                         buf[j] = HILO_U64(*offset, *(offset + 1));
9909                         j++;
9910                 }
9911         }
9912 }
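
/*
 * The 8-byte counters above are stored as two consecutive u32 words, high
 * word first; a one-line sketch of the reassembly that HILO_U64 is assumed
 * to perform (its real definition lives in bnx2x.h, not in this file):
 */
static inline u64 hilo_u64_sketch(u32 hi, u32 lo)
{
        return (((u64)hi << 32) | lo);
}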
9913
9914 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9915 {
9916         struct bnx2x *bp = netdev_priv(dev);
9917         int port = BP_PORT(bp);
9918         int i;
9919
9920         if (!netif_running(dev))
9921                 return 0;
9922
9923         if (!bp->port.pmf)
9924                 return 0;
9925
9926         if (data == 0)
9927                 data = 2;
9928
9929         for (i = 0; i < (data * 2); i++) {
9930                 if ((i % 2) == 0)
9931                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9932                                       bp->link_params.hw_led_mode,
9933                                       bp->link_params.chip_id);
9934                 else
9935                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9936                                       bp->link_params.hw_led_mode,
9937                                       bp->link_params.chip_id);
9938
9939                 msleep_interruptible(500);
9940                 if (signal_pending(current))
9941                         break;
9942         }
9943
9944         if (bp->link_vars.link_up)
9945                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9946                               bp->link_vars.line_speed,
9947                               bp->link_params.hw_led_mode,
9948                               bp->link_params.chip_id);
9949
9950         return 0;
9951 }
9952
9953 static struct ethtool_ops bnx2x_ethtool_ops = {
9954         .get_settings           = bnx2x_get_settings,
9955         .set_settings           = bnx2x_set_settings,
9956         .get_drvinfo            = bnx2x_get_drvinfo,
9957         .get_wol                = bnx2x_get_wol,
9958         .set_wol                = bnx2x_set_wol,
9959         .get_msglevel           = bnx2x_get_msglevel,
9960         .set_msglevel           = bnx2x_set_msglevel,
9961         .nway_reset             = bnx2x_nway_reset,
9962         .get_link               = ethtool_op_get_link,
9963         .get_eeprom_len         = bnx2x_get_eeprom_len,
9964         .get_eeprom             = bnx2x_get_eeprom,
9965         .set_eeprom             = bnx2x_set_eeprom,
9966         .get_coalesce           = bnx2x_get_coalesce,
9967         .set_coalesce           = bnx2x_set_coalesce,
9968         .get_ringparam          = bnx2x_get_ringparam,
9969         .set_ringparam          = bnx2x_set_ringparam,
9970         .get_pauseparam         = bnx2x_get_pauseparam,
9971         .set_pauseparam         = bnx2x_set_pauseparam,
9972         .get_rx_csum            = bnx2x_get_rx_csum,
9973         .set_rx_csum            = bnx2x_set_rx_csum,
9974         .get_tx_csum            = ethtool_op_get_tx_csum,
9975         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9976         .set_flags              = bnx2x_set_flags,
9977         .get_flags              = ethtool_op_get_flags,
9978         .get_sg                 = ethtool_op_get_sg,
9979         .set_sg                 = ethtool_op_set_sg,
9980         .get_tso                = ethtool_op_get_tso,
9981         .set_tso                = bnx2x_set_tso,
9982         .self_test_count        = bnx2x_self_test_count,
9983         .self_test              = bnx2x_self_test,
9984         .get_strings            = bnx2x_get_strings,
9985         .phys_id                = bnx2x_phys_id,
9986         .get_stats_count        = bnx2x_get_stats_count,
9987         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9988 };
9989
9990 /* end of ethtool_ops */
9991
9992 /****************************************************************************
9993 * General service functions
9994 ****************************************************************************/
9995
9996 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9997 {
9998         u16 pmcsr;
9999
10000         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10001
10002         switch (state) {
10003         case PCI_D0:
10004                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10005                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10006                                        PCI_PM_CTRL_PME_STATUS));
10007
10008                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10009                         /* delay required during transition out of D3hot */
10010                         msleep(20);
10011                 break;
10012
10013         case PCI_D3hot:
10014                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10015                 pmcsr |= 3;
10016
10017                 if (bp->wol)
10018                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10019
10020                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10021                                       pmcsr);
10022
10023                 /* No more memory access after this point until
10024                  * the device is brought back to D0.
10025                  */
10026                 break;
10027
10028         default:
10029                 return -EINVAL;
10030         }
10031         return 0;
10032 }
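
/*
 * Sketch of the PMCSR update done above, assuming the standard PCI-PM
 * encoding in which the two low control bits select the power state
 * (00 = D0, 11 = D3hot) and are covered by PCI_PM_CTRL_STATE_MASK:
 */
static inline u16 pmcsr_with_state(u16 pmcsr, u16 state_bits)
{
        return ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) | (state_bits & 3));
}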
10033
10034 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10035 {
10036         u16 rx_cons_sb;
10037
10038         /* Tell compiler that status block fields can change */
10039         barrier();
10040         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10041         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10042                 rx_cons_sb++;
10043         return (fp->rx_comp_cons != rx_cons_sb);
10044 }
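
/*
 * Sketch of the index quirk above: the last descriptor of each RCQ page is
 * reserved as a next-page pointer, so a consumer index landing on it must
 * be bumped past it.  MAX_RCQ_DESC_CNT is assumed to be the number of
 * usable descriptors per page (a power of two minus one), as in bnx2x.h.
 */
static inline u16 rcq_skip_next_page_bd(u16 idx)
{
        if ((idx & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                idx++;
        return idx;
}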
10045
10046 /*
10047  * net_device service functions
10048  */
10049
10050 static int bnx2x_poll(struct napi_struct *napi, int budget)
10051 {
10052         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10053                                                  napi);
10054         struct bnx2x *bp = fp->bp;
10055         int work_done = 0;
10056
10057 #ifdef BNX2X_STOP_ON_ERROR
10058         if (unlikely(bp->panic))
10059                 goto poll_panic;
10060 #endif
10061
10062         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10063         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10064         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10065
10066         bnx2x_update_fpsb_idx(fp);
10067
10068         if (bnx2x_has_tx_work(fp))
10069                 bnx2x_tx_int(fp, budget);
10070
10071         if (bnx2x_has_rx_work(fp))
10072                 work_done = bnx2x_rx_int(fp, budget);
10073
10074         rmb(); /* BNX2X_HAS_WORK() reads the status block */
10075
10076         /* must not complete if we consumed full budget */
10077         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
10078
10079 #ifdef BNX2X_STOP_ON_ERROR
10080 poll_panic:
10081 #endif
10082                 napi_complete(napi);
10083
10084                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10085                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10086                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10087                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10088         }
10089
10090         return work_done;
10091 }
10092
10093
10094 /* We split the first BD into a headers BD and a data BD
10095  * to ease the pain of our fellow microcode engineers;
10096  * we use one mapping for both BDs.
10097  * So far this has only been observed to happen
10098  * in Other Operating Systems(TM).
10099  */
10100 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10101                                    struct bnx2x_fastpath *fp,
10102                                    struct eth_tx_bd **tx_bd, u16 hlen,
10103                                    u16 bd_prod, int nbd)
10104 {
10105         struct eth_tx_bd *h_tx_bd = *tx_bd;
10106         struct eth_tx_bd *d_tx_bd;
10107         dma_addr_t mapping;
10108         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10109
10110         /* first fix first BD */
10111         h_tx_bd->nbd = cpu_to_le16(nbd);
10112         h_tx_bd->nbytes = cpu_to_le16(hlen);
10113
10114         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10115            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10116            h_tx_bd->addr_lo, h_tx_bd->nbd);
10117
10118         /* now get a new data BD
10119          * (after the pbd) and fill it */
10120         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10121         d_tx_bd = &fp->tx_desc_ring[bd_prod];
10122
10123         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10124                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10125
10126         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10127         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10128         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10129         d_tx_bd->vlan = 0;
10130         /* this marks the BD as one that has no individual mapping;
10131          * the FW ignores this flag in a BD not marked as start
10132          */
10133         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10134         DP(NETIF_MSG_TX_QUEUED,
10135            "TSO split data size is %d (%x:%x)\n",
10136            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10137
10138         /* update tx_bd for marking the last BD flag */
10139         *tx_bd = d_tx_bd;
10140
10141         return bd_prod;
10142 }
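
/*
 * The split above deliberately reuses a single DMA mapping: the header BD
 * keeps the original address with length hlen, while the data BD starts
 * hlen bytes into the same mapping and carries the remainder.  The helper
 * below is a standalone restatement of that arithmetic, not driver code:
 */
static void tx_split_addrs_sketch(u64 mapping, u16 old_len, u16 hlen,
                                  u64 *data_addr, u16 *data_len)
{
        *data_addr = mapping + hlen;    /* data starts right after headers */
        *data_len = old_len - hlen;     /* header BD itself keeps hlen bytes */
}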
10143
10144 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10145 {
10146         if (fix > 0)
10147                 csum = (u16) ~csum_fold(csum_sub(csum,
10148                                 csum_partial(t_header - fix, fix, 0)));
10149
10150         else if (fix < 0)
10151                 csum = (u16) ~csum_fold(csum_add(csum,
10152                                 csum_partial(t_header, -fix, 0)));
10153
10154         return swab16(csum);
10155 }
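
/*
 * A user-space illustration (not the kernel's csum_* code) of the one's
 * complement arithmetic the fixup above relies on: a 32-bit partial sum is
 * folded to 16 bits, and bytes in front of the intended start point can be
 * summed separately and subtracted back out rather than recomputing the
 * whole checksum.
 */
#include <stdint.h>

static uint32_t csum_partial_sketch(const uint8_t *p, int len, uint32_t sum)
{
        while (len > 1) {
                sum += ((uint32_t)p[0] << 8) | p[1];    /* 16-bit words */
                p += 2;
                len -= 2;
        }
        if (len)
                sum += (uint32_t)p[0] << 8;             /* odd trailing byte */
        return sum;
}

static uint16_t csum_fold_sketch(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the carries ... */
        sum = (sum & 0xffff) + (sum >> 16);     /* ... twice is enough */
        return (uint16_t)sum;
}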
10156
10157 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10158 {
10159         u32 rc;
10160
10161         if (skb->ip_summed != CHECKSUM_PARTIAL)
10162                 rc = XMIT_PLAIN;
10163
10164         else {
10165                 if (skb->protocol == htons(ETH_P_IPV6)) {
10166                         rc = XMIT_CSUM_V6;
10167                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10168                                 rc |= XMIT_CSUM_TCP;
10169
10170                 } else {
10171                         rc = XMIT_CSUM_V4;
10172                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10173                                 rc |= XMIT_CSUM_TCP;
10174                 }
10175         }
10176
10177         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10178                 rc |= XMIT_GSO_V4;
10179
10180         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10181                 rc |= XMIT_GSO_V6;
10182
10183         return rc;
10184 }
10185
10186 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10187 /* check if the packet requires linearization (i.e. it is too fragmented);
10188    no need to check fragmentation if page size > 8K (there will be no
10189    violation of FW restrictions) */
10190 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10191                              u32 xmit_type)
10192 {
10193         int to_copy = 0;
10194         int hlen = 0;
10195         int first_bd_sz = 0;
10196
10197         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10198         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10199
10200                 if (xmit_type & XMIT_GSO) {
10201                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10202                         /* Check if LSO packet needs to be copied:
10203                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10204                         int wnd_size = MAX_FETCH_BD - 3;
10205                         /* Number of windows to check */
10206                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10207                         int wnd_idx = 0;
10208                         int frag_idx = 0;
10209                         u32 wnd_sum = 0;
10210
10211                         /* Headers length */
10212                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10213                                 tcp_hdrlen(skb);
10214
10215                         /* Amount of data (w/o headers) on the linear part of the SKB */
10216                         first_bd_sz = skb_headlen(skb) - hlen;
10217
10218                         wnd_sum  = first_bd_sz;
10219
10220                         /* Calculate the first sum - it's special */
10221                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10222                                 wnd_sum +=
10223                                         skb_shinfo(skb)->frags[frag_idx].size;
10224
10225                         /* If there was data on the linear part of the skb - check it */
10226                         if (first_bd_sz > 0) {
10227                                 if (unlikely(wnd_sum < lso_mss)) {
10228                                         to_copy = 1;
10229                                         goto exit_lbl;
10230                                 }
10231
10232                                 wnd_sum -= first_bd_sz;
10233                         }
10234
10235                         /* Others are easier: run through the frag list and
10236                            check all windows */
10237                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10238                                 wnd_sum +=
10239                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10240
10241                                 if (unlikely(wnd_sum < lso_mss)) {
10242                                         to_copy = 1;
10243                                         break;
10244                                 }
10245                                 wnd_sum -=
10246                                         skb_shinfo(skb)->frags[wnd_idx].size;
10247                         }
10248                 } else {
10249                         /* a non-LSO packet that is too fragmented should
10250                            always be linearized */
10251                         to_copy = 1;
10252                 }
10253         }
10254
10255 exit_lbl:
10256         if (unlikely(to_copy))
10257                 DP(NETIF_MSG_TX_QUEUED,
10258                    "Linearization IS REQUIRED for %s packet. "
10259                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10260                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10261                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10262
10263         return to_copy;
10264 }
10265 #endif
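
/*
 * A condensed restatement of the window rule enforced above: in an LSO
 * packet, every run of (MAX_FETCH_BD - 3) consecutive BDs must together
 * cover at least one MSS, or the FW could be asked to build a segment from
 * more BDs than it can fetch; such an skb is linearized first.  Standalone
 * sketch, not driver code:
 */
static int lso_window_too_small(const u32 *bd_size, int nbds, int wnd_size,
                                u32 mss)
{
        u32 wnd_sum = 0;
        int i;

        for (i = 0; i < nbds; i++) {
                wnd_sum += bd_size[i];
                if (i >= wnd_size)
                        wnd_sum -= bd_size[i - wnd_size];  /* slide window */
                if ((i >= wnd_size - 1) && (wnd_sum < mss))
                        return 1;       /* linearization required */
        }
        return 0;
}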
10266
10267 /* called with netif_tx_lock
10268  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10269  * netif_wake_queue()
10270  */
10271 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10272 {
10273         struct bnx2x *bp = netdev_priv(dev);
10274         struct bnx2x_fastpath *fp;
10275         struct netdev_queue *txq;
10276         struct sw_tx_bd *tx_buf;
10277         struct eth_tx_bd *tx_bd;
10278         struct eth_tx_parse_bd *pbd = NULL;
10279         u16 pkt_prod, bd_prod;
10280         int nbd, fp_index;
10281         dma_addr_t mapping;
10282         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10283         int vlan_off = (bp->e1hov ? 4 : 0);
10284         int i;
10285         u8 hlen = 0;
10286
10287 #ifdef BNX2X_STOP_ON_ERROR
10288         if (unlikely(bp->panic))
10289                 return NETDEV_TX_BUSY;
10290 #endif
10291
10292         fp_index = skb_get_queue_mapping(skb);
10293         txq = netdev_get_tx_queue(dev, fp_index);
10294
10295         fp = &bp->fp[fp_index];
10296
10297         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10298                 fp->eth_q_stats.driver_xoff++;
10299                 netif_tx_stop_queue(txq);
10300                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10301                 return NETDEV_TX_BUSY;
10302         }
10303
10304         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10305            "  gso type %x  xmit_type %x\n",
10306            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10307            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10308
10309 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10310         /* First, check if we need to linearize the skb (due to FW
10311            restrictions). No need to check fragmentation if page size > 8K
10312            (there will be no violation of FW restrictions) */
10313         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10314                 /* Statistics of linearization */
10315                 bp->lin_cnt++;
10316                 if (skb_linearize(skb) != 0) {
10317                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10318                            "silently dropping this SKB\n");
10319                         dev_kfree_skb_any(skb);
10320                         return NETDEV_TX_OK;
10321                 }
10322         }
10323 #endif
10324
10325         /*
10326          * Please read carefully. First we use one BD which we mark as start,
10327          * then for TSO or xsum we have a parsing info BD,
10328          * and only then we have the rest of the TSO BDs.
10329          * (Don't forget to mark the last one as last,
10330          * and to unmap only AFTER you write to the BD ...)
10331          * And above all, all pbd sizes are in words - NOT DWORDS!
10332          */
10333
10334         pkt_prod = fp->tx_pkt_prod++;
10335         bd_prod = TX_BD(fp->tx_bd_prod);
10336
10337         /* get a tx_buf and first BD */
10338         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10339         tx_bd = &fp->tx_desc_ring[bd_prod];
10340
10341         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10342         tx_bd->general_data = (UNICAST_ADDRESS <<
10343                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10344         /* header nbd */
10345         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10346
10347         /* remember the first BD of the packet */
10348         tx_buf->first_bd = fp->tx_bd_prod;
10349         tx_buf->skb = skb;
10350
10351         DP(NETIF_MSG_TX_QUEUED,
10352            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10353            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10354
10355 #ifdef BCM_VLAN
10356         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10357             (bp->flags & HW_VLAN_TX_FLAG)) {
10358                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10359                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10360                 vlan_off += 4;
10361         } else
10362 #endif
10363                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10364
10365         if (xmit_type) {
10366                 /* turn on parsing and get a BD */
10367                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10368                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10369
10370                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10371         }
10372
10373         if (xmit_type & XMIT_CSUM) {
10374                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10375
10376                 /* for now NS flag is not used in Linux */
10377                 pbd->global_data =
10378                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10379                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10380
10381                 pbd->ip_hlen = (skb_transport_header(skb) -
10382                                 skb_network_header(skb)) / 2;
10383
10384                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10385
10386                 pbd->total_hlen = cpu_to_le16(hlen);
10387                 hlen = hlen*2 - vlan_off;
10388
10389                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10390
10391                 if (xmit_type & XMIT_CSUM_V4)
10392                         tx_bd->bd_flags.as_bitfield |=
10393                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10394                 else
10395                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10396
10397                 if (xmit_type & XMIT_CSUM_TCP) {
10398                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10399
10400                 } else {
10401                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10402
10403                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10404                         pbd->cs_offset = fix / 2;
10405
10406                         DP(NETIF_MSG_TX_QUEUED,
10407                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
10408                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10409                            SKB_CS(skb));
10410
10411                         /* HW bug: fixup the CSUM */
10412                         pbd->tcp_pseudo_csum =
10413                                 bnx2x_csum_fix(skb_transport_header(skb),
10414                                                SKB_CS(skb), fix);
10415
10416                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10417                            pbd->tcp_pseudo_csum);
10418                 }
10419         }
10420
10421         mapping = pci_map_single(bp->pdev, skb->data,
10422                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10423
10424         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10425         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10426         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10427         tx_bd->nbd = cpu_to_le16(nbd);
10428         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10429
10430         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10431            "  nbytes %d  flags %x  vlan %x\n",
10432            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10433            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10434            le16_to_cpu(tx_bd->vlan));
10435
10436         if (xmit_type & XMIT_GSO) {
10437
10438                 DP(NETIF_MSG_TX_QUEUED,
10439                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10440                    skb->len, hlen, skb_headlen(skb),
10441                    skb_shinfo(skb)->gso_size);
10442
10443                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10444
10445                 if (unlikely(skb_headlen(skb) > hlen))
10446                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10447                                                  bd_prod, ++nbd);
10448
10449                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10450                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10451                 pbd->tcp_flags = pbd_tcp_flags(skb);
10452
10453                 if (xmit_type & XMIT_GSO_V4) {
10454                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10455                         pbd->tcp_pseudo_csum =
10456                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10457                                                           ip_hdr(skb)->daddr,
10458                                                           0, IPPROTO_TCP, 0));
10459
10460                 } else
10461                         pbd->tcp_pseudo_csum =
10462                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10463                                                         &ipv6_hdr(skb)->daddr,
10464                                                         0, IPPROTO_TCP, 0));
10465
10466                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10467         }
10468
10469         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10470                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10471
10472                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10473                 tx_bd = &fp->tx_desc_ring[bd_prod];
10474
10475                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10476                                        frag->size, PCI_DMA_TODEVICE);
10477
10478                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10479                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10480                 tx_bd->nbytes = cpu_to_le16(frag->size);
10481                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10482                 tx_bd->bd_flags.as_bitfield = 0;
10483
10484                 DP(NETIF_MSG_TX_QUEUED,
10485                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
10486                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10487                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10488         }
10489
10490         /* now at last mark the BD as the last BD */
10491         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10492
10493         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
10494            tx_bd, tx_bd->bd_flags.as_bitfield);
10495
10496         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10497
10498         /* now send a tx doorbell, counting the next BD
10499          * if the packet contains or ends with it
10500          */
10501         if (TX_BD_POFF(bd_prod) < nbd)
10502                 nbd++;
10503
10504         if (pbd)
10505                 DP(NETIF_MSG_TX_QUEUED,
10506                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10507                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10508                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10509                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10510                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10511
10512         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10513
10514         /*
10515          * Make sure that the BD data is updated before updating the producer
10516          * since FW might read the BD right after the producer is updated.
10517          * This is only applicable for weak-ordered memory model archs such
10518          * as IA-64. The following barrier is also mandatory since FW will
10519          * assume packets must have BDs.
10520          */
10521         wmb();
10522
10523         le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10524         mb(); /* FW restriction: must not reorder writing nbd and packets */
10525         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10526         DOORBELL(bp, fp->index, 0);
10527
10528         mmiowb();
10529
10530         fp->tx_bd_prod += nbd;
10531         dev->trans_start = jiffies;
10532
10533         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10534                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10535                    if we put Tx into XOFF state. */
10536                 smp_mb();
10537                 netif_tx_stop_queue(txq);
10538                 fp->eth_q_stats.driver_xoff++;
10539                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10540                         netif_tx_wake_queue(txq);
10541         }
10542         fp->tx_pkt++;
10543
10544         return NETDEV_TX_OK;
10545 }
10546
10547 /* called with rtnl_lock */
10548 static int bnx2x_open(struct net_device *dev)
10549 {
10550         struct bnx2x *bp = netdev_priv(dev);
10551
10552         netif_carrier_off(dev);
10553
10554         bnx2x_set_power_state(bp, PCI_D0);
10555
10556         return bnx2x_nic_load(bp, LOAD_OPEN);
10557 }
10558
10559 /* called with rtnl_lock */
10560 static int bnx2x_close(struct net_device *dev)
10561 {
10562         struct bnx2x *bp = netdev_priv(dev);
10563
10564         /* Unload the driver, release IRQs */
10565         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10566         if (atomic_read(&bp->pdev->enable_cnt) == 1)
10567                 if (!CHIP_REV_IS_SLOW(bp))
10568                         bnx2x_set_power_state(bp, PCI_D3hot);
10569
10570         return 0;
10571 }
10572
10573 /* called with netif_tx_lock from dev_mcast.c */
10574 static void bnx2x_set_rx_mode(struct net_device *dev)
10575 {
10576         struct bnx2x *bp = netdev_priv(dev);
10577         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10578         int port = BP_PORT(bp);
10579
10580         if (bp->state != BNX2X_STATE_OPEN) {
10581                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10582                 return;
10583         }
10584
10585         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10586
10587         if (dev->flags & IFF_PROMISC)
10588                 rx_mode = BNX2X_RX_MODE_PROMISC;
10589
10590         else if ((dev->flags & IFF_ALLMULTI) ||
10591                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10592                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10593
10594         else { /* some multicasts */
10595                 if (CHIP_IS_E1(bp)) {
10596                         int i, old, offset;
10597                         struct dev_mc_list *mclist;
10598                         struct mac_configuration_cmd *config =
10599                                                 bnx2x_sp(bp, mcast_config);
10600
10601                         for (i = 0, mclist = dev->mc_list;
10602                              mclist && (i < dev->mc_count);
10603                              i++, mclist = mclist->next) {
10604
10605                                 config->config_table[i].
10606                                         cam_entry.msb_mac_addr =
10607                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
10608                                 config->config_table[i].
10609                                         cam_entry.middle_mac_addr =
10610                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
10611                                 config->config_table[i].
10612                                         cam_entry.lsb_mac_addr =
10613                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
10614                                 config->config_table[i].cam_entry.flags =
10615                                                         cpu_to_le16(port);
10616                                 config->config_table[i].
10617                                         target_table_entry.flags = 0;
10618                                 config->config_table[i].
10619                                         target_table_entry.client_id = 0;
10620                                 config->config_table[i].
10621                                         target_table_entry.vlan_id = 0;
10622
10623                                 DP(NETIF_MSG_IFUP,
10624                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10625                                    config->config_table[i].
10626                                                 cam_entry.msb_mac_addr,
10627                                    config->config_table[i].
10628                                                 cam_entry.middle_mac_addr,
10629                                    config->config_table[i].
10630                                                 cam_entry.lsb_mac_addr);
10631                         }
10632                         old = config->hdr.length;
10633                         if (old > i) {
10634                                 for (; i < old; i++) {
10635                                         if (CAM_IS_INVALID(config->
10636                                                            config_table[i])) {
10637                                                 /* already invalidated */
10638                                                 break;
10639                                         }
10640                                         /* invalidate */
10641                                         CAM_INVALIDATE(config->
10642                                                        config_table[i]);
10643                                 }
10644                         }
10645
10646                         if (CHIP_REV_IS_SLOW(bp))
10647                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10648                         else
10649                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
10650
10651                         config->hdr.length = i;
10652                         config->hdr.offset = offset;
10653                         config->hdr.client_id = bp->fp->cl_id;
10654                         config->hdr.reserved1 = 0;
10655
10656                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10657                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10658                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10659                                       0);
10660                 } else { /* E1H */
10661                         /* Accept one or more multicasts */
10662                         struct dev_mc_list *mclist;
10663                         u32 mc_filter[MC_HASH_SIZE];
10664                         u32 crc, bit, regidx;
10665                         int i;
10666
10667                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10668
10669                         for (i = 0, mclist = dev->mc_list;
10670                              mclist && (i < dev->mc_count);
10671                              i++, mclist = mclist->next) {
10672
10673                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10674                                    mclist->dmi_addr);
10675
10676                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10677                                 bit = (crc >> 24) & 0xff;
10678                                 regidx = bit >> 5;
10679                                 bit &= 0x1f;
10680                                 mc_filter[regidx] |= (1 << bit);
10681                         }
10682
10683                         for (i = 0; i < MC_HASH_SIZE; i++)
10684                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10685                                        mc_filter[i]);
10686                 }
10687         }
10688
10689         bp->rx_mode = rx_mode;
10690         bnx2x_set_storm_rx_mode(bp);
10691 }
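
/* A worked example of the E1H hash computation above (an illustrative
 * sketch, not driver code): the top byte of the little-endian CRC32c of
 * the MAC address selects one of 256 filter bits spread across the
 * eight 32-bit MC_HASH registers.  If crc32c_le() returned 0xa7123456,
 * then bit = 0xa7 = 167, regidx = 167 >> 5 = 5, and bit 167 & 0x1f = 7
 * is set in mc_filter[5].  A frame passes the hardware filter when its
 * destination MAC hashes to a set bit, so colliding addresses pass as
 * well and are filtered further up the stack.
 */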
10692
10693 /* called with rtnl_lock */
10694 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10695 {
10696         struct sockaddr *addr = p;
10697         struct bnx2x *bp = netdev_priv(dev);
10698
10699         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10700                 return -EINVAL;
10701
10702         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10703         if (netif_running(dev)) {
10704                 if (CHIP_IS_E1(bp))
10705                         bnx2x_set_mac_addr_e1(bp, 1);
10706                 else
10707                         bnx2x_set_mac_addr_e1h(bp, 1);
10708         }
10709
10710         return 0;
10711 }
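
/* Note that the new address is programmed into the chip only while the
 * interface is running: E1 writes a CAM entry via
 * bnx2x_set_mac_addr_e1() while E1H uses the per-function setup in
 * bnx2x_set_mac_addr_e1h().  When the device is down, dev->dev_addr is
 * simply updated and picked up on the next load.
 */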
10712
10713 /* called with rtnl_lock */
10714 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10715 {
10716         struct mii_ioctl_data *data = if_mii(ifr);
10717         struct bnx2x *bp = netdev_priv(dev);
10718         int port = BP_PORT(bp);
10719         int err;
10720
10721         switch (cmd) {
10722         case SIOCGMIIPHY:
10723                 data->phy_id = bp->port.phy_addr;
10724
10725                 /* fallthrough */
10726
10727         case SIOCGMIIREG: {
10728                 u16 mii_regval;
10729
10730                 if (!netif_running(dev))
10731                         return -EAGAIN;
10732
10733                 mutex_lock(&bp->port.phy_mutex);
10734                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10735                                       DEFAULT_PHY_DEV_ADDR,
10736                                       (data->reg_num & 0x1f), &mii_regval);
10737                 data->val_out = mii_regval;
10738                 mutex_unlock(&bp->port.phy_mutex);
10739                 return err;
10740         }
10741
10742         case SIOCSMIIREG:
10743                 if (!capable(CAP_NET_ADMIN))
10744                         return -EPERM;
10745
10746                 if (!netif_running(dev))
10747                         return -EAGAIN;
10748
10749                 mutex_lock(&bp->port.phy_mutex);
10750                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10751                                        DEFAULT_PHY_DEV_ADDR,
10752                                        (data->reg_num & 0x1f), data->val_in);
10753                 mutex_unlock(&bp->port.phy_mutex);
10754                 return err;
10755
10756         default:
10757                 /* do nothing */
10758                 break;
10759         }
10760
10761         return -EOPNOTSUPP;
10762 }
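
/* A minimal userspace sketch (illustration only; the interface name and
 * register number are assumptions) of exercising the MII ioctls handled
 * above:
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	struct mii_ioctl_data *mii =
 *		(struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	ioctl(s, SIOCGMIIPHY, &ifr);	// fills mii->phy_id
 *	mii->reg_num = 1;		// MII status register
 *	ioctl(s, SIOCGMIIREG, &ifr);	// value returned in mii->val_out
 *
 * SIOCSMIIREG additionally requires CAP_NET_ADMIN, and reg_num is
 * masked to five bits before it reaches the PHY.
 */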
10763
10764 /* called with rtnl_lock */
10765 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10766 {
10767         struct bnx2x *bp = netdev_priv(dev);
10768         int rc = 0;
10769
10770         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10771             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10772                 return -EINVAL;
10773
10774         /* This does not race with packet allocation
10775          * because the actual alloc size is
10776          * only updated as part of load
10777          */
10778         dev->mtu = new_mtu;
10779
10780         if (netif_running(dev)) {
10781                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10782                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10783         }
10784
10785         return rc;
10786 }
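
/* The bounds check above keeps the on-wire frame between
 * ETH_MIN_PACKET_SIZE and ETH_MAX_JUMBO_PACKET_SIZE.  A running device
 * is fully unloaded and reloaded because the RX buffer sizing derived
 * from the MTU is only computed at load time, as the comment in the
 * function notes.
 */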
10787
10788 static void bnx2x_tx_timeout(struct net_device *dev)
10789 {
10790         struct bnx2x *bp = netdev_priv(dev);
10791
10792 #ifdef BNX2X_STOP_ON_ERROR
10793         if (!bp->panic)
10794                 bnx2x_panic();
10795 #endif
10796         /* This allows the netif to be shut down gracefully before resetting */
10797         schedule_work(&bp->reset_task);
10798 }
10799
10800 #ifdef BCM_VLAN
10801 /* called with rtnl_lock */
10802 static void bnx2x_vlan_rx_register(struct net_device *dev,
10803                                    struct vlan_group *vlgrp)
10804 {
10805         struct bnx2x *bp = netdev_priv(dev);
10806
10807         bp->vlgrp = vlgrp;
10808
10809         /* Set flags according to the required capabilities */
10810         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10811
10812         if (dev->features & NETIF_F_HW_VLAN_TX)
10813                 bp->flags |= HW_VLAN_TX_FLAG;
10814
10815         if (dev->features & NETIF_F_HW_VLAN_RX)
10816                 bp->flags |= HW_VLAN_RX_FLAG;
10817
10818         if (netif_running(dev))
10819                 bnx2x_set_client_config(bp);
10820 }
10821
10822 #endif
10823
10824 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10825 static void poll_bnx2x(struct net_device *dev)
10826 {
10827         struct bnx2x *bp = netdev_priv(dev);
10828
10829         disable_irq(bp->pdev->irq);
10830         bnx2x_interrupt(bp->pdev->irq, dev);
10831         enable_irq(bp->pdev->irq);
10832 }
10833 #endif
10834
10835 static const struct net_device_ops bnx2x_netdev_ops = {
10836         .ndo_open               = bnx2x_open,
10837         .ndo_stop               = bnx2x_close,
10838         .ndo_start_xmit         = bnx2x_start_xmit,
10839         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10840         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10841         .ndo_validate_addr      = eth_validate_addr,
10842         .ndo_do_ioctl           = bnx2x_ioctl,
10843         .ndo_change_mtu         = bnx2x_change_mtu,
10844         .ndo_tx_timeout         = bnx2x_tx_timeout,
10845 #ifdef BCM_VLAN
10846         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10847 #endif
10848 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10849         .ndo_poll_controller    = poll_bnx2x,
10850 #endif
10851 };
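
/* This ops table is attached to the net_device in bnx2x_init_dev()
 * below; the VLAN and poll-controller hooks are only compiled in when
 * the corresponding support is configured.
 */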
10852
10853 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10854                                     struct net_device *dev)
10855 {
10856         struct bnx2x *bp;
10857         int rc;
10858
10859         SET_NETDEV_DEV(dev, &pdev->dev);
10860         bp = netdev_priv(dev);
10861
10862         bp->dev = dev;
10863         bp->pdev = pdev;
10864         bp->flags = 0;
10865         bp->func = PCI_FUNC(pdev->devfn);
10866
10867         rc = pci_enable_device(pdev);
10868         if (rc) {
10869                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10870                 goto err_out;
10871         }
10872
10873         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10874                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10875                        " aborting\n");
10876                 rc = -ENODEV;
10877                 goto err_out_disable;
10878         }
10879
10880         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10881                 printk(KERN_ERR PFX "Cannot find second PCI device"
10882                        " base address, aborting\n");
10883                 rc = -ENODEV;
10884                 goto err_out_disable;
10885         }
10886
10887         if (atomic_read(&pdev->enable_cnt) == 1) {
10888                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10889                 if (rc) {
10890                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10891                                " aborting\n");
10892                         goto err_out_disable;
10893                 }
10894
10895                 pci_set_master(pdev);
10896                 pci_save_state(pdev);
10897         }
10898
10899         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10900         if (bp->pm_cap == 0) {
10901                 printk(KERN_ERR PFX "Cannot find power management"
10902                        " capability, aborting\n");
10903                 rc = -EIO;
10904                 goto err_out_release;
10905         }
10906
10907         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10908         if (bp->pcie_cap == 0) {
10909                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10910                        " aborting\n");
10911                 rc = -EIO;
10912                 goto err_out_release;
10913         }
10914
10915         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10916                 bp->flags |= USING_DAC_FLAG;
10917                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10918                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10919                                " failed, aborting\n");
10920                         rc = -EIO;
10921                         goto err_out_release;
10922                 }
10923
10924         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10925                 printk(KERN_ERR PFX "System does not support DMA,"
10926                        " aborting\n");
10927                 rc = -EIO;
10928                 goto err_out_release;
10929         }
10930
10931         dev->mem_start = pci_resource_start(pdev, 0);
10932         dev->base_addr = dev->mem_start;
10933         dev->mem_end = pci_resource_end(pdev, 0);
10934
10935         dev->irq = pdev->irq;
10936
10937         bp->regview = pci_ioremap_bar(pdev, 0);
10938         if (!bp->regview) {
10939                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10940                 rc = -ENOMEM;
10941                 goto err_out_release;
10942         }
10943
10944         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10945                                         min_t(u64, BNX2X_DB_SIZE,
10946                                               pci_resource_len(pdev, 2)));
10947         if (!bp->doorbells) {
10948                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10949                 rc = -ENOMEM;
10950                 goto err_out_unmap;
10951         }
10952
10953         bnx2x_set_power_state(bp, PCI_D0);
10954
10955         /* clean indirect addresses */
10956         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10957                                PCICFG_VENDOR_ID_OFFSET);
10958         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10959         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10960         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10961         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10962
10963         dev->watchdog_timeo = TX_TIMEOUT;
10964
10965         dev->netdev_ops = &bnx2x_netdev_ops;
10966         dev->ethtool_ops = &bnx2x_ethtool_ops;
10967         dev->features |= NETIF_F_SG;
10968         dev->features |= NETIF_F_HW_CSUM;
10969         if (bp->flags & USING_DAC_FLAG)
10970                 dev->features |= NETIF_F_HIGHDMA;
10971 #ifdef BCM_VLAN
10972         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10973         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10974 #endif
10975         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10976         dev->features |= NETIF_F_TSO6;
10977
10978         return 0;
10979
10980 err_out_unmap:
10981         if (bp->regview) {
10982                 iounmap(bp->regview);
10983                 bp->regview = NULL;
10984         }
10985         if (bp->doorbells) {
10986                 iounmap(bp->doorbells);
10987                 bp->doorbells = NULL;
10988         }
10989
10990 err_out_release:
10991         if (atomic_read(&pdev->enable_cnt) == 1)
10992                 pci_release_regions(pdev);
10993
10994 err_out_disable:
10995         pci_disable_device(pdev);
10996         pci_set_drvdata(pdev, NULL);
10997
10998 err_out:
10999         return rc;
11000 }
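
/* The setup above unwinds in reverse order on failure: err_out_unmap
 * drops the BAR mappings, err_out_release frees the PCI regions (only
 * on the first enable, hence the enable_cnt checks) and err_out_disable
 * disables the device.  Note the DMA policy: prefer a 64-bit mask,
 * setting USING_DAC_FLAG so that NETIF_F_HIGHDMA is advertised later,
 * and fall back to 32-bit, failing the probe only if neither works.
 */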
11001
11002 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11003 {
11004         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11005
11006         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11007         return val;
11008 }
11009
11010 /* return value: 1 = 2.5GHz, 2 = 5GHz */
11011 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11012 {
11013         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11014
11015         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11016         return val;
11017 }
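
/* Both helpers decode the PCICFG_LINK_CONTROL shadow of the PCIe link
 * status; their results feed the probe banner in bnx2x_init_one(),
 * where speed 1 is reported as 2.5GHz and speed 2 as "5GHz (Gen2)".
 */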
11018
11019 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11020                                     const struct pci_device_id *ent)
11021 {
11022         static int version_printed;
11023         struct net_device *dev = NULL;
11024         struct bnx2x *bp;
11025         int rc;
11026
11027         if (version_printed++ == 0)
11028                 printk(KERN_INFO "%s", version);
11029
11030         /* dev zeroed in alloc_etherdev_mq() */
11031         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11032         if (!dev) {
11033                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11034                 return -ENOMEM;
11035         }
11036
11037         bp = netdev_priv(dev);
11038         bp->msglevel = debug;
11039
11040         rc = bnx2x_init_dev(pdev, dev);
11041         if (rc < 0) {
11042                 free_netdev(dev);
11043                 return rc;
11044         }
11045
11046         pci_set_drvdata(pdev, dev);
11047
11048         rc = bnx2x_init_bp(bp);
11049         if (rc)
11050                 goto init_one_exit;
11051
11052         rc = register_netdev(dev);
11053         if (rc) {
11054                 dev_err(&pdev->dev, "Cannot register net device\n");
11055                 goto init_one_exit;
11056         }
11057
11058         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11059                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11060                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11061                bnx2x_get_pcie_width(bp),
11062                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11063                dev->base_addr, bp->pdev->irq);
11064         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11065         return 0;
11066
11067 init_one_exit:
11068         if (bp->regview)
11069                 iounmap(bp->regview);
11070
11071         if (bp->doorbells)
11072                 iounmap(bp->doorbells);
11073
11074         free_netdev(dev);
11075
11076         if (atomic_read(&pdev->enable_cnt) == 1)
11077                 pci_release_regions(pdev);
11078
11079         pci_disable_device(pdev);
11080         pci_set_drvdata(pdev, NULL);
11081
11082         return rc;
11083 }
11084
11085 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11086 {
11087         struct net_device *dev = pci_get_drvdata(pdev);
11088         struct bnx2x *bp;
11089
11090         if (!dev) {
11091                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11092                 return;
11093         }
11094         bp = netdev_priv(dev);
11095
11096         unregister_netdev(dev);
11097
11098         if (bp->regview)
11099                 iounmap(bp->regview);
11100
11101         if (bp->doorbells)
11102                 iounmap(bp->doorbells);
11103
11104         free_netdev(dev);
11105
11106         if (atomic_read(&pdev->enable_cnt) == 1)
11107                 pci_release_regions(pdev);
11108
11109         pci_disable_device(pdev);
11110         pci_set_drvdata(pdev, NULL);
11111 }
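
/* Teardown mirrors the error path of bnx2x_init_one(): the netdev is
 * unregistered first so no new I/O can start, then the BARs are
 * unmapped and the PCI device is released and disabled.
 */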
11112
11113 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11114 {
11115         struct net_device *dev = pci_get_drvdata(pdev);
11116         struct bnx2x *bp;
11117
11118         if (!dev) {
11119                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11120                 return -ENODEV;
11121         }
11122         bp = netdev_priv(dev);
11123
11124         rtnl_lock();
11125
11126         pci_save_state(pdev);
11127
11128         if (!netif_running(dev)) {
11129                 rtnl_unlock();
11130                 return 0;
11131         }
11132
11133         netif_device_detach(dev);
11134
11135         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11136
11137         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11138
11139         rtnl_unlock();
11140
11141         return 0;
11142 }
11143
11144 static int bnx2x_resume(struct pci_dev *pdev)
11145 {
11146         struct net_device *dev = pci_get_drvdata(pdev);
11147         struct bnx2x *bp;
11148         int rc;
11149
11150         if (!dev) {
11151                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11152                 return -ENODEV;
11153         }
11154         bp = netdev_priv(dev);
11155
11156         rtnl_lock();
11157
11158         pci_restore_state(pdev);
11159
11160         if (!netif_running(dev)) {
11161                 rtnl_unlock();
11162                 return 0;
11163         }
11164
11165         bnx2x_set_power_state(bp, PCI_D0);
11166         netif_device_attach(dev);
11167
11168         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11169
11170         rtnl_unlock();
11171
11172         return rc;
11173 }
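
/* Suspend/resume pairing: bnx2x_suspend() detaches the netif and
 * unloads with UNLOAD_CLOSE before entering the PM state chosen by the
 * PCI core, while bnx2x_resume() restores D0 and the saved config
 * space and reloads with LOAD_OPEN.  Both take rtnl_lock to serialize
 * against other netdev operations, and both reduce to PCI state
 * handling when the interface is down.
 */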
11174
11175 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11176 {
11177         int i;
11178
11179         bp->state = BNX2X_STATE_ERROR;
11180
11181         bp->rx_mode = BNX2X_RX_MODE_NONE;
11182
11183         bnx2x_netif_stop(bp, 0);
11184
11185         del_timer_sync(&bp->timer);
11186         bp->stats_state = STATS_STATE_DISABLED;
11187         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11188
11189         /* Release IRQs */
11190         bnx2x_free_irq(bp);
11191
11192         if (CHIP_IS_E1(bp)) {
11193                 struct mac_configuration_cmd *config =
11194                                                 bnx2x_sp(bp, mcast_config);
11195
11196                 for (i = 0; i < config->hdr.length; i++)
11197                         CAM_INVALIDATE(config->config_table[i]);
11198         }
11199
11200         /* Free SKBs, SGEs, TPA pool and driver internals */
11201         bnx2x_free_skbs(bp);
11202         for_each_rx_queue(bp, i)
11203                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11204         for_each_rx_queue(bp, i)
11205                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11206         bnx2x_free_mem(bp);
11207
11208         bp->state = BNX2X_STATE_CLOSED;
11209
11210         netif_carrier_off(bp->dev);
11211
11212         return 0;
11213 }
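
/* This is a stripped-down unload for a device that just took a PCI
 * channel error: it stops the netif, releases the IRQs and frees the
 * host-side resources (SKBs, SGEs, NAPI contexts), but it only
 * invalidates the E1 CAM shadow in host memory and skips the usual
 * ramrod shutdown sequence, since the device may no longer be
 * reachable.
 */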
11214
11215 static void bnx2x_eeh_recover(struct bnx2x *bp)
11216 {
11217         u32 val;
11218
11219         mutex_init(&bp->port.phy_mutex);
11220
11221         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11222         bp->link_params.shmem_base = bp->common.shmem_base;
11223         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11224
11225         if (!bp->common.shmem_base ||
11226             (bp->common.shmem_base < 0xA0000) ||
11227             (bp->common.shmem_base >= 0xC0000)) {
11228                 BNX2X_DEV_INFO("MCP not active\n");
11229                 bp->flags |= NO_MCP_FLAG;
11230                 return;
11231         }
11232
11233         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11234         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11235                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11236                 BNX2X_ERR("BAD MCP validity signature\n");
11237
11238         if (!BP_NOMCP(bp)) {
11239                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11240                               & DRV_MSG_SEQ_NUMBER_MASK);
11241                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11242         }
11243 }
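
/* Recovery re-reads the shared-memory base and treats anything outside
 * the expected window (0xA0000-0xC0000) as "MCP not active"; in that
 * case NO_MCP_FLAG is set and firmware handshakes are skipped.
 * Otherwise the firmware sequence number is resynchronized from the
 * driver mailbox header so that later MCP commands stay in step.
 */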
11244
11245 /**
11246  * bnx2x_io_error_detected - called when PCI error is detected
11247  * @pdev: Pointer to PCI device
11248  * @state: The current pci connection state
11249  *
11250  * This function is called after a PCI bus error affecting
11251  * this device has been detected.
11252  */
11253 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11254                                                 pci_channel_state_t state)
11255 {
11256         struct net_device *dev = pci_get_drvdata(pdev);
11257         struct bnx2x *bp = netdev_priv(dev);
11258
11259         rtnl_lock();
11260
11261         netif_device_detach(dev);
11262
11263         if (netif_running(dev))
11264                 bnx2x_eeh_nic_unload(bp);
11265
11266         pci_disable_device(pdev);
11267
11268         rtnl_unlock();
11269
11270         /* Request a slot reset */
11271         return PCI_ERS_RESULT_NEED_RESET;
11272 }
11273
11274 /**
11275  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11276  * @pdev: Pointer to PCI device
11277  *
11278  * Restart the card from scratch, as if from a cold-boot.
11279  */
11280 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11281 {
11282         struct net_device *dev = pci_get_drvdata(pdev);
11283         struct bnx2x *bp = netdev_priv(dev);
11284
11285         rtnl_lock();
11286
11287         if (pci_enable_device(pdev)) {
11288                 dev_err(&pdev->dev,
11289                         "Cannot re-enable PCI device after reset\n");
11290                 rtnl_unlock();
11291                 return PCI_ERS_RESULT_DISCONNECT;
11292         }
11293
11294         pci_set_master(pdev);
11295         pci_restore_state(pdev);
11296
11297         if (netif_running(dev))
11298                 bnx2x_set_power_state(bp, PCI_D0);
11299
11300         rtnl_unlock();
11301
11302         return PCI_ERS_RESULT_RECOVERED;
11303 }
11304
11305 /**
11306  * bnx2x_io_resume - called when traffic can start flowing again
11307  * @pdev: Pointer to PCI device
11308  *
11309  * This callback is called when the error recovery driver tells us that
11310  * it's OK to resume normal operation.
11311  */
11312 static void bnx2x_io_resume(struct pci_dev *pdev)
11313 {
11314         struct net_device *dev = pci_get_drvdata(pdev);
11315         struct bnx2x *bp = netdev_priv(dev);
11316
11317         rtnl_lock();
11318
11319         bnx2x_eeh_recover(bp);
11320
11321         if (netif_running(dev))
11322                 bnx2x_nic_load(bp, LOAD_NORMAL);
11323
11324         netif_device_attach(dev);
11325
11326         rtnl_unlock();
11327 }
11328
11329 static struct pci_error_handlers bnx2x_err_handler = {
11330         .error_detected = bnx2x_io_error_detected,
11331         .slot_reset     = bnx2x_io_slot_reset,
11332         .resume         = bnx2x_io_resume,
11333 };
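
/* The standard PCI error recovery sequence (see
 * Documentation/PCI/pci-error-recovery.txt): on a channel error the
 * core calls .error_detected, which detaches the device and requests a
 * slot reset via PCI_ERS_RESULT_NEED_RESET; after the reset,
 * .slot_reset re-enables the device and returns
 * PCI_ERS_RESULT_RECOVERED; finally .resume reloads the NIC and
 * reattaches the netif.
 */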
11334
11335 static struct pci_driver bnx2x_pci_driver = {
11336         .name        = DRV_MODULE_NAME,
11337         .id_table    = bnx2x_pci_tbl,
11338         .probe       = bnx2x_init_one,
11339         .remove      = __devexit_p(bnx2x_remove_one),
11340         .suspend     = bnx2x_suspend,
11341         .resume      = bnx2x_resume,
11342         .err_handler = &bnx2x_err_handler,
11343 };
11344
11345 static int __init bnx2x_init(void)
11346 {
11347         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11348         if (bnx2x_wq == NULL) {
11349                 printk(KERN_ERR PFX "Cannot create workqueue\n");
11350                 return -ENOMEM;
11351         }
11352
11353         return pci_register_driver(&bnx2x_pci_driver);
11354 }
11355
11356 static void __exit bnx2x_cleanup(void)
11357 {
11358         pci_unregister_driver(&bnx2x_pci_driver);
11359
11360         destroy_workqueue(bnx2x_wq);
11361 }
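
/* Exit order matters: the PCI driver is unregistered first so that all
 * devices are removed and nothing can still queue work on bnx2x_wq by
 * the time the workqueue is destroyed.
 */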
11362
11363 module_init(bnx2x_init);
11364 module_exit(bnx2x_cleanup);
11365