bnx2x: Spelling mistakes
drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.6"
#define DRV_MODULE_RELDATE      "2008/06/23"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

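/* used only at init (companion to bnx2x_reg_wr_ind)
 * reads a register indirectly through the PCICFG GRC window
 */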
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

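/* DMA a buffer from host memory to device (GRC) address space using the
 * DMAE block; falls back to indirect writes while DMAE is not ready and
 * polls the write-back completion word under dmae_mutex
 */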
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

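/* DMA from device (GRC) address space into the slowpath wb_data buffer
 * using the DMAE block; falls back to indirect reads while DMAE is not
 * ready
 */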
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

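/* scan the assert lists of the four STORM processors (X, T, C and U)
 * and print every valid entry; returns the number of asserts found
 */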
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

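/* print the firmware (MCP) trace buffer from the scratchpad, starting
 * at the last mark and wrapping around the end of the buffer
 */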
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

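/* dump the driver's view of the device on a panic: the Tx/Rx/SGE/CQE
 * rings around their current indices for every queue, the default
 * status block indices, the firmware trace and the STORM asserts
 */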
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

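/* enable interrupts in the HC, in MSI-X or INT#A mode as configured,
 * and set the attention leading/trailing edge registers on E1H
 */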
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

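/* acknowledge a status block index to the IGU and optionally enable
 * further interrupts, using a single igu_ack_register write
 */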
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

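/* pick up the c/u status block indices most recently written by the
 * chip; returns a bitmask of the indices that changed
 */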
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

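/* read (and thereby acknowledge) the interrupt status from the HC
 * SIMD mask register
 */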
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

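/* number of available Tx BDs, with the "next-page" BDs of every ring
 * page kept reserved
 */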
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

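/* reclaim Tx packets completed up to the consumer index reported in the
 * status block, then wake the queue if it was stopped and enough BDs
 * have become available
 */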
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}


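/* slowpath ramrod completion: advance the per-fastpath or the global
 * driver state machine according to the command echoed in the CQE
 */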
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

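/* mark the two "next-page" entries at the end of every SGE ring page
 * as consumed in the mask - they never carry data
 */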
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

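/* on a TPA completion, mark the pages listed in the SGL as used and
 * advance the SGE producer over mask elements that have fully drained
 */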
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      BCM_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

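/* start of a TPA aggregation: park the just-received skb in the
 * per-queue tpa_pool and map the pool's spare skb into the producer
 * slot in its place
 */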
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

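/* attach the SGE pages listed in the completion to the aggregated skb
 * as page fragments, allocating a substitute page for each ring slot
 */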
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        struct page *sge;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > 8*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                sge = rx_pg->page;
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

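/* end of a TPA aggregation: fix the IP checksum, attach the SGE
 * fragments and pass the skb up the stack; on allocation failure the
 * packet is dropped and the buffer stays in the bin
 */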
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                bp->dev->last_rx = jiffies;

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

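/* publish the new Rx BD, CQE and SGE producers to the TSTORM internal
 * memory
 */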
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

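/* main Rx completion loop: handles slowpath CQEs, TPA start/stop and
 * regular packets, up to the given budget
 */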
1369 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1370 {
1371         struct bnx2x *bp = fp->bp;
1372         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1373         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1374         int rx_pkt = 0;
1375
1376 #ifdef BNX2X_STOP_ON_ERROR
1377         if (unlikely(bp->panic))
1378                 return 0;
1379 #endif
1380
1381         /* CQ "next element" is of the size of the regular element,
1382            that's why it's ok here */
1383         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1384         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1385                 hw_comp_cons++;
1386
1387         bd_cons = fp->rx_bd_cons;
1388         bd_prod = fp->rx_bd_prod;
1389         bd_prod_fw = bd_prod;
1390         sw_comp_cons = fp->rx_comp_cons;
1391         sw_comp_prod = fp->rx_comp_prod;
1392
1393         /* Memory barrier necessary as speculative reads of the rx
1394          * buffer can be ahead of the index in the status block
1395          */
1396         rmb();
1397
1398         DP(NETIF_MSG_RX_STATUS,
1399            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1400            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1401
1402         while (sw_comp_cons != hw_comp_cons) {
1403                 struct sw_rx_bd *rx_buf = NULL;
1404                 struct sk_buff *skb;
1405                 union eth_rx_cqe *cqe;
1406                 u8 cqe_fp_flags;
1407                 u16 len, pad;
1408
1409                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1410                 bd_prod = RX_BD(bd_prod);
1411                 bd_cons = RX_BD(bd_cons);
1412
1413                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1414                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1415
1416                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1417                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1418                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1419                    cqe->fast_path_cqe.rss_hash_result,
1420                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1421                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1422
1423                 /* is this a slowpath msg? */
1424                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1425                         bnx2x_sp_event(fp, cqe);
1426                         goto next_cqe;
1427
1428                 /* this is an rx packet */
1429                 } else {
1430                         rx_buf = &fp->rx_buf_ring[bd_cons];
1431                         skb = rx_buf->skb;
1432                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1433                         pad = cqe->fast_path_cqe.placement_offset;
1434
1435                         /* If CQE is marked both TPA_START and TPA_END
1436                            it is a non-TPA CQE */
1437                         if ((!fp->disable_tpa) &&
1438                             (TPA_TYPE(cqe_fp_flags) !=
1439                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1440                                 u16 queue = cqe->fast_path_cqe.queue_index;
1441
1442                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1443                                         DP(NETIF_MSG_RX_STATUS,
1444                                            "calling tpa_start on queue %d\n",
1445                                            queue);
1446
1447                                         bnx2x_tpa_start(fp, queue, skb,
1448                                                         bd_cons, bd_prod);
1449                                         goto next_rx;
1450                                 }
1451
1452                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1453                                         DP(NETIF_MSG_RX_STATUS,
1454                                            "calling tpa_stop on queue %d\n",
1455                                            queue);
1456
1457                                         if (!BNX2X_RX_SUM_FIX(cqe))
1458                                                 BNX2X_ERR("STOP on non-TCP "
1459                                                           "data\n");
1460
1461                                         /* This is the size of the linear data
1462                                            on this skb */
1463                                         len = le16_to_cpu(cqe->fast_path_cqe.
1464                                                                 len_on_bd);
1465                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1466                                                     len, cqe, comp_ring_cons);
1467 #ifdef BNX2X_STOP_ON_ERROR
1468                                         if (bp->panic)
1469                                                 return -EINVAL;
1470 #endif
1471
1472                                         bnx2x_update_sge_prod(fp,
1473                                                         &cqe->fast_path_cqe);
1474                                         goto next_cqe;
1475                                 }
1476                         }
1477
1478                         pci_dma_sync_single_for_device(bp->pdev,
1479                                         pci_unmap_addr(rx_buf, mapping),
1480                                                        pad + RX_COPY_THRESH,
1481                                                        PCI_DMA_FROMDEVICE);
1482                         prefetch(skb);
1483                         prefetch(((char *)(skb)) + 128);
1484
1485                         /* is this an error packet? */
1486                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1487                                 DP(NETIF_MSG_RX_ERR,
1488                                    "ERROR  flags %x  rx packet %u\n",
1489                                    cqe_fp_flags, sw_comp_cons);
1490                                 bp->eth_stats.rx_err_discard_pkt++;
1491                                 goto reuse_rx;
1492                         }
1493
1494                         /* Since we don't have a jumbo ring,
1495                          * copy small packets if mtu > 1500
1496                          */
1497                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1498                             (len <= RX_COPY_THRESH)) {
1499                                 struct sk_buff *new_skb;
1500
1501                                 new_skb = netdev_alloc_skb(bp->dev,
1502                                                            len + pad);
1503                                 if (new_skb == NULL) {
1504                                         DP(NETIF_MSG_RX_ERR,
1505                                            "ERROR  packet dropped "
1506                                            "because of alloc failure\n");
1507                                         bp->eth_stats.rx_skb_alloc_failed++;
1508                                         goto reuse_rx;
1509                                 }
1510
1511                                 /* aligned copy */
1512                                 skb_copy_from_linear_data_offset(skb, pad,
1513                                                     new_skb->data + pad, len);
1514                                 skb_reserve(new_skb, pad);
1515                                 skb_put(new_skb, len);
1516
1517                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1518
1519                                 skb = new_skb;
1520
1521                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1522                                 pci_unmap_single(bp->pdev,
1523                                         pci_unmap_addr(rx_buf, mapping),
1524                                                  bp->rx_buf_use_size,
1525                                                  PCI_DMA_FROMDEVICE);
1526                                 skb_reserve(skb, pad);
1527                                 skb_put(skb, len);
1528
1529                         } else {
1530                                 DP(NETIF_MSG_RX_ERR,
1531                                    "ERROR  packet dropped because "
1532                                    "of alloc failure\n");
1533                                 bp->eth_stats.rx_skb_alloc_failed++;
1534 reuse_rx:
1535                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1536                                 goto next_rx;
1537                         }
1538
1539                         skb->protocol = eth_type_trans(skb, bp->dev);
1540
1541                         skb->ip_summed = CHECKSUM_NONE;
1542                         if (bp->rx_csum) {
1543                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1544                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1545                                 else
1546                                         bp->eth_stats.hw_csum_err++;
1547                         }
1548                 }
1549
1550 #ifdef BCM_VLAN
1551                 if ((bp->vlgrp != NULL) &&
1552                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1553                      PARSING_FLAGS_VLAN))
1554                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1555                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1556                 else
1557 #endif
1558                         netif_receive_skb(skb);
1559
1560                 bp->dev->last_rx = jiffies;
1561
1562 next_rx:
1563                 rx_buf->skb = NULL;
1564
1565                 bd_cons = NEXT_RX_IDX(bd_cons);
1566                 bd_prod = NEXT_RX_IDX(bd_prod);
1567                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1568                 rx_pkt++;
1569 next_cqe:
1570                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1571                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1572
1573                 if (rx_pkt == budget)
1574                         break;
1575         } /* while */
1576
1577         fp->rx_bd_cons = bd_cons;
1578         fp->rx_bd_prod = bd_prod_fw;
1579         fp->rx_comp_cons = sw_comp_cons;
1580         fp->rx_comp_prod = sw_comp_prod;
1581
1582         /* Update producers */
1583         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1584                              fp->rx_sge_prod);
1585         mmiowb(); /* keep prod updates ordered */
1586
1587         fp->rx_pkt += rx_pkt;
1588         fp->rx_calls++;
1589
1590         return rx_pkt;
1591 }
1592
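/* Editorial note: the MSI-X fastpath handler below acks the status block with
 * IGU_INT_DISABLE so no further interrupts arrive for this queue, then
 * schedules its NAPI instance; the RX/TX completion work itself runs in the
 * NAPI poll routine, not in the interrupt context.
 */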
1593 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1594 {
1595         struct bnx2x_fastpath *fp = fp_cookie;
1596         struct bnx2x *bp = fp->bp;
1597         struct net_device *dev = bp->dev;
1598         int index = FP_IDX(fp);
1599
1600         /* Return here if interrupt is disabled */
1601         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1602                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1603                 return IRQ_HANDLED;
1604         }
1605
1606         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1607            index, FP_SB_ID(fp));
1608         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1609
1610 #ifdef BNX2X_STOP_ON_ERROR
1611         if (unlikely(bp->panic))
1612                 return IRQ_HANDLED;
1613 #endif
1614
1615         prefetch(fp->rx_cons_sb);
1616         prefetch(fp->tx_cons_sb);
1617         prefetch(&fp->status_blk->c_status_block.status_block_index);
1618         prefetch(&fp->status_blk->u_status_block.status_block_index);
1619
1620         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1621
1622         return IRQ_HANDLED;
1623 }
1624
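/* Editorial note on the INT#A handler below: bnx2x_ack_int() returns a
 * bitmask of pending sources. Bit (0x2 << sb_id) means fastpath 0 has work
 * (handed to NAPI), bit 0x1 means a slowpath event (deferred to sp_task).
 * Any bits left over indicate an unexpected source and are only logged.
 */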
1625 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1626 {
1627         struct net_device *dev = dev_instance;
1628         struct bnx2x *bp = netdev_priv(dev);
1629         u16 status = bnx2x_ack_int(bp);
1630         u16 mask;
1631
1632         /* Return here if interrupt is shared and it's not for us */
1633         if (unlikely(status == 0)) {
1634                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1635                 return IRQ_NONE;
1636         }
1637         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1638
1639         /* Return here if interrupt is disabled */
1640         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1641                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1642                 return IRQ_HANDLED;
1643         }
1644
1645 #ifdef BNX2X_STOP_ON_ERROR
1646         if (unlikely(bp->panic))
1647                 return IRQ_HANDLED;
1648 #endif
1649
1650         mask = 0x2 << bp->fp[0].sb_id;
1651         if (status & mask) {
1652                 struct bnx2x_fastpath *fp = &bp->fp[0];
1653
1654                 prefetch(fp->rx_cons_sb);
1655                 prefetch(fp->tx_cons_sb);
1656                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1657                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1658
1659                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1660
1661                 status &= ~mask;
1662         }
1663
1664
1665         if (unlikely(status & 0x1)) {
1666                 schedule_work(&bp->sp_task);
1667
1668                 status &= ~0x1;
1669                 if (!status)
1670                         return IRQ_HANDLED;
1671         }
1672
1673         if (status)
1674                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1675                    status);
1676
1677         return IRQ_HANDLED;
1678 }
1679
1680 /* end of fast path */
1681
1682 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1683
1684 /* Link */
1685
1686 /*
1687  * General service functions
1688  */
1689
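/* Editorial note: the HW locks below arbitrate shared resources (MDIO, GPIO,
 * SPIO, attention masks) between the functions of the device. Each resource
 * is one bit in a per-function MISC_REG_DRIVER_CONTROL_x register: writing
 * the bit to the set register (base + 4) tries to take the lock, and reading
 * the base register back confirms ownership. Acquire polls every 5ms for up
 * to 1 second before returning -EAGAIN.
 */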
1690 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1691 {
1692         u32 lock_status;
1693         u32 resource_bit = (1 << resource);
1694         int func = BP_FUNC(bp);
1695         u32 hw_lock_control_reg;
1696         int cnt;
1697
1698         /* Validating that the resource is within range */
1699         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1700                 DP(NETIF_MSG_HW,
1701                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1702                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1703                 return -EINVAL;
1704         }
1705
1706         if (func <= 5) {
1707                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1708         } else {
1709                 hw_lock_control_reg =
1710                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1711         }
1712
1713         /* Validating that the resource is not already taken */
1714         lock_status = REG_RD(bp, hw_lock_control_reg);
1715         if (lock_status & resource_bit) {
1716                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1717                    lock_status, resource_bit);
1718                 return -EEXIST;
1719         }
1720
1721         /* Try for 1 second every 5ms */
1722         for (cnt = 0; cnt < 200; cnt++) {
1723                 /* Try to acquire the lock */
1724                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1725                 lock_status = REG_RD(bp, hw_lock_control_reg);
1726                 if (lock_status & resource_bit)
1727                         return 0;
1728
1729                 msleep(5);
1730         }
1731         DP(NETIF_MSG_HW, "Timeout\n");
1732         return -EAGAIN;
1733 }
1734
1735 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1736 {
1737         u32 lock_status;
1738         u32 resource_bit = (1 << resource);
1739         int func = BP_FUNC(bp);
1740         u32 hw_lock_control_reg;
1741
1742         /* Validating that the resource is within range */
1743         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1744                 DP(NETIF_MSG_HW,
1745                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1746                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1747                 return -EINVAL;
1748         }
1749
1750         if (func <= 5) {
1751                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1752         } else {
1753                 hw_lock_control_reg =
1754                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1755         }
1756
1757         /* Validating that the resource is currently taken */
1758         lock_status = REG_RD(bp, hw_lock_control_reg);
1759         if (!(lock_status & resource_bit)) {
1760                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1761                    lock_status, resource_bit);
1762                 return -EFAULT;
1763         }
1764
1765         REG_WR(bp, hw_lock_control_reg, resource_bit);
1766         return 0;
1767 }
1768
1769 /* HW Lock for shared dual port PHYs */
1770 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1771 {
1772         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1773
1774         mutex_lock(&bp->port.phy_mutex);
1775
1776         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1777             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1778                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1779 }
1780
1781 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1782 {
1783         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1784
1785         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1786             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1787                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1788
1789         mutex_unlock(&bp->port.phy_mutex);
1790 }
1791
1792 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1793 {
1794         /* The GPIO should be swapped if swap register is set and active */
1795         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1796                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1797         int gpio_shift = gpio_num +
1798                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1799         u32 gpio_mask = (1 << gpio_shift);
1800         u32 gpio_reg;
1801
1802         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1803                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1804                 return -EINVAL;
1805         }
1806
1807         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1808         /* read GPIO and mask out all bits except the float bits */
1809         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1810
1811         switch (mode) {
1812         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1813                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1814                    gpio_num, gpio_shift);
1815                 /* clear FLOAT and set CLR */
1816                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1817                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1818                 break;
1819
1820         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1821                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1822                    gpio_num, gpio_shift);
1823                 /* clear FLOAT and set SET */
1824                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1825                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1826                 break;
1827
1828         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1829                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1830                    gpio_num, gpio_shift);
1831                 /* set FLOAT */
1832                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1833                 break;
1834
1835         default:
1836                 break;
1837         }
1838
1839         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1840         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1841
1842         return 0;
1843 }
1844
1845 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1846 {
1847         u32 spio_mask = (1 << spio_num);
1848         u32 spio_reg;
1849
1850         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1851             (spio_num > MISC_REGISTERS_SPIO_7)) {
1852                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1853                 return -EINVAL;
1854         }
1855
1856         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1857         /* read SPIO and mask out all bits except the float bits */
1858         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1859
1860         switch (mode) {
1861         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1862                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1863                 /* clear FLOAT and set CLR */
1864                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1865                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1866                 break;
1867
1868         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1869                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1870                 /* clear FLOAT and set SET */
1871                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1872                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1873                 break;
1874
1875         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1876                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1877                 /* set FLOAT */
1878                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1879                 break;
1880
1881         default:
1882                 break;
1883         }
1884
1885         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1886         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1887
1888         return 0;
1889 }
1890
1891 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1892 {
1893         switch (bp->link_vars.ieee_fc) {
1894         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1895                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1896                                           ADVERTISED_Pause);
1897                 break;
1898         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1899                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1900                                          ADVERTISED_Pause);
1901                 break;
1902         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1903                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1904                 break;
1905         default:
1906                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1907                                           ADVERTISED_Pause);
1908                 break;
1909         }
1910 }
1911
1912 static void bnx2x_link_report(struct bnx2x *bp)
1913 {
1914         if (bp->link_vars.link_up) {
1915                 if (bp->state == BNX2X_STATE_OPEN)
1916                         netif_carrier_on(bp->dev);
1917                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1918
1919                 printk("%d Mbps ", bp->link_vars.line_speed);
1920
1921                 if (bp->link_vars.duplex == DUPLEX_FULL)
1922                         printk("full duplex");
1923                 else
1924                         printk("half duplex");
1925
1926                 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1927                         if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1928                                 printk(", receive ");
1929                                 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1930                                         printk("& transmit ");
1931                         } else {
1932                                 printk(", transmit ");
1933                         }
1934                         printk("flow control ON");
1935                 }
1936                 printk("\n");
1937
1938         } else { /* link_down */
1939                 netif_carrier_off(bp->dev);
1940                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1941         }
1942 }
1943
1944 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1945 {
1946         if (!BP_NOMCP(bp)) {
1947                 u8 rc;
1948
1949                 /* Initialize link parameters structure variables */
1950                 /* It is recommended to turn off RX FC for jumbo frames
1951                    for better performance */
1952                 if (IS_E1HMF(bp))
1953                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1954                 else if (bp->dev->mtu > 5000)
1955                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
1956                 else
1957                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1958
1959                 bnx2x_acquire_phy_lock(bp);
1960                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1961                 bnx2x_release_phy_lock(bp);
1962
1963                 if (bp->link_vars.link_up)
1964                         bnx2x_link_report(bp);
1965
1966                 bnx2x_calc_fc_adv(bp);
1967
1968                 return rc;
1969         }
1970         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1971         return -EINVAL;
1972 }
1973
1974 static void bnx2x_link_set(struct bnx2x *bp)
1975 {
1976         if (!BP_NOMCP(bp)) {
1977                 bnx2x_acquire_phy_lock(bp);
1978                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1979                 bnx2x_release_phy_lock(bp);
1980
1981                 bnx2x_calc_fc_adv(bp);
1982         } else
1983                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1984 }
1985
1986 static void bnx2x__link_reset(struct bnx2x *bp)
1987 {
1988         if (!BP_NOMCP(bp)) {
1989                 bnx2x_acquire_phy_lock(bp);
1990                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1991                 bnx2x_release_phy_lock(bp);
1992         } else
1993                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1994 }
1995
1996 static u8 bnx2x_link_test(struct bnx2x *bp)
1997 {
1998         u8 rc;
1999
2000         bnx2x_acquire_phy_lock(bp);
2001         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2002         bnx2x_release_phy_lock(bp);
2003
2004         return rc;
2005 }
2006
2007 /* Calculates the sum of vn_min_rates.
2008    It's needed for further normalizing of the min_rates.
2009
2010    Returns:
2011      sum of vn_min_rates
2012        or
2013      0 - if all the min_rates are 0.
2014      In the latter case the fairness algorithm should be deactivated.
2015      If not all min_rates are zero, then those that are zero will
2016      be set to 1.
2017  */
2018 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2019 {
2020         int i, port = BP_PORT(bp);
2021         u32 wsum = 0;
2022         int all_zero = 1;
2023
2024         for (i = 0; i < E1HVN_MAX; i++) {
2025                 u32 vn_cfg =
2026                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2027                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2028                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2029                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2030                         /* If min rate is zero - set it to 1 */
2031                         if (!vn_min_rate)
2032                                 vn_min_rate = DEF_MIN_RATE;
2033                         else
2034                                 all_zero = 0;
2035
2036                         wsum += vn_min_rate;
2037                 }
2038         }
2039
2040         /* ... only if all min rates are zeros - disable FAIRNESS */
2041         if (all_zero)
2042                 return 0;
2043
2044         return wsum;
2045 }
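/* Editorial worked example (illustrative values only; E1HVN_MAX assumed to
 * be 4 here): with MIN BW fields of 10, 25, 0 and 0 percent and no hidden
 * functions, vn_min_rate is 1000, 2500, DEF_MIN_RATE and DEF_MIN_RATE
 * (the zero entries are bumped up), so wsum is their sum and fairness stays
 * enabled. Only if all four fields are 0 does the function return 0.
 */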
2046
2047 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2048                                    int en_fness,
2049                                    u16 port_rate,
2050                                    struct cmng_struct_per_port *m_cmng_port)
2051 {
2052         u32 r_param = port_rate / 8;
2053         int port = BP_PORT(bp);
2054         int i;
2055
2056         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2057
2058         /* Enable minmax only if we are in e1hmf mode */
2059         if (IS_E1HMF(bp)) {
2060                 u32 fair_periodic_timeout_usec;
2061                 u32 t_fair;
2062
2063                 /* Enable rate shaping and fairness */
2064                 m_cmng_port->flags.cmng_vn_enable = 1;
2065                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2066                 m_cmng_port->flags.rate_shaping_enable = 1;
2067
2068                 if (!en_fness)
2069                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2070                            "  fairness will be disabled\n");
2071
2072                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2073                 m_cmng_port->rs_vars.rs_periodic_timeout =
2074                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2075
2076                 /* this is the threshold below which no timer arming will occur;
2077                    the 1.25 coefficient makes the threshold a little bigger than
2078                    the real time, to compensate for timer inaccuracy */
2079                 m_cmng_port->rs_vars.rs_threshold =
2080                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2081
2082                 /* resolution of fairness timer */
2083                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2084                 /* for 10G it is 1000 usec, for 1G it is 10000 usec */
2085                 t_fair = T_FAIR_COEF / port_rate;
2086
2087                 /* this is the threshold below which we won't arm
2088                    the timer anymore */
2089                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2090
2091                 /* we multiply by 1e3/8 to get bytes/msec.
2092                    We don't want the credits to exceed a credit
2093                    of T_FAIR*FAIR_MEM (the algorithm resolution) */
2094                 m_cmng_port->fair_vars.upper_bound =
2095                                                 r_param * t_fair * FAIR_MEM;
2096                 /* since each tick is 4 usec */
2097                 m_cmng_port->fair_vars.fairness_timeout =
2098                                                 fair_periodic_timeout_usec / 4;
2099
2100         } else {
2101                 /* Disable rate shaping and fairness */
2102                 m_cmng_port->flags.cmng_vn_enable = 0;
2103                 m_cmng_port->flags.fairness_enable = 0;
2104                 m_cmng_port->flags.rate_shaping_enable = 0;
2105
2106                 DP(NETIF_MSG_IFUP,
2107                    "Single function mode  minmax will be disabled\n");
2108         }
2109
2110         /* Store it to internal memory */
2111         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2112                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2113                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2114                        ((u32 *)(m_cmng_port))[i]);
2115 }
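/* Editorial numeric sanity check (assuming RS_PERIODIC_TIMEOUT_USEC = 100,
 * as the "100 usec in SDM ticks" comment above suggests): at 10G,
 * r_param = 10000/8 = 1250 bytes/usec, so
 * rs_threshold = 100 * 1250 * 5 / 4 = 156250 bytes, and
 * t_fair = T_FAIR_COEF / 10000 = 1000 usec per the comment above. These are
 * illustrative derivations from the comments, not new tunables.
 */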
2116
2117 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2118                                  u32 wsum, u16 port_rate,
2119                                  struct cmng_struct_per_port *m_cmng_port)
2120 {
2121         struct rate_shaping_vars_per_vn m_rs_vn;
2122         struct fairness_vars_per_vn m_fair_vn;
2123         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2124         u16 vn_min_rate, vn_max_rate;
2125         int i;
2126
2127         /* If function is hidden - set min and max to zeroes */
2128         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2129                 vn_min_rate = 0;
2130                 vn_max_rate = 0;
2131
2132         } else {
2133                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2134                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2135                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2136                    if current min rate is zero - set it to 1.
2137                    This is a requirement of the algorithm. */
2138                 if ((vn_min_rate == 0) && wsum)
2139                         vn_min_rate = DEF_MIN_RATE;
2140                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2141                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2142         }
2143
2144         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2145            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2146
2147         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2148         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2149
2150         /* global vn counter - maximal Mbps for this vn */
2151         m_rs_vn.vn_counter.rate = vn_max_rate;
2152
2153         /* quota - number of bytes transmitted in this period */
2154         m_rs_vn.vn_counter.quota =
2155                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2156
2157 #ifdef BNX2X_PER_PROT_QOS
2158         /* per protocol counter */
2159         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2160                 /* maximal Mbps for this protocol */
2161                 m_rs_vn.protocol_counters[protocol].rate =
2162                                                 protocol_max_rate[protocol];
2163                 /* the quota in each timer period -
2164                    number of bytes transmitted in this period */
2165                 m_rs_vn.protocol_counters[protocol].quota =
2166                         (u32)(rs_periodic_timeout_usec *
2167                           ((double)m_rs_vn.
2168                                    protocol_counters[protocol].rate/8));
2169         }
2170 #endif
2171
2172         if (wsum) {
2173                 /* credit for each period of the fairness algorithm:
2174                    number of bytes in T_FAIR (the vns share the port rate).
2175                    wsum should not be larger than 10000, thus
2176                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2177                 m_fair_vn.vn_credit_delta =
2178                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2179                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2180                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2181                    m_fair_vn.vn_credit_delta);
2182         }
2183
2184 #ifdef BNX2X_PER_PROT_QOS
2185         do {
2186                 u32 protocolWeightSum = 0;
2187
2188                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2189                         protocolWeightSum +=
2190                                         drvInit.protocol_min_rate[protocol];
2191                 /* per protocol counter -
2192                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2193                 if (protocolWeightSum > 0) {
2194                         for (protocol = 0;
2195                              protocol < NUM_OF_PROTOCOLS; protocol++)
2196                                 /* credit for each period of the
2197                                    fairness algorithm - number of bytes in
2198                                    T_FAIR (the protocols share the vn rate) */
2199                                 m_fair_vn.protocol_credit_delta[protocol] =
2200                                         (u32)((vn_min_rate / 8) * t_fair *
2201                                         protocol_min_rate / protocolWeightSum);
2202                 }
2203         } while (0);
2204 #endif
2205
2206         /* Store it to internal memory */
2207         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2208                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2209                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2210                        ((u32 *)(&m_rs_vn))[i]);
2211
2212         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2213                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2214                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2215                        ((u32 *)(&m_fair_vn))[i]);
2216 }
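/* Editorial quota example (illustrative, again assuming
 * RS_PERIODIC_TIMEOUT_USEC = 100): a MAX BW field of 25 gives
 * vn_max_rate = 2500 (2.5 Gbps), so quota = 2500 * 100 / 8 = 31250 bytes may
 * be transmitted per rate-shaping period. Mbps * usec / 8 gives bytes since
 * 1 Mbps is exactly one bit per usec.
 */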
2217
2218 /* This function is called upon link interrupt */
2219 static void bnx2x_link_attn(struct bnx2x *bp)
2220 {
2221         int vn;
2222
2223         /* Make sure that we are synced with the current statistics */
2224         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2225
2226         bnx2x_acquire_phy_lock(bp);
2227         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2228         bnx2x_release_phy_lock(bp);
2229
2230         if (bp->link_vars.link_up) {
2231
2232                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2233                         struct host_port_stats *pstats;
2234
2235                         pstats = bnx2x_sp(bp, port_stats);
2236                         /* reset old bmac stats */
2237                         memset(&(pstats->mac_stx[0]), 0,
2238                                sizeof(struct mac_stx));
2239                 }
2240                 if ((bp->state == BNX2X_STATE_OPEN) ||
2241                     (bp->state == BNX2X_STATE_DISABLED))
2242                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2243         }
2244
2245         /* indicate link status */
2246         bnx2x_link_report(bp);
2247
2248         if (IS_E1HMF(bp)) {
2249                 int func;
2250
2251                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2252                         if (vn == BP_E1HVN(bp))
2253                                 continue;
2254
2255                         func = ((vn << 1) | BP_PORT(bp));
2256
2257                         /* Set the attention towards other drivers
2258                            on the same port */
2259                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2260                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2261                 }
2262         }
2263
2264         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2265                 struct cmng_struct_per_port m_cmng_port;
2266                 u32 wsum;
2267                 int port = BP_PORT(bp);
2268
2269                 /* Init RATE SHAPING and FAIRNESS contexts */
2270                 wsum = bnx2x_calc_vn_wsum(bp);
2271                 bnx2x_init_port_minmax(bp, (int)wsum,
2272                                         bp->link_vars.line_speed,
2273                                         &m_cmng_port);
2274                 if (IS_E1HMF(bp))
2275                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2276                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2277                                         wsum, bp->link_vars.line_speed,
2278                                                      &m_cmng_port);
2279         }
2280 }
2281
2282 static void bnx2x__link_status_update(struct bnx2x *bp)
2283 {
2284         if (bp->state != BNX2X_STATE_OPEN)
2285                 return;
2286
2287         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2288
2289         if (bp->link_vars.link_up)
2290                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2291         else
2292                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2293
2294         /* indicate link status */
2295         bnx2x_link_report(bp);
2296 }
2297
2298 static void bnx2x_pmf_update(struct bnx2x *bp)
2299 {
2300         int port = BP_PORT(bp);
2301         u32 val;
2302
2303         bp->port.pmf = 1;
2304         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2305
2306         /* enable nig attention */
2307         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2308         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2309         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2310
2311         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2312 }
2313
2314 /* end of Link */
2315
2316 /* slow path */
2317
2318 /*
2319  * General service functions
2320  */
2321
2322 /* the slow path queue is odd since completions arrive on the fastpath ring */
2323 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2324                          u32 data_hi, u32 data_lo, int common)
2325 {
2326         int func = BP_FUNC(bp);
2327
2328         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2329            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2330            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2331            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2332            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2333
2334 #ifdef BNX2X_STOP_ON_ERROR
2335         if (unlikely(bp->panic))
2336                 return -EIO;
2337 #endif
2338
2339         spin_lock_bh(&bp->spq_lock);
2340
2341         if (!bp->spq_left) {
2342                 BNX2X_ERR("BUG! SPQ ring full!\n");
2343                 spin_unlock_bh(&bp->spq_lock);
2344                 bnx2x_panic();
2345                 return -EBUSY;
2346         }
2347
2348         /* CID needs port number to be encoded in it */
2349         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2350                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2351                                      HW_CID(bp, cid)));
2352         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2353         if (common)
2354                 bp->spq_prod_bd->hdr.type |=
2355                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2356
2357         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2358         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2359
2360         bp->spq_left--;
2361
2362         if (bp->spq_prod_bd == bp->spq_last_bd) {
2363                 bp->spq_prod_bd = bp->spq;
2364                 bp->spq_prod_idx = 0;
2365                 DP(NETIF_MSG_TIMER, "end of spq\n");
2366
2367         } else {
2368                 bp->spq_prod_bd++;
2369                 bp->spq_prod_idx++;
2370         }
2371
2372         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2373                bp->spq_prod_idx);
2374
2375         spin_unlock_bh(&bp->spq_lock);
2376         return 0;
2377 }
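/* Editorial usage note: callers queue ramrod commands here (MAC
 * configuration, client setup/halt and the like, per the command and cid
 * arguments). As the comment above says, completions do not come back on
 * this ring but on the fastpath completion ring, where bnx2x_sp_event()
 * handles them.
 */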
2378
2379 /* acquire split MCP access lock register */
2380 static int bnx2x_acquire_alr(struct bnx2x *bp)
2381 {
2382         u32 i, j, val;
2383         int rc = 0;
2384
2385         might_sleep();
2386         i = 100;
2387         for (j = 0; j < i*10; j++) {
2388                 val = (1UL << 31);
2389                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2390                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2391                 if (val & (1L << 31))
2392                         break;
2393
2394                 msleep(5);
2395         }
2396         if (!(val & (1L << 31))) {
2397                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2398                 rc = -EBUSY;
2399         }
2400
2401         return rc;
2402 }
2403
2404 /* release split MCP access lock register */
2405 static void bnx2x_release_alr(struct bnx2x *bp)
2406 {
2407         u32 val = 0;
2408
2409         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2410 }
2411
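/* Editorial note: the next function compares the cached default status block
 * indices against what the chip last wrote and returns a bitmask of the ones
 * that changed: bit 0 - attention bits index, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM. A non-zero result tells the slowpath task
 * which sub-blocks need servicing.
 */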
2412 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2413 {
2414         struct host_def_status_block *def_sb = bp->def_status_blk;
2415         u16 rc = 0;
2416
2417         barrier(); /* status block is written to by the chip */
2418         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2419                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2420                 rc |= 1;
2421         }
2422         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2423                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2424                 rc |= 2;
2425         }
2426         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2427                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2428                 rc |= 4;
2429         }
2430         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2431                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2432                 rc |= 8;
2433         }
2434         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2435                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2436                 rc |= 16;
2437         }
2438         return rc;
2439 }
2440
2441 /*
2442  * slow path service functions
2443  */
2444
2445 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2446 {
2447         int port = BP_PORT(bp);
2448         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2449                        COMMAND_REG_ATTN_BITS_SET);
2450         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2451                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2452         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2453                                        NIG_REG_MASK_INTERRUPT_PORT0;
2454         u32 aeu_mask;
2455
2456         if (bp->attn_state & asserted)
2457                 BNX2X_ERR("IGU ERROR\n");
2458
2459         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2460         aeu_mask = REG_RD(bp, aeu_addr);
2461
2462         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2463            aeu_mask, asserted);
2464         aeu_mask &= ~(asserted & 0xff);
2465         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2466
2467         REG_WR(bp, aeu_addr, aeu_mask);
2468         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2469
2470         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2471         bp->attn_state |= asserted;
2472         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2473
2474         if (asserted & ATTN_HARD_WIRED_MASK) {
2475                 if (asserted & ATTN_NIG_FOR_FUNC) {
2476
2477                         /* save nig interrupt mask */
2478                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2479                         REG_WR(bp, nig_int_mask_addr, 0);
2480
2481                         bnx2x_link_attn(bp);
2482
2483                         /* handle unicore attn? */
2484                 }
2485                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2486                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2487
2488                 if (asserted & GPIO_2_FUNC)
2489                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2490
2491                 if (asserted & GPIO_3_FUNC)
2492                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2493
2494                 if (asserted & GPIO_4_FUNC)
2495                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2496
2497                 if (port == 0) {
2498                         if (asserted & ATTN_GENERAL_ATTN_1) {
2499                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2500                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2501                         }
2502                         if (asserted & ATTN_GENERAL_ATTN_2) {
2503                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2504                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2505                         }
2506                         if (asserted & ATTN_GENERAL_ATTN_3) {
2507                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2508                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2509                         }
2510                 } else {
2511                         if (asserted & ATTN_GENERAL_ATTN_4) {
2512                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2513                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2514                         }
2515                         if (asserted & ATTN_GENERAL_ATTN_5) {
2516                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2517                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2518                         }
2519                         if (asserted & ATTN_GENERAL_ATTN_6) {
2520                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2521                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2522                         }
2523                 }
2524
2525         } /* if hardwired */
2526
2527         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2528            asserted, hc_addr);
2529         REG_WR(bp, hc_addr, asserted);
2530
2531         /* now set back the mask */
2532         if (asserted & ATTN_NIG_FOR_FUNC)
2533                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2534 }
2535
2536 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2537 {
2538         int port = BP_PORT(bp);
2539         int reg_offset;
2540         u32 val;
2541
2542         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2543                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2544
2545         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2546
2547                 val = REG_RD(bp, reg_offset);
2548                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2549                 REG_WR(bp, reg_offset, val);
2550
2551                 BNX2X_ERR("SPIO5 hw attention\n");
2552
2553                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2554                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2555                         /* Fan failure attention */
2556
2557                         /* The PHY reset is controlled by GPIO 1 */
2558                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2559                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2560                         /* Low power mode is controlled by GPIO 2 */
2561                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2562                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2563                         /* mark the failure */
2564                         bp->link_params.ext_phy_config &=
2565                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2566                         bp->link_params.ext_phy_config |=
2567                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2568                         SHMEM_WR(bp,
2569                                  dev_info.port_hw_config[port].
2570                                                         external_phy_config,
2571                                  bp->link_params.ext_phy_config);
2572                         /* log the failure */
2573                         printk(KERN_ERR PFX "Fan Failure on Network"
2574                                " Controller %s has caused the driver to"
2575                " shut down the card to prevent permanent"
2576                                " damage.  Please contact Dell Support for"
2577                                " assistance\n", bp->dev->name);
2578                         break;
2579
2580                 default:
2581                         break;
2582                 }
2583         }
2584
2585         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2586
2587                 val = REG_RD(bp, reg_offset);
2588                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2589                 REG_WR(bp, reg_offset, val);
2590
2591                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2592                           (attn & HW_INTERRUT_ASSERT_SET_0));
2593                 bnx2x_panic();
2594         }
2595 }
2596
2597 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2598 {
2599         u32 val;
2600
2601         if (attn & BNX2X_DOORQ_ASSERT) {
2602
2603                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2604                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2605                 /* DORQ discard attention */
2606                 if (val & 0x2)
2607                         BNX2X_ERR("FATAL error from DORQ\n");
2608         }
2609
2610         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2611
2612                 int port = BP_PORT(bp);
2613                 int reg_offset;
2614
2615                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2616                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2617
2618                 val = REG_RD(bp, reg_offset);
2619                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2620                 REG_WR(bp, reg_offset, val);
2621
2622                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2623                           (attn & HW_INTERRUT_ASSERT_SET_1));
2624                 bnx2x_panic();
2625         }
2626 }
2627
2628 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2629 {
2630         u32 val;
2631
2632         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2633
2634                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2635                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2636                 /* CFC error attention */
2637                 if (val & 0x2)
2638                         BNX2X_ERR("FATAL error from CFC\n");
2639         }
2640
2641         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2642
2643                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2644                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2645                 /* RQ_USDMDP_FIFO_OVERFLOW */
2646                 if (val & 0x18000)
2647                         BNX2X_ERR("FATAL error from PXP\n");
2648         }
2649
2650         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2651
2652                 int port = BP_PORT(bp);
2653                 int reg_offset;
2654
2655                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2656                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2657
2658                 val = REG_RD(bp, reg_offset);
2659                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2660                 REG_WR(bp, reg_offset, val);
2661
2662                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2663                           (attn & HW_INTERRUT_ASSERT_SET_2));
2664                 bnx2x_panic();
2665         }
2666 }
2667
2668 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2669 {
2670         u32 val;
2671
2672         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2673
2674                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2675                         int func = BP_FUNC(bp);
2676
2677                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2678                         bnx2x__link_status_update(bp);
2679                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2680                                                         DRV_STATUS_PMF)
2681                                 bnx2x_pmf_update(bp);
2682
2683                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2684
2685                         BNX2X_ERR("MC assert!\n");
2686                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2688                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2689                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2690                         bnx2x_panic();
2691
2692                 } else if (attn & BNX2X_MCP_ASSERT) {
2693
2694                         BNX2X_ERR("MCP assert!\n");
2695                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2696                         bnx2x_fw_dump(bp);
2697
2698                 } else
2699                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2700         }
2701
2702         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2703                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2704                 if (attn & BNX2X_GRC_TIMEOUT) {
2705                         val = CHIP_IS_E1H(bp) ?
2706                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2707                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2708                 }
2709                 if (attn & BNX2X_GRC_RSV) {
2710                         val = CHIP_IS_E1H(bp) ?
2711                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2712                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2713                 }
2714                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2715         }
2716 }
2717
2718 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2719 {
2720         struct attn_route attn;
2721         struct attn_route group_mask;
2722         int port = BP_PORT(bp);
2723         int index;
2724         u32 reg_addr;
2725         u32 val;
2726         u32 aeu_mask;
2727
2728         /* need to take HW lock because MCP or other port might also
2729            try to handle this event */
2730         bnx2x_acquire_alr(bp);
2731
2732         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2733         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2734         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2735         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2736         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2737            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2738
2739         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2740                 if (deasserted & (1 << index)) {
2741                         group_mask = bp->attn_group[index];
2742
2743                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2744                            index, group_mask.sig[0], group_mask.sig[1],
2745                            group_mask.sig[2], group_mask.sig[3]);
2746
2747                         bnx2x_attn_int_deasserted3(bp,
2748                                         attn.sig[3] & group_mask.sig[3]);
2749                         bnx2x_attn_int_deasserted1(bp,
2750                                         attn.sig[1] & group_mask.sig[1]);
2751                         bnx2x_attn_int_deasserted2(bp,
2752                                         attn.sig[2] & group_mask.sig[2]);
2753                         bnx2x_attn_int_deasserted0(bp,
2754                                         attn.sig[0] & group_mask.sig[0]);
2755
2756                         if ((attn.sig[0] & group_mask.sig[0] &
2757                                                 HW_PRTY_ASSERT_SET_0) ||
2758                             (attn.sig[1] & group_mask.sig[1] &
2759                                                 HW_PRTY_ASSERT_SET_1) ||
2760                             (attn.sig[2] & group_mask.sig[2] &
2761                                                 HW_PRTY_ASSERT_SET_2))
2762                                BNX2X_ERR("FATAL HW block parity attention\n");
2763                 }
2764         }
2765
2766         bnx2x_release_alr(bp);
2767
2768         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2769
2770         val = ~deasserted;
2771         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2772            val, reg_addr);
2773         REG_WR(bp, reg_addr, val);
2774
2775         if (~bp->attn_state & deasserted)
2776                 BNX2X_ERR("IGU ERROR\n");
2777
2778         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2779                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2780
2781         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2782         aeu_mask = REG_RD(bp, reg_addr);
2783
2784         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2785            aeu_mask, deasserted);
2786         aeu_mask |= (deasserted & 0xff);
2787         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2788
2789         REG_WR(bp, reg_addr, aeu_mask);
2790         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2791
2792         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2793         bp->attn_state &= ~deasserted;
2794         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2795 }
2796
2797 static void bnx2x_attn_int(struct bnx2x *bp)
2798 {
2799         /* read local copy of bits */
2800         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2801         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2802         u32 attn_state = bp->attn_state;
2803
2804         /* look for changed bits */
2805         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2806         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2807
2808         DP(NETIF_MSG_HW,
2809            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2810            attn_bits, attn_ack, asserted, deasserted);
2811
2812         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2813                 BNX2X_ERR("BAD attention state\n");
2814
2815         /* handle bits that were raised */
2816         if (asserted)
2817                 bnx2x_attn_int_asserted(bp, asserted);
2818
2819         if (deasserted)
2820                 bnx2x_attn_int_deasserted(bp, deasserted);
2821 }
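
/* A worked example of the bit algebra above, with illustrative values
 * (not taken from real hardware):
 *
 *      attn_bits  = 0x5        lines 0 and 2 currently raised
 *      attn_ack   = 0x6        lines 1 and 2 already acked
 *      attn_state = 0x6        driver tracks lines 1 and 2 as up
 *
 *      asserted   =  0x5 & ~0x6 & ~0x6 = 0x1   line 0 newly raised
 *      deasserted = ~0x5 &  0x6 &  0x6 = 0x2   line 1 went away
 *
 * Line 2 is raised, acked and tracked, so it needs no handling.  The
 * sanity check flags bits where attn_bits and attn_ack already agree
 * while attn_state still disagrees - a transition the asserted and
 * deasserted masks above can never pick up.
 */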
2822
2823 static void bnx2x_sp_task(struct work_struct *work)
2824 {
2825         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2826         u16 status;
2827
2828
2829         /* Return here if interrupt is disabled */
2830         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2831                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2832                 return;
2833         }
2834
2835         status = bnx2x_update_dsb_idx(bp);
2836 /*      if (status == 0)                                     */
2837 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2838
2839         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2840
2841         /* HW attentions */
2842         if (status & 0x1)
2843                 bnx2x_attn_int(bp);
2844
2845         /* CStorm events: query_stats, port delete ramrod */
2846         if (status & 0x2)
2847                 bp->stats_pending = 0;
2848
2849         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2850                      IGU_INT_NOP, 1);
2851         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2852                      IGU_INT_NOP, 1);
2853         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2854                      IGU_INT_NOP, 1);
2855         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2856                      IGU_INT_NOP, 1);
2857         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2858                      IGU_INT_ENABLE, 1);
2859
2860 }
2861
2862 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2863 {
2864         struct net_device *dev = dev_instance;
2865         struct bnx2x *bp = netdev_priv(dev);
2866
2867         /* Return here if interrupt is disabled */
2868         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2869                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2870                 return IRQ_HANDLED;
2871         }
2872
2873         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2874
2875 #ifdef BNX2X_STOP_ON_ERROR
2876         if (unlikely(bp->panic))
2877                 return IRQ_HANDLED;
2878 #endif
2879
2880         schedule_work(&bp->sp_task);
2881
2882         return IRQ_HANDLED;
2883 }
2884
2885 /* end of slow path */
2886
2887 /* Statistics */
2888
2889 /****************************************************************************
2890 * Macros
2891 ****************************************************************************/
2892
2893 /* sum[hi:lo] += add[hi:lo] */
2894 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2895         do { \
2896                 s_lo += a_lo; \
2897                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2898         } while (0)
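
/* Example: the wraparound of the low word is what signals the carry.
 * A minimal standalone sketch (illustrative values, not driver data):
 *
 *      u32 s_hi = 0, s_lo = 0xffffffff;
 *      ADD_64(s_hi, 0, s_lo, 1);
 *      // s_lo wraps to 0; (s_lo < a_lo) is (0 < 1), so s_hi becomes 1
 *      // and the pair now holds 0x1_00000000.
 *
 * The parentheses around the conditional matter: '?:' binds weaker
 * than '+', so without them the whole addend collapses to 1 or 0.
 */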
2899
2900 /* difference = minuend - subtrahend */
2901 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2902         do { \
2903                 if (m_lo < s_lo) { \
2904                         /* underflow */ \
2905                         d_hi = m_hi - s_hi; \
2906                         if (d_hi > 0) { \
2907                         /* we can 'borrow' 1 */ \
2908                                 d_hi--; \
2909                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2910                         } else { \
2911                         /* m_hi <= s_hi */ \
2912                                 d_hi = 0; \
2913                                 d_lo = 0; \
2914                         } \
2915                 } else { \
2916                         /* m_lo >= s_lo */ \
2917                         if (m_hi < s_hi) { \
2918                                 d_hi = 0; \
2919                                 d_lo = 0; \
2920                         } else { \
2921                         /* m_hi >= s_hi */ \
2922                                 d_hi = m_hi - s_hi; \
2923                                 d_lo = m_lo - s_lo; \
2924                         } \
2925                 } \
2926         } while (0)
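
/* Example: subtraction with a borrow, clamped at zero (illustrative
 * values only):
 *
 *      minuend    = {m_hi = 2, m_lo = 5}
 *      subtrahend = {s_hi = 1, s_lo = 7}
 *
 * m_lo < s_lo, so 1 is borrowed from the high word:
 *      d_hi = (2 - 1) - 1 = 0
 *      d_lo = 5 + (UINT_MAX - 7) + 1 = 0xfffffffe
 *
 * i.e. 0x2_00000005 - 0x1_00000007 = 0xfffffffe.  When the subtrahend
 * is the larger value the result clamps to {0, 0} instead of wrapping,
 * which is the safe answer for monotonic counters.
 */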
2927
2928 #define UPDATE_STAT64(s, t) \
2929         do { \
2930                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2931                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2932                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2933                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2934                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2935                        pstats->mac_stx[1].t##_lo, diff.lo); \
2936         } while (0)
2937
2938 #define UPDATE_STAT64_NIG(s, t) \
2939         do { \
2940                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2941                         diff.lo, new->s##_lo, old->s##_lo); \
2942                 ADD_64(estats->t##_hi, diff.hi, \
2943                        estats->t##_lo, diff.lo); \
2944         } while (0)
2945
2946 /* sum[hi:lo] += add */
2947 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2948         do { \
2949                 s_lo += a; \
2950                 s_hi += (s_lo < a) ? 1 : 0; \
2951         } while (0)
2952
2953 #define UPDATE_EXTEND_STAT(s) \
2954         do { \
2955                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2956                               pstats->mac_stx[1].s##_lo, \
2957                               new->s); \
2958         } while (0)
2959
2960 #define UPDATE_EXTEND_TSTAT(s, t) \
2961         do { \
2962                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2963                 old_tclient->s = le32_to_cpu(tclient->s); \
2964                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2965         } while (0)
2966
2967 #define UPDATE_EXTEND_XSTAT(s, t) \
2968         do { \
2969                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2970                 old_xclient->s = le32_to_cpu(xclient->s); \
2971                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2972         } while (0)
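
/* The UPDATE_EXTEND_* macros all use the same snapshot-and-delta
 * pattern: the storms export free-running 32-bit counters, the last
 * snapshot is kept in old_tclient/old_xclient, and only the delta is
 * folded into the 64-bit host counter.  Unsigned arithmetic keeps the
 * delta correct across a 32-bit rollover (illustrative values):
 *
 *      old = 0xfffffff0, new = 0x00000010
 *      diff = new - old = 0x20         correct despite the wrap
 *
 * provided fewer than 2^32 events occur between two updates.
 */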
2973
2974 /*
2975  * General service functions
2976  */
2977
2978 static inline long bnx2x_hilo(u32 *hiref)
2979 {
2980         u32 lo = *(hiref + 1);
2981 #if (BITS_PER_LONG == 64)
2982         u32 hi = *hiref;
2983
2984         return HILO_U64(hi, lo);
2985 #else
2986         return lo;
2987 #endif
2988 }
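
/* Example: on a 64-bit build the {hi, lo} pair is composed into one
 * value, on 32-bit it is truncated to the low word, matching the
 * 'unsigned long' fields of struct net_device_stats.  Assuming the
 * HILO_U64 helper builds ((u64)hi << 32) + lo:
 *
 *      u32 stat[2] = { 0x1, 0x23456789 };      // {hi, lo}
 *      bnx2x_hilo(stat);       // 0x123456789 on 64-bit,
 *                              // 0x23456789 on 32-bit
 */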
2989
2990 /*
2991  * Init service functions
2992  */
2993
2994 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2995 {
2996         if (!bp->stats_pending) {
2997                 struct eth_query_ramrod_data ramrod_data = {0};
2998                 int rc;
2999
3000                 ramrod_data.drv_counter = bp->stats_counter++;
3001                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3002                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3003
3004                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3005                                    ((u32 *)&ramrod_data)[1],
3006                                    ((u32 *)&ramrod_data)[0], 0);
3007                 if (rc == 0) {
3008                         /* stats ramrod has its own slot on the spq */
3009                         bp->spq_left++;
3010                         bp->stats_pending = 1;
3011                 }
3012         }
3013 }
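
/* Note the casts above: eth_query_ramrod_data is handed to the SPQ as
 * two raw u32 halves, [1] as the high word and [0] as the low word,
 * assuming bnx2x_sp_post() takes the ramrod payload as a data_hi /
 * data_lo pair - which is what the [1]/[0] ordering reflects.
 */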
3014
3015 static void bnx2x_stats_init(struct bnx2x *bp)
3016 {
3017         int port = BP_PORT(bp);
3018
3019         bp->executer_idx = 0;
3020         bp->stats_counter = 0;
3021
3022         /* port stats */
3023         if (!BP_NOMCP(bp))
3024                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3025         else
3026                 bp->port.port_stx = 0;
3027         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3028
3029         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3030         bp->port.old_nig_stats.brb_discard =
3031                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3032         bp->port.old_nig_stats.brb_truncate =
3033                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3034         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3035                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3036         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3037                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3038
3039         /* function stats */
3040         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3041         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3042         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3043         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3044
3045         bp->stats_state = STATS_STATE_DISABLED;
3046         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3047                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3048 }
3049
3050 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3051 {
3052         struct dmae_command *dmae = &bp->stats_dmae;
3053         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3054
3055         *stats_comp = DMAE_COMP_VAL;
3056
3057         /* loader */
3058         if (bp->executer_idx) {
3059                 int loader_idx = PMF_DMAE_C(bp);
3060
3061                 memset(dmae, 0, sizeof(struct dmae_command));
3062
3063                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3064                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3065                                 DMAE_CMD_DST_RESET |
3066 #ifdef __BIG_ENDIAN
3067                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3068 #else
3069                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3070 #endif
3071                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3072                                                DMAE_CMD_PORT_0) |
3073                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3074                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3075                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3076                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3077                                      sizeof(struct dmae_command) *
3078                                      (loader_idx + 1)) >> 2;
3079                 dmae->dst_addr_hi = 0;
3080                 dmae->len = sizeof(struct dmae_command) >> 2;
3081                 if (CHIP_IS_E1(bp))
3082                         dmae->len--;
3083                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3084                 dmae->comp_addr_hi = 0;
3085                 dmae->comp_val = 1;
3086
3087                 *stats_comp = 0;
3088                 bnx2x_post_dmae(bp, dmae, loader_idx);
3089
3090         } else if (bp->func_stx) {
3091                 *stats_comp = 0;
3092                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3093         }
3094 }
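
/* Two posting modes above: if commands were staged on the slowpath
 * ring (executer_idx != 0), a 'loader' command is built that DMAs the
 * first staged command into the DMAE command memory slot
 * (loader_idx + 1) and, through its completion write of 1 to
 * dmae_reg_go_c[loader_idx + 1], fires that slot; the end of the
 * sequence is reported by the final write of DMAE_COMP_VAL to
 * *stats_comp.  Otherwise the single pre-built bp->stats_dmae command
 * is posted directly on the INIT channel.
 */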
3095
3096 static int bnx2x_stats_comp(struct bnx2x *bp)
3097 {
3098         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3099         int cnt = 10;
3100
3101         might_sleep();
3102         while (*stats_comp != DMAE_COMP_VAL) {
3103                 if (!cnt) {
3104                         BNX2X_ERR("timeout waiting for stats to finish\n");
3105                         break;
3106                 }
3107                 cnt--;
3108                 msleep(1);
3109         }
3110         return 1;
3111 }
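
/* bnx2x_stats_comp() polls the completion word for up to ten 1ms
 * sleeps; note it returns 1 even on timeout, so callers only use it
 * to sequence and the failure is merely logged.  The typical pairing:
 *
 *      bnx2x_hw_stats_post(bp);        // kick the staged DMAE chain
 *      bnx2x_stats_comp(bp);           // wait for *stats_comp
 */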
3112
3113 /*
3114  * Statistics service functions
3115  */
3116
3117 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3118 {
3119         struct dmae_command *dmae;
3120         u32 opcode;
3121         int loader_idx = PMF_DMAE_C(bp);
3122         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3123
3124         /* sanity */
3125         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3126                 BNX2X_ERR("BUG!\n");
3127                 return;
3128         }
3129
3130         bp->executer_idx = 0;
3131
3132         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3133                   DMAE_CMD_C_ENABLE |
3134                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3135 #ifdef __BIG_ENDIAN
3136                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3137 #else
3138                   DMAE_CMD_ENDIANITY_DW_SWAP |
3139 #endif
3140                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3141                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3142
3143         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3144         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3145         dmae->src_addr_lo = bp->port.port_stx >> 2;
3146         dmae->src_addr_hi = 0;
3147         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3148         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3149         dmae->len = DMAE_LEN32_RD_MAX;
3150         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3151         dmae->comp_addr_hi = 0;
3152         dmae->comp_val = 1;
3153
3154         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3155         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3156         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3157         dmae->src_addr_hi = 0;
3158         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3159                                    DMAE_LEN32_RD_MAX * 4);
3160         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3161                                    DMAE_LEN32_RD_MAX * 4);
3162         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3163         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3164         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3165         dmae->comp_val = DMAE_COMP_VAL;
3166
3167         *stats_comp = 0;
3168         bnx2x_hw_stats_post(bp);
3169         bnx2x_stats_comp(bp);
3170 }
3171
3172 static void bnx2x_port_stats_init(struct bnx2x *bp)
3173 {
3174         struct dmae_command *dmae;
3175         int port = BP_PORT(bp);
3176         int vn = BP_E1HVN(bp);
3177         u32 opcode;
3178         int loader_idx = PMF_DMAE_C(bp);
3179         u32 mac_addr;
3180         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3181
3182         /* sanity */
3183         if (!bp->link_vars.link_up || !bp->port.pmf) {
3184                 BNX2X_ERR("BUG!\n");
3185                 return;
3186         }
3187
3188         bp->executer_idx = 0;
3189
3190         /* MCP */
3191         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3192                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3193                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3194 #ifdef __BIG_ENDIAN
3195                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3196 #else
3197                   DMAE_CMD_ENDIANITY_DW_SWAP |
3198 #endif
3199                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3200                   (vn << DMAE_CMD_E1HVN_SHIFT));
3201
3202         if (bp->port.port_stx) {
3203
3204                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3205                 dmae->opcode = opcode;
3206                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3207                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3208                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3209                 dmae->dst_addr_hi = 0;
3210                 dmae->len = sizeof(struct host_port_stats) >> 2;
3211                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3212                 dmae->comp_addr_hi = 0;
3213                 dmae->comp_val = 1;
3214         }
3215
3216         if (bp->func_stx) {
3217
3218                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3219                 dmae->opcode = opcode;
3220                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3221                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3222                 dmae->dst_addr_lo = bp->func_stx >> 2;
3223                 dmae->dst_addr_hi = 0;
3224                 dmae->len = sizeof(struct host_func_stats) >> 2;
3225                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3226                 dmae->comp_addr_hi = 0;
3227                 dmae->comp_val = 1;
3228         }
3229
3230         /* MAC */
3231         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3232                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3233                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3234 #ifdef __BIG_ENDIAN
3235                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3236 #else
3237                   DMAE_CMD_ENDIANITY_DW_SWAP |
3238 #endif
3239                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3240                   (vn << DMAE_CMD_E1HVN_SHIFT));
3241
3242         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3243
3244                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3245                                    NIG_REG_INGRESS_BMAC0_MEM);
3246
3247                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3248                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3249                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3250                 dmae->opcode = opcode;
3251                 dmae->src_addr_lo = (mac_addr +
3252                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3253                 dmae->src_addr_hi = 0;
3254                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3255                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3256                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3257                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3258                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3259                 dmae->comp_addr_hi = 0;
3260                 dmae->comp_val = 1;
3261
3262                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3263                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3264                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3265                 dmae->opcode = opcode;
3266                 dmae->src_addr_lo = (mac_addr +
3267                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3268                 dmae->src_addr_hi = 0;
3269                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3270                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3271                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3272                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3273                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3274                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3275                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3276                 dmae->comp_addr_hi = 0;
3277                 dmae->comp_val = 1;
3278
3279         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3280
3281                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3282
3283                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3284                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3285                 dmae->opcode = opcode;
3286                 dmae->src_addr_lo = (mac_addr +
3287                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3288                 dmae->src_addr_hi = 0;
3289                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3290                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3291                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3292                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293                 dmae->comp_addr_hi = 0;
3294                 dmae->comp_val = 1;
3295
3296                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3297                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3298                 dmae->opcode = opcode;
3299                 dmae->src_addr_lo = (mac_addr +
3300                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3301                 dmae->src_addr_hi = 0;
3302                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3303                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3304                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3305                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3306                 dmae->len = 1;
3307                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3308                 dmae->comp_addr_hi = 0;
3309                 dmae->comp_val = 1;
3310
3311                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3312                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313                 dmae->opcode = opcode;
3314                 dmae->src_addr_lo = (mac_addr +
3315                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3316                 dmae->src_addr_hi = 0;
3317                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3318                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3319                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3320                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3321                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3322                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3323                 dmae->comp_addr_hi = 0;
3324                 dmae->comp_val = 1;
3325         }
3326
3327         /* NIG */
3328         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3329         dmae->opcode = opcode;
3330         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3331                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3332         dmae->src_addr_hi = 0;
3333         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3334         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3335         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3336         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3337         dmae->comp_addr_hi = 0;
3338         dmae->comp_val = 1;
3339
3340         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3341         dmae->opcode = opcode;
3342         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3343                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3344         dmae->src_addr_hi = 0;
3345         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3346                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3347         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3348                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3349         dmae->len = (2*sizeof(u32)) >> 2;
3350         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3351         dmae->comp_addr_hi = 0;
3352         dmae->comp_val = 1;
3353
3354         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3356                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3357                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3358 #ifdef __BIG_ENDIAN
3359                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3360 #else
3361                         DMAE_CMD_ENDIANITY_DW_SWAP |
3362 #endif
3363                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3364                         (vn << DMAE_CMD_E1HVN_SHIFT));
3365         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3366                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3367         dmae->src_addr_hi = 0;
3368         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3369                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3370         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3371                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3372         dmae->len = (2*sizeof(u32)) >> 2;
3373         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3374         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3375         dmae->comp_val = DMAE_COMP_VAL;
3376
3377         *stats_comp = 0;
3378 }
3379
3380 static void bnx2x_func_stats_init(struct bnx2x *bp)
3381 {
3382         struct dmae_command *dmae = &bp->stats_dmae;
3383         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3384
3385         /* sanity */
3386         if (!bp->func_stx) {
3387                 BNX2X_ERR("BUG!\n");
3388                 return;
3389         }
3390
3391         bp->executer_idx = 0;
3392         memset(dmae, 0, sizeof(struct dmae_command));
3393
3394         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3395                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3396                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3397 #ifdef __BIG_ENDIAN
3398                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3399 #else
3400                         DMAE_CMD_ENDIANITY_DW_SWAP |
3401 #endif
3402                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3403                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3404         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3405         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3406         dmae->dst_addr_lo = bp->func_stx >> 2;
3407         dmae->dst_addr_hi = 0;
3408         dmae->len = sizeof(struct host_func_stats) >> 2;
3409         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3410         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3411         dmae->comp_val = DMAE_COMP_VAL;
3412
3413         *stats_comp = 0;
3414 }
3415
3416 static void bnx2x_stats_start(struct bnx2x *bp)
3417 {
3418         if (bp->port.pmf)
3419                 bnx2x_port_stats_init(bp);
3420
3421         else if (bp->func_stx)
3422                 bnx2x_func_stats_init(bp);
3423
3424         bnx2x_hw_stats_post(bp);
3425         bnx2x_storm_stats_post(bp);
3426 }
3427
3428 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3429 {
3430         bnx2x_stats_comp(bp);
3431         bnx2x_stats_pmf_update(bp);
3432         bnx2x_stats_start(bp);
3433 }
3434
3435 static void bnx2x_stats_restart(struct bnx2x *bp)
3436 {
3437         bnx2x_stats_comp(bp);
3438         bnx2x_stats_start(bp);
3439 }
3440
3441 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3442 {
3443         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3444         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3445         struct regpair diff;
3446
3447         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3448         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3449         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3450         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3451         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3452         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3453         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3454         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3455         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3456         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3457         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3458         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3459         UPDATE_STAT64(tx_stat_gt127,
3460                                 tx_stat_etherstatspkts65octetsto127octets);
3461         UPDATE_STAT64(tx_stat_gt255,
3462                                 tx_stat_etherstatspkts128octetsto255octets);
3463         UPDATE_STAT64(tx_stat_gt511,
3464                                 tx_stat_etherstatspkts256octetsto511octets);
3465         UPDATE_STAT64(tx_stat_gt1023,
3466                                 tx_stat_etherstatspkts512octetsto1023octets);
3467         UPDATE_STAT64(tx_stat_gt1518,
3468                                 tx_stat_etherstatspkts1024octetsto1522octets);
3469         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3470         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3471         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3472         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3473         UPDATE_STAT64(tx_stat_gterr,
3474                                 tx_stat_dot3statsinternalmactransmiterrors);
3475         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3476 }
3477
3478 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3479 {
3480         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3481         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3482
3483         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3484         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3485         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3486         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3487         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3488         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3489         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3490         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3491         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3492         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3493         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3494         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3495         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3496         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3497         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3498         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3499         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3500         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3501         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3502         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3503         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3504         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3505         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3506         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3507         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3508         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3509         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3510         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3511         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3512         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3513         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3514 }
3515
3516 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3517 {
3518         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3519         struct nig_stats *old = &(bp->port.old_nig_stats);
3520         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3521         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3522         struct regpair diff;
3523
3524         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3525                 bnx2x_bmac_stats_update(bp);
3526
3527         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3528                 bnx2x_emac_stats_update(bp);
3529
3530         else { /* unreached */
3531                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3532                 return -1;
3533         }
3534
3535         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3536                       new->brb_discard - old->brb_discard);
3537         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3538                       new->brb_truncate - old->brb_truncate);
3539
3540         UPDATE_STAT64_NIG(egress_mac_pkt0,
3541                                         etherstatspkts1024octetsto1522octets);
3542         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3543
3544         memcpy(old, new, sizeof(struct nig_stats));
3545
3546         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3547                sizeof(struct mac_stx));
3548         estats->brb_drop_hi = pstats->brb_drop_hi;
3549         estats->brb_drop_lo = pstats->brb_drop_lo;
3550
3551         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3552
3553         return 0;
3554 }
3555
3556 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3557 {
3558         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3559         int cl_id = BP_CL_ID(bp);
3560         struct tstorm_per_port_stats *tport =
3561                                 &stats->tstorm_common.port_statistics;
3562         struct tstorm_per_client_stats *tclient =
3563                         &stats->tstorm_common.client_statistics[cl_id];
3564         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3565         struct xstorm_per_client_stats *xclient =
3566                         &stats->xstorm_common.client_statistics[cl_id];
3567         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3568         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3569         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3570         u32 diff;
3571
3572         /* are storm stats valid? */
3573         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3574                                                         bp->stats_counter) {
3575                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3576                    "  tstorm counter (%d) != stats_counter (%d)\n",
3577                    tclient->stats_counter, bp->stats_counter);
3578                 return -1;
3579         }
3580         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3581                                                         bp->stats_counter) {
3582                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3583                    "  xstorm counter (%d) != stats_counter (%d)\n",
3584                    xclient->stats_counter, bp->stats_counter);
3585                 return -2;
3586         }
3587
3588         fstats->total_bytes_received_hi =
3589         fstats->valid_bytes_received_hi =
3590                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3591         fstats->total_bytes_received_lo =
3592         fstats->valid_bytes_received_lo =
3593                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3594
3595         estats->error_bytes_received_hi =
3596                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3597         estats->error_bytes_received_lo =
3598                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3599         ADD_64(estats->error_bytes_received_hi,
3600                estats->rx_stat_ifhcinbadoctets_hi,
3601                estats->error_bytes_received_lo,
3602                estats->rx_stat_ifhcinbadoctets_lo);
3603
3604         ADD_64(fstats->total_bytes_received_hi,
3605                estats->error_bytes_received_hi,
3606                fstats->total_bytes_received_lo,
3607                estats->error_bytes_received_lo);
3608
3609         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3610         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3611                                 total_multicast_packets_received);
3612         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3613                                 total_broadcast_packets_received);
3614
3615         fstats->total_bytes_transmitted_hi =
3616                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3617         fstats->total_bytes_transmitted_lo =
3618                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3619
3620         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3621                                 total_unicast_packets_transmitted);
3622         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3623                                 total_multicast_packets_transmitted);
3624         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3625                                 total_broadcast_packets_transmitted);
3626
3627         memcpy(estats, &(fstats->total_bytes_received_hi),
3628                sizeof(struct host_func_stats) - 2*sizeof(u32));
3629
3630         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3631         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3632         estats->brb_truncate_discard =
3633                                 le32_to_cpu(tport->brb_truncate_discard);
3634         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3635
3636         old_tclient->rcv_unicast_bytes.hi =
3637                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3638         old_tclient->rcv_unicast_bytes.lo =
3639                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3640         old_tclient->rcv_broadcast_bytes.hi =
3641                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3642         old_tclient->rcv_broadcast_bytes.lo =
3643                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3644         old_tclient->rcv_multicast_bytes.hi =
3645                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3646         old_tclient->rcv_multicast_bytes.lo =
3647                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3648         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3649
3650         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3651         old_tclient->packets_too_big_discard =
3652                                 le32_to_cpu(tclient->packets_too_big_discard);
3653         estats->no_buff_discard =
3654         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3655         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3656
3657         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3658         old_xclient->unicast_bytes_sent.hi =
3659                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3660         old_xclient->unicast_bytes_sent.lo =
3661                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3662         old_xclient->multicast_bytes_sent.hi =
3663                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3664         old_xclient->multicast_bytes_sent.lo =
3665                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3666         old_xclient->broadcast_bytes_sent.hi =
3667                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3668         old_xclient->broadcast_bytes_sent.lo =
3669                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3670
3671         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3672
3673         return 0;
3674 }
3675
3676 static void bnx2x_net_stats_update(struct bnx2x *bp)
3677 {
3678         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3679         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3680         struct net_device_stats *nstats = &bp->dev->stats;
3681
3682         nstats->rx_packets =
3683                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3684                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3685                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3686
3687         nstats->tx_packets =
3688                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3689                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3690                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3691
3692         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3693
3694         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3695
3696         nstats->rx_dropped = old_tclient->checksum_discard +
3697                              estats->mac_discard;
3698         nstats->tx_dropped = 0;
3699
3700         nstats->multicast =
3701                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3702
3703         nstats->collisions =
3704                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3705                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3706                         estats->tx_stat_dot3statslatecollisions_lo +
3707                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3708
3709         estats->jabber_packets_received =
3710                                 old_tclient->packets_too_big_discard +
3711                                 estats->rx_stat_dot3statsframestoolong_lo;
3712
3713         nstats->rx_length_errors =
3714                                 estats->rx_stat_etherstatsundersizepkts_lo +
3715                                 estats->jabber_packets_received;
3716         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3717         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3718         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3719         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3720         nstats->rx_missed_errors = estats->xxoverflow_discard;
3721
3722         nstats->rx_errors = nstats->rx_length_errors +
3723                             nstats->rx_over_errors +
3724                             nstats->rx_crc_errors +
3725                             nstats->rx_frame_errors +
3726                             nstats->rx_fifo_errors +
3727                             nstats->rx_missed_errors;
3728
3729         nstats->tx_aborted_errors =
3730                         estats->tx_stat_dot3statslatecollisions_lo +
3731                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3732         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3733         nstats->tx_fifo_errors = 0;
3734         nstats->tx_heartbeat_errors = 0;
3735         nstats->tx_window_errors = 0;
3736
3737         nstats->tx_errors = nstats->tx_aborted_errors +
3738                             nstats->tx_carrier_errors;
3739 }
3740
3741 static void bnx2x_stats_update(struct bnx2x *bp)
3742 {
3743         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3744         int update = 0;
3745
3746         if (*stats_comp != DMAE_COMP_VAL)
3747                 return;
3748
3749         if (bp->port.pmf)
3750                 update = (bnx2x_hw_stats_update(bp) == 0);
3751
3752         update |= (bnx2x_storm_stats_update(bp) == 0);
3753
3754         if (update)
3755                 bnx2x_net_stats_update(bp);
3756
3757         else {
3758                 if (bp->stats_pending) {
3759                         bp->stats_pending++;
3760                         if (bp->stats_pending == 3) {
3761                                 BNX2X_ERR("stats were not updated for 3 times\n");
3762                                 bnx2x_panic();
3763                                 return;
3764                         }
3765                 }
3766         }
3767
3768         if (bp->msglevel & NETIF_MSG_TIMER) {
3769                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3770                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3771                 struct net_device_stats *nstats = &bp->dev->stats;
3772                 int i;
3773
3774                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3775                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3776                                   "  tx pkt (%lx)\n",
3777                        bnx2x_tx_avail(bp->fp),
3778                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3779                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3780                                   "  rx pkt (%lx)\n",
3781                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3782                              bp->fp->rx_comp_cons),
3783                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3784                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3785                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3786                        estats->driver_xoff, estats->brb_drop_lo);
3787                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3788                         "packets_too_big_discard %u  no_buff_discard %u  "
3789                         "mac_discard %u  mac_filter_discard %u  "
3790                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3791                         "ttl0_discard %u\n",
3792                        old_tclient->checksum_discard,
3793                        old_tclient->packets_too_big_discard,
3794                        old_tclient->no_buff_discard, estats->mac_discard,
3795                        estats->mac_filter_discard, estats->xxoverflow_discard,
3796                        estats->brb_truncate_discard,
3797                        old_tclient->ttl0_discard);
3798
3799                 for_each_queue(bp, i) {
3800                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3801                                bnx2x_fp(bp, i, tx_pkt),
3802                                bnx2x_fp(bp, i, rx_pkt),
3803                                bnx2x_fp(bp, i, rx_calls));
3804                 }
3805         }
3806
3807         bnx2x_hw_stats_post(bp);
3808         bnx2x_storm_stats_post(bp);
3809 }
3810
3811 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3812 {
3813         struct dmae_command *dmae;
3814         u32 opcode;
3815         int loader_idx = PMF_DMAE_C(bp);
3816         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3817
3818         bp->executer_idx = 0;
3819
3820         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3821                   DMAE_CMD_C_ENABLE |
3822                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3823 #ifdef __BIG_ENDIAN
3824                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3825 #else
3826                   DMAE_CMD_ENDIANITY_DW_SWAP |
3827 #endif
3828                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3829                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3830
3831         if (bp->port.port_stx) {
3832
3833                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3834                 if (bp->func_stx)
3835                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3836                 else
3837                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3838                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3839                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3840                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3841                 dmae->dst_addr_hi = 0;
3842                 dmae->len = sizeof(struct host_port_stats) >> 2;
3843                 if (bp->func_stx) {
3844                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3845                         dmae->comp_addr_hi = 0;
3846                         dmae->comp_val = 1;
3847                 } else {
3848                         dmae->comp_addr_lo =
3849                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3850                         dmae->comp_addr_hi =
3851                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3852                         dmae->comp_val = DMAE_COMP_VAL;
3853
3854                         *stats_comp = 0;
3855                 }
3856         }
3857
3858         if (bp->func_stx) {
3859
3860                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3861                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3862                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3863                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3864                 dmae->dst_addr_lo = bp->func_stx >> 2;
3865                 dmae->dst_addr_hi = 0;
3866                 dmae->len = sizeof(struct host_func_stats) >> 2;
3867                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3868                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3869                 dmae->comp_val = DMAE_COMP_VAL;
3870
3871                 *stats_comp = 0;
3872         }
3873 }
3874
3875 static void bnx2x_stats_stop(struct bnx2x *bp)
3876 {
3877         int update = 0;
3878
3879         bnx2x_stats_comp(bp);
3880
3881         if (bp->port.pmf)
3882                 update = (bnx2x_hw_stats_update(bp) == 0);
3883
3884         update |= (bnx2x_storm_stats_update(bp) == 0);
3885
3886         if (update) {
3887                 bnx2x_net_stats_update(bp);
3888
3889                 if (bp->port.pmf)
3890                         bnx2x_port_stats_stop(bp);
3891
3892                 bnx2x_hw_stats_post(bp);
3893                 bnx2x_stats_comp(bp);
3894         }
3895 }
3896
3897 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3898 {
3899 }
3900
3901 static const struct {
3902         void (*action)(struct bnx2x *bp);
3903         enum bnx2x_stats_state next_state;
3904 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3905 /* state        event   */
3906 {
3907 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3908 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3909 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3910 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3911 },
3912 {
3913 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3914 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3915 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3916 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3917 }
3918 };
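
/* Reading the table: the row is the current state, the column the
 * event.  E.g. STATS_EVENT_LINK_UP in DISABLED runs bnx2x_stats_start()
 * and moves to ENABLED, while STATS_EVENT_STOP in DISABLED is a
 * do-nothing self-loop.
 */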
3919
3920 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3921 {
3922         enum bnx2x_stats_state state = bp->stats_state;
3923
3924         bnx2x_stats_stm[state][event].action(bp);
3925         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3926
3927         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3928                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3929                    state, event, bp->stats_state);
3930 }
3931
3932 static void bnx2x_timer(unsigned long data)
3933 {
3934         struct bnx2x *bp = (struct bnx2x *) data;
3935
3936         if (!netif_running(bp->dev))
3937                 return;
3938
3939         if (atomic_read(&bp->intr_sem) != 0)
3940                 goto timer_restart;
3941
3942         if (poll) {
3943                 struct bnx2x_fastpath *fp = &bp->fp[0];
3944                 int rc;
3945
3946                 bnx2x_tx_int(fp, 1000);
3947                 rc = bnx2x_rx_int(fp, 1000);
3948         }
3949
3950         if (!BP_NOMCP(bp)) {
3951                 int func = BP_FUNC(bp);
3952                 u32 drv_pulse;
3953                 u32 mcp_pulse;
3954
3955                 ++bp->fw_drv_pulse_wr_seq;
3956                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3957                 /* TBD - add SYSTEM_TIME */
3958                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3959                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3960
3961                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3962                              MCP_PULSE_SEQ_MASK);
3963                 /* The delta between driver pulse and mcp response
3964                  * should be 1 (before mcp response) or 0 (after mcp response)
3965                  */
3966                 if ((drv_pulse != mcp_pulse) &&
3967                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3968                         /* someone lost a heartbeat... */
3969                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3970                                   drv_pulse, mcp_pulse);
3971                 }
3972         }
3973
3974         if ((bp->state == BNX2X_STATE_OPEN) ||
3975             (bp->state == BNX2X_STATE_DISABLED))
3976                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3977
3978 timer_restart:
3979         mod_timer(&bp->timer, jiffies + bp->current_interval);
3980 }
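
/* Example of the pulse handshake above (illustrative sequence
 * numbers): if the driver writes drv_pulse = 0x001d, the acceptable
 * reads are mcp_pulse == 0x001d (MCP already answered) or 0x001c
 * (answer still pending), both taken modulo MCP_PULSE_SEQ_MASK.
 * Anything else means a lost heartbeat, which is logged but not
 * acted upon here.
 */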
3981
3982 /* end of Statistics */
3983
3984 /* nic init */
3985
3986 /*
3987  * nic init service functions
3988  */
3989
3990 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3991 {
3992         int port = BP_PORT(bp);
3993
3994         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3995                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3996                         sizeof(struct ustorm_status_block)/4);
3997         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3998                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3999                         sizeof(struct cstorm_status_block)/4);
4000 }
4001
4002 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4003                           dma_addr_t mapping, int sb_id)
4004 {
4005         int port = BP_PORT(bp);
4006         int func = BP_FUNC(bp);
4007         int index;
4008         u64 section;
4009
4010         /* USTORM */
4011         section = ((u64)mapping) + offsetof(struct host_status_block,
4012                                             u_status_block);
4013         sb->u_status_block.status_block_id = sb_id;
4014
4015         REG_WR(bp, BAR_USTRORM_INTMEM +
4016                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4017         REG_WR(bp, BAR_USTRORM_INTMEM +
4018                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4019                U64_HI(section));
4020         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4021                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4022
4023         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4024                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4025                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4026
4027         /* CSTORM */
4028         section = ((u64)mapping) + offsetof(struct host_status_block,
4029                                             c_status_block);
4030         sb->c_status_block.status_block_id = sb_id;
4031
4032         REG_WR(bp, BAR_CSTRORM_INTMEM +
4033                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4034         REG_WR(bp, BAR_CSTRORM_INTMEM +
4035                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4036                U64_HI(section));
4037         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4038                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4039
4040         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4041                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4042                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4043
4044         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4045 }
4046
4047 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4048 {
4049         int func = BP_FUNC(bp);
4050
4051         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4052                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4053                         sizeof(struct ustorm_def_status_block)/4);
4054         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4055                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4056                         sizeof(struct cstorm_def_status_block)/4);
4057         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4058                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4059                         sizeof(struct xstorm_def_status_block)/4);
4060         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4061                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4062                         sizeof(struct tstorm_def_status_block)/4);
4063 }
4064
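/* The default status block also carries the attention bits: cache the
 * four AEU signal words for each dynamic attention group and tell the
 * HC where to post attention messages before setting up the per-storm
 * sections, which mirror bnx2x_init_sb() but use per-function offsets.
 */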
4065 static void bnx2x_init_def_sb(struct bnx2x *bp,
4066                               struct host_def_status_block *def_sb,
4067                               dma_addr_t mapping, int sb_id)
4068 {
4069         int port = BP_PORT(bp);
4070         int func = BP_FUNC(bp);
4071         int index, val, reg_offset;
4072         u64 section;
4073
4074         /* ATTN */
4075         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4076                                             atten_status_block);
4077         def_sb->atten_status_block.status_block_id = sb_id;
4078
4079         bp->attn_state = 0;
4080
4081         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4082                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4083
4084         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4085                 bp->attn_group[index].sig[0] = REG_RD(bp,
4086                                                      reg_offset + 0x10*index);
4087                 bp->attn_group[index].sig[1] = REG_RD(bp,
4088                                                reg_offset + 0x4 + 0x10*index);
4089                 bp->attn_group[index].sig[2] = REG_RD(bp,
4090                                                reg_offset + 0x8 + 0x10*index);
4091                 bp->attn_group[index].sig[3] = REG_RD(bp,
4092                                                reg_offset + 0xc + 0x10*index);
4093         }
4094
4095         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4096                              HC_REG_ATTN_MSG0_ADDR_L);
4097
4098         REG_WR(bp, reg_offset, U64_LO(section));
4099         REG_WR(bp, reg_offset + 4, U64_HI(section));
4100
4101         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4102
4103         val = REG_RD(bp, reg_offset);
4104         val |= sb_id;
4105         REG_WR(bp, reg_offset, val);
4106
4107         /* USTORM */
4108         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4109                                             u_def_status_block);
4110         def_sb->u_def_status_block.status_block_id = sb_id;
4111
4112         REG_WR(bp, BAR_USTRORM_INTMEM +
4113                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4114         REG_WR(bp, BAR_USTRORM_INTMEM +
4115                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4116                U64_HI(section));
4117         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4118                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4119
4120         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4121                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4122                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4123
4124         /* CSTORM */
4125         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4126                                             c_def_status_block);
4127         def_sb->c_def_status_block.status_block_id = sb_id;
4128
4129         REG_WR(bp, BAR_CSTRORM_INTMEM +
4130                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4131         REG_WR(bp, BAR_CSTRORM_INTMEM +
4132                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4133                U64_HI(section));
4134         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4135                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4136
4137         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4138                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4139                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4140
4141         /* TSTORM */
4142         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4143                                             t_def_status_block);
4144         def_sb->t_def_status_block.status_block_id = sb_id;
4145
4146         REG_WR(bp, BAR_TSTRORM_INTMEM +
4147                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4148         REG_WR(bp, BAR_TSTRORM_INTMEM +
4149                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4150                U64_HI(section));
4151         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4152                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4153
4154         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4155                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4156                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4157
4158         /* XSTORM */
4159         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4160                                             x_def_status_block);
4161         def_sb->x_def_status_block.status_block_id = sb_id;
4162
4163         REG_WR(bp, BAR_XSTRORM_INTMEM +
4164                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4165         REG_WR(bp, BAR_XSTRORM_INTMEM +
4166                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4167                U64_HI(section));
4168         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4169                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4170
4171         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4172                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4173                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4174
4175         bp->stats_pending = 0;
4176         bp->set_mac_pending = 0;
4177
4178         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4179 }
4180
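/* Program the host coalescing timeouts for the Rx and Tx completion
 * indices.  The tick values are divided by 12, presumably to convert
 * microseconds into HC timeout units; zero ticks disables coalescing
 * on that index entirely.
 */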
4181 static void bnx2x_update_coalesce(struct bnx2x *bp)
4182 {
4183         int port = BP_PORT(bp);
4184         int i;
4185
4186         for_each_queue(bp, i) {
4187                 int sb_id = bp->fp[i].sb_id;
4188
4189                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4190                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4191                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4192                                                     U_SB_ETH_RX_CQ_INDEX),
4193                         bp->rx_ticks/12);
4194                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4195                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4196                                                      U_SB_ETH_RX_CQ_INDEX),
4197                          bp->rx_ticks ? 0 : 1);
4198                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4199                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4200                                                      U_SB_ETH_RX_BD_INDEX),
4201                          bp->rx_ticks ? 0 : 1);
4202
4203                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4204                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4205                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4206                                                     C_SB_ETH_TX_CQ_INDEX),
4207                         bp->tx_ticks/12);
4208                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4209                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4210                                                      C_SB_ETH_TX_CQ_INDEX),
4211                          bp->tx_ticks ? 0 : 1);
4212         }
4213 }
4214
4215 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4216                                        struct bnx2x_fastpath *fp, int last)
4217 {
4218         int i;
4219
4220         for (i = 0; i < last; i++) {
4221                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4222                 struct sk_buff *skb = rx_buf->skb;
4223
4224                 if (skb == NULL) {
4225                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4226                         continue;
4227                 }
4228
4229                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4230                         pci_unmap_single(bp->pdev,
4231                                          pci_unmap_addr(rx_buf, mapping),
4232                                          bp->rx_buf_use_size,
4233                                          PCI_DMA_FROMDEVICE);
4234
4235                 dev_kfree_skb(skb);
4236                 rx_buf->skb = NULL;
4237         }
4238 }
4239
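/* Rx-side setup for every queue: size the receive buffers from the MTU
 * plus overhead, pre-allocate the TPA aggregation pool when TPA is
 * enabled, chain the SGE/BD/CQE ring pages via their "next page"
 * elements and finally fill the rings with buffers.
 */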
4240 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4241 {
4242         int func = BP_FUNC(bp);
4243         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4244                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4245         u16 ring_prod, cqe_ring_prod;
4246         int i, j;
4247
4248         bp->rx_buf_use_size = bp->dev->mtu;
4249         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4250         bp->rx_buf_size = bp->rx_buf_use_size + 64;
4251
4252         if (bp->flags & TPA_ENABLE_FLAG) {
4253                 DP(NETIF_MSG_IFUP,
4254                    "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
4255                    bp->rx_buf_use_size, bp->rx_buf_size,
4256                    bp->dev->mtu + ETH_OVREHEAD);
4257
4258                 for_each_queue(bp, j) {
4259                         struct bnx2x_fastpath *fp = &bp->fp[j];
4260
4261                         for (i = 0; i < max_agg_queues; i++) {
4262                                 fp->tpa_pool[i].skb =
4263                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4264                                 if (!fp->tpa_pool[i].skb) {
4265                                         BNX2X_ERR("Failed to allocate TPA "
4266                                                   "skb pool for queue[%d] - "
4267                                                   "disabling TPA on this "
4268                                                   "queue!\n", j);
4269                                         bnx2x_free_tpa_pool(bp, fp, i);
4270                                         fp->disable_tpa = 1;
4271                                         break;
4272                                 }
4273                                 pci_unmap_addr_set((struct sw_rx_bd *)
4274                                                         &fp->tpa_pool[i],
4275                                                    mapping, 0);
4276                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4277                         }
4278                 }
4279         }
4280
4281         for_each_queue(bp, j) {
4282                 struct bnx2x_fastpath *fp = &bp->fp[j];
4283
4284                 fp->rx_bd_cons = 0;
4285                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4286                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4287
4288                 /* "next page" elements initialization */
4289                 /* SGE ring */
4290                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4291                         struct eth_rx_sge *sge;
4292
4293                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4294                         sge->addr_hi =
4295                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4296                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4297                         sge->addr_lo =
4298                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4299                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4300                 }
4301
4302                 bnx2x_init_sge_ring_bit_mask(fp);
4303
4304                 /* RX BD ring */
4305                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4306                         struct eth_rx_bd *rx_bd;
4307
4308                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4309                         rx_bd->addr_hi =
4310                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4311                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4312                         rx_bd->addr_lo =
4313                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4314                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4315                 }
4316
4317                 /* CQ ring */
4318                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4319                         struct eth_rx_cqe_next_page *nextpg;
4320
4321                         nextpg = (struct eth_rx_cqe_next_page *)
4322                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4323                         nextpg->addr_hi =
4324                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4325                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4326                         nextpg->addr_lo =
4327                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4328                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4329                 }
4330
4331                 /* Allocate SGEs and initialize the ring elements */
4332                 for (i = 0, ring_prod = 0;
4333                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4334
4335                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4336                                 BNX2X_ERR("was only able to allocate "
4337                                           "%d rx sges\n", i);
4338                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4339                                 /* Cleanup already allocated elements */
4340                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4341                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4342                                 fp->disable_tpa = 1;
4343                                 ring_prod = 0;
4344                                 break;
4345                         }
4346                         ring_prod = NEXT_SGE_IDX(ring_prod);
4347                 }
4348                 fp->rx_sge_prod = ring_prod;
4349
4350                 /* Allocate BDs and initialize BD ring */
4351                 fp->rx_comp_cons = 0;
4352                 cqe_ring_prod = ring_prod = 0;
4353                 for (i = 0; i < bp->rx_ring_size; i++) {
4354                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4355                                 BNX2X_ERR("was only able to allocate "
4356                                           "%d rx skbs\n", i);
4357                                 bp->eth_stats.rx_skb_alloc_failed++;
4358                                 break;
4359                         }
4360                         ring_prod = NEXT_RX_IDX(ring_prod);
4361                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4362                         WARN_ON(ring_prod <= i);
4363                 }
4364
4365                 fp->rx_bd_prod = ring_prod;
4366                 /* must not have more available CQEs than BDs */
4367                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4368                                        cqe_ring_prod);
4369                 fp->rx_pkt = fp->rx_calls = 0;
4370
4371                 /* Warning!
4372                  * This will generate an interrupt (to the TSTORM);
4373                  * it must only be done after the chip is initialized.
4374                  */
4375                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4376                                      fp->rx_sge_prod);
4377                 if (j != 0)
4378                         continue;
4379
4380                 REG_WR(bp, BAR_USTRORM_INTMEM +
4381                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4382                        U64_LO(fp->rx_comp_mapping));
4383                 REG_WR(bp, BAR_USTRORM_INTMEM +
4384                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4385                        U64_HI(fp->rx_comp_mapping));
4386         }
4387 }
4388
4389 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4390 {
4391         int i, j;
4392
4393         for_each_queue(bp, j) {
4394                 struct bnx2x_fastpath *fp = &bp->fp[j];
4395
4396                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4397                         struct eth_tx_bd *tx_bd =
4398                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4399
4400                         tx_bd->addr_hi =
4401                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4402                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4403                         tx_bd->addr_lo =
4404                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4405                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4406                 }
4407
4408                 fp->tx_pkt_prod = 0;
4409                 fp->tx_pkt_cons = 0;
4410                 fp->tx_bd_prod = 0;
4411                 fp->tx_bd_cons = 0;
4412                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4413                 fp->tx_pkt = 0;
4414         }
4415 }
4416
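/* Slowpath (SPQ) setup: reset the producer state and publish the SPQ
 * page base address and initial producer index to the XSTORM.
 */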
4417 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4418 {
4419         int func = BP_FUNC(bp);
4420
4421         spin_lock_init(&bp->spq_lock);
4422
4423         bp->spq_left = MAX_SPQ_PENDING;
4424         bp->spq_prod_idx = 0;
4425         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4426         bp->spq_prod_bd = bp->spq;
4427         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4428
4429         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4430                U64_LO(bp->spq_mapping));
4431         REG_WR(bp,
4432                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4433                U64_HI(bp->spq_mapping));
4434
4435         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4436                bp->spq_prod_idx);
4437 }
4438
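/* Fill the per-connection Ethernet context: the XSTORM section gets the
 * Tx BD ring and doorbell data addresses, the USTORM section gets the
 * Rx BD (and, when TPA is enabled, SGE) ring addresses, and the CDU
 * reserved fields are stamped with this connection's validation values.
 */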
4439 static void bnx2x_init_context(struct bnx2x *bp)
4440 {
4441         int i;
4442
4443         for_each_queue(bp, i) {
4444                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4445                 struct bnx2x_fastpath *fp = &bp->fp[i];
4446                 u8 sb_id = FP_SB_ID(fp);
4447
4448                 context->xstorm_st_context.tx_bd_page_base_hi =
4449                                                 U64_HI(fp->tx_desc_mapping);
4450                 context->xstorm_st_context.tx_bd_page_base_lo =
4451                                                 U64_LO(fp->tx_desc_mapping);
4452                 context->xstorm_st_context.db_data_addr_hi =
4453                                                 U64_HI(fp->tx_prods_mapping);
4454                 context->xstorm_st_context.db_data_addr_lo =
4455                                                 U64_LO(fp->tx_prods_mapping);
4456                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4457                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4458
4459                 context->ustorm_st_context.common.sb_index_numbers =
4460                                                 BNX2X_RX_SB_INDEX_NUM;
4461                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4462                 context->ustorm_st_context.common.status_block_id = sb_id;
4463                 context->ustorm_st_context.common.flags =
4464                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4465                 context->ustorm_st_context.common.mc_alignment_size = 64;
4466                 context->ustorm_st_context.common.bd_buff_size =
4467                                                 bp->rx_buf_use_size;
4468                 context->ustorm_st_context.common.bd_page_base_hi =
4469                                                 U64_HI(fp->rx_desc_mapping);
4470                 context->ustorm_st_context.common.bd_page_base_lo =
4471                                                 U64_LO(fp->rx_desc_mapping);
4472                 if (!fp->disable_tpa) {
4473                         context->ustorm_st_context.common.flags |=
4474                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4475                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4476                         context->ustorm_st_context.common.sge_buff_size =
4477                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4478                         context->ustorm_st_context.common.sge_page_base_hi =
4479                                                 U64_HI(fp->rx_sge_mapping);
4480                         context->ustorm_st_context.common.sge_page_base_lo =
4481                                                 U64_LO(fp->rx_sge_mapping);
4482                 }
4483
4484                 context->cstorm_st_context.sb_index_number =
4485                                                 C_SB_ETH_TX_CQ_INDEX;
4486                 context->cstorm_st_context.status_block_id = sb_id;
4487
4488                 context->xstorm_ag_context.cdu_reserved =
4489                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4490                                                CDU_REGION_NUMBER_XCM_AG,
4491                                                ETH_CONNECTION_TYPE);
4492                 context->ustorm_ag_context.cdu_usage =
4493                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4494                                                CDU_REGION_NUMBER_UCM_AG,
4495                                                ETH_CONNECTION_TYPE);
4496         }
4497 }
4498
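/* RSS indirection table: only meaningful with multiple queues; the
 * entries are filled round-robin so flows are spread evenly across the
 * queues.
 */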
4499 static void bnx2x_init_ind_table(struct bnx2x *bp)
4500 {
4501         int port = BP_PORT(bp);
4502         int i;
4503
4504         if (!is_multi(bp))
4505                 return;
4506
4507         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4508         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4509                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4510                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4511                         i % bp->num_queues);
4512
4513         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4514 }
4515
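/* Per-client TSTORM configuration: MTU including the Ethernet overhead,
 * the statistics counter id, optional VLAN removal and, when TPA is
 * enabled, the worst-case number of SGE elements for one aggregated
 * packet (pages per MTU-sized frame rounded up to whole SGE elements).
 */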
4516 static void bnx2x_set_client_config(struct bnx2x *bp)
4517 {
4518         struct tstorm_eth_client_config tstorm_client = {0};
4519         int port = BP_PORT(bp);
4520         int i;
4521
4522         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4523         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4524         tstorm_client.config_flags =
4525                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4526 #ifdef BCM_VLAN
4527         if (bp->rx_mode && bp->vlgrp) {
4528                 tstorm_client.config_flags |=
4529                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4530                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4531         }
4532 #endif
4533
4534         if (bp->flags & TPA_ENABLE_FLAG) {
4535                 tstorm_client.max_sges_for_packet =
4536                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4537                 tstorm_client.max_sges_for_packet =
4538                         ((tstorm_client.max_sges_for_packet +
4539                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4540                         PAGES_PER_SGE_SHIFT;
4541
4542                 tstorm_client.config_flags |=
4543                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4544         }
4545
4546         for_each_queue(bp, i) {
4547                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4548                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4549                        ((u32 *)&tstorm_client)[0]);
4550                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4551                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4552                        ((u32 *)&tstorm_client)[1]);
4553         }
4554
4555         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4556            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4557 }
4558
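/* Translate the driver rx_mode into the TSTORM MAC filter masks.  The
 * mask carries only this function's client bit, so the accept/drop
 * policy is applied per function rather than chip-wide.
 */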
4559 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4560 {
4561         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4562         int mode = bp->rx_mode;
4563         int mask = (1 << BP_L_ID(bp));
4564         int func = BP_FUNC(bp);
4565         int i;
4566
4567         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4568
4569         switch (mode) {
4570         case BNX2X_RX_MODE_NONE: /* no Rx */
4571                 tstorm_mac_filter.ucast_drop_all = mask;
4572                 tstorm_mac_filter.mcast_drop_all = mask;
4573                 tstorm_mac_filter.bcast_drop_all = mask;
4574                 break;
4575         case BNX2X_RX_MODE_NORMAL:
4576                 tstorm_mac_filter.bcast_accept_all = mask;
4577                 break;
4578         case BNX2X_RX_MODE_ALLMULTI:
4579                 tstorm_mac_filter.mcast_accept_all = mask;
4580                 tstorm_mac_filter.bcast_accept_all = mask;
4581                 break;
4582         case BNX2X_RX_MODE_PROMISC:
4583                 tstorm_mac_filter.ucast_accept_all = mask;
4584                 tstorm_mac_filter.mcast_accept_all = mask;
4585                 tstorm_mac_filter.bcast_accept_all = mask;
4586                 break;
4587         default:
4588                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4589                 break;
4590         }
4591
4592         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4593                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4594                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4595                        ((u32 *)&tstorm_mac_filter)[i]);
4596
4597 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4598                    ((u32 *)&tstorm_mac_filter)[i]); */
4599         }
4600
4601         if (mode != BNX2X_RX_MODE_NONE)
4602                 bnx2x_set_client_config(bp);
4603 }
4604
4605 static void bnx2x_init_internal_common(struct bnx2x *bp)
4606 {
4607         int i;
4608
4609         /* Zero this manually as its initialization is
4610            currently missing in the initTool */
4611         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4612                 REG_WR(bp, BAR_USTRORM_INTMEM +
4613                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4614 }
4615
4616 static void bnx2x_init_internal_port(struct bnx2x *bp)
4617 {
4618         int port = BP_PORT(bp);
4619
4620         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4624 }
4625
4626 static void bnx2x_init_internal_func(struct bnx2x *bp)
4627 {
4628         struct tstorm_eth_function_common_config tstorm_config = {0};
4629         struct stats_indication_flags stats_flags = {0};
4630         int port = BP_PORT(bp);
4631         int func = BP_FUNC(bp);
4632         int i;
4633         u16 max_agg_size;
4634
4635         if (is_multi(bp)) {
4636                 tstorm_config.config_flags = MULTI_FLAGS;
4637                 tstorm_config.rss_result_mask = MULTI_MASK;
4638         }
4639
4640         tstorm_config.leading_client_id = BP_L_ID(bp);
4641
4642         REG_WR(bp, BAR_TSTRORM_INTMEM +
4643                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4644                (*(u32 *)&tstorm_config));
4645
4646         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4647         bnx2x_set_storm_rx_mode(bp);
4648
4649         /* reset xstorm per client statistics */
4650         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4651                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4652                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4653                        i*4, 0);
4654         }
4655         /* reset tstorm per client statistics */
4656         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4657                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4658                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4659                        i*4, 0);
4660         }
4661
4662         /* Init statistics related context */
4663         stats_flags.collect_eth = 1;
4664
4665         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4666                ((u32 *)&stats_flags)[0]);
4667         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4668                ((u32 *)&stats_flags)[1]);
4669
4670         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4671                ((u32 *)&stats_flags)[0]);
4672         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4673                ((u32 *)&stats_flags)[1]);
4674
4675         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4676                ((u32 *)&stats_flags)[0]);
4677         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4678                ((u32 *)&stats_flags)[1]);
4679
4680         REG_WR(bp, BAR_XSTRORM_INTMEM +
4681                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4682                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4683         REG_WR(bp, BAR_XSTRORM_INTMEM +
4684                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4685                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4686
4687         REG_WR(bp, BAR_TSTRORM_INTMEM +
4688                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4689                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4690         REG_WR(bp, BAR_TSTRORM_INTMEM +
4691                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4692                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4693
4694         if (CHIP_IS_E1H(bp)) {
4695                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4696                         IS_E1HMF(bp));
4697                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4698                         IS_E1HMF(bp));
4699                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4700                         IS_E1HMF(bp));
4701                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4702                         IS_E1HMF(bp));
4703
4704                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4705                          bp->e1hov);
4706         }
4707
4708         /* Init CQ ring mapping and aggregation size */
4709         max_agg_size = min((u32)(bp->rx_buf_use_size +
4710                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4711                            (u32)0xffff);
4712         for_each_queue(bp, i) {
4713                 struct bnx2x_fastpath *fp = &bp->fp[i];
4714
4715                 REG_WR(bp, BAR_USTRORM_INTMEM +
4716                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4717                        U64_LO(fp->rx_comp_mapping));
4718                 REG_WR(bp, BAR_USTRORM_INTMEM +
4719                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4720                        U64_HI(fp->rx_comp_mapping));
4721
4722                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4723                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4724                          max_agg_size);
4725         }
4726 }
4727
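/* The switch below falls through intentionally: a COMMON load also runs
 * the PORT and FUNCTION initialization, and a PORT load also runs the
 * FUNCTION initialization (hence the "no break" markers).
 */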
4728 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4729 {
4730         switch (load_code) {
4731         case FW_MSG_CODE_DRV_LOAD_COMMON:
4732                 bnx2x_init_internal_common(bp);
4733                 /* no break */
4734
4735         case FW_MSG_CODE_DRV_LOAD_PORT:
4736                 bnx2x_init_internal_port(bp);
4737                 /* no break */
4738
4739         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4740                 bnx2x_init_internal_func(bp);
4741                 break;
4742
4743         default:
4744                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4745                 break;
4746         }
4747 }
4748
4749 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4750 {
4751         int i;
4752
4753         for_each_queue(bp, i) {
4754                 struct bnx2x_fastpath *fp = &bp->fp[i];
4755
4756                 fp->bp = bp;
4757                 fp->state = BNX2X_FP_STATE_CLOSED;
4758                 fp->index = i;
4759                 fp->cl_id = BP_L_ID(bp) + i;
4760                 fp->sb_id = fp->cl_id;
4761                 DP(NETIF_MSG_IFUP,
4762                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4763                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4764                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4765                               FP_SB_ID(fp));
4766                 bnx2x_update_fpsb_idx(fp);
4767         }
4768
4769         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4770                           DEF_SB_ID);
4771         bnx2x_update_dsb_idx(bp);
4772         bnx2x_update_coalesce(bp);
4773         bnx2x_init_rx_rings(bp);
4774         bnx2x_init_tx_ring(bp);
4775         bnx2x_init_sp_ring(bp);
4776         bnx2x_init_context(bp);
4777         bnx2x_init_internal(bp, load_code);
4778         bnx2x_init_ind_table(bp);
4779         bnx2x_int_enable(bp);
4780 }
4781
4782 /* end of nic init */
4783
4784 /*
4785  * gzip service functions
4786  */
4787
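/* Allocate what firmware decompression needs: a DMA-consistent buffer
 * for the inflated image plus a zlib stream and its inflate workspace.
 */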
4788 static int bnx2x_gunzip_init(struct bnx2x *bp)
4789 {
4790         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4791                                               &bp->gunzip_mapping);
4792         if (bp->gunzip_buf  == NULL)
4793                 goto gunzip_nomem1;
4794
4795         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4796         if (bp->strm  == NULL)
4797                 goto gunzip_nomem2;
4798
4799         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4800                                       GFP_KERNEL);
4801         if (bp->strm->workspace == NULL)
4802                 goto gunzip_nomem3;
4803
4804         return 0;
4805
4806 gunzip_nomem3:
4807         kfree(bp->strm);
4808         bp->strm = NULL;
4809
4810 gunzip_nomem2:
4811         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4812                             bp->gunzip_mapping);
4813         bp->gunzip_buf = NULL;
4814
4815 gunzip_nomem1:
4816         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4817                " decompression\n", bp->dev->name);
4818         return -ENOMEM;
4819 }
4820
4821 static void bnx2x_gunzip_end(struct bnx2x *bp)
4822 {
4823         kfree(bp->strm->workspace);
4824
4825         kfree(bp->strm);
4826         bp->strm = NULL;
4827
4828         if (bp->gunzip_buf) {
4829                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4830                                     bp->gunzip_mapping);
4831                 bp->gunzip_buf = NULL;
4832         }
4833 }
4834
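/* Inflate a gzip-wrapped firmware image into bp->gunzip_buf.  The gzip
 * header is parsed by hand: verify the 0x1f 0x8b magic and the deflate
 * method, skip the 10 fixed header bytes plus an optional NUL-terminated
 * file name (FNAME flag), then inflate the raw deflate stream (the
 * negative windowBits tells zlib there is no zlib header to expect).
 */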
4835 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4836 {
4837         int n, rc;
4838
4839         /* check gzip header */
4840         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4841                 return -EINVAL;
4842
4843         n = 10;
4844
4845 #define FNAME                           0x8
4846
4847         if (zbuf[3] & FNAME)
4848                 while ((zbuf[n++] != 0) && (n < len));
4849
4850         bp->strm->next_in = zbuf + n;
4851         bp->strm->avail_in = len - n;
4852         bp->strm->next_out = bp->gunzip_buf;
4853         bp->strm->avail_out = FW_BUF_SIZE;
4854
4855         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4856         if (rc != Z_OK)
4857                 return rc;
4858
4859         rc = zlib_inflate(bp->strm, Z_FINISH);
4860         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4861                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4862                        bp->dev->name, bp->strm->msg);
4863
4864         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4865         if (bp->gunzip_outlen & 0x3)
4866                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4867                                     " gunzip_outlen (%d) not aligned\n",
4868                        bp->dev->name, bp->gunzip_outlen);
4869         bp->gunzip_outlen >>= 2;
4870
4871         zlib_inflateEnd(bp->strm);
4872
4873         if (rc == Z_STREAM_END)
4874                 return 0;
4875
4876         return rc;
4877 }
4878
4879 /* nic load/unload */
4880
4881 /*
4882  * General service functions
4883  */
4884
4885 /* send a NIG loopback debug packet */
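/* Each write to the debug-packet register carries two dwords of packet
 * data plus a control dword (0x20 flags SOP, 0x10 flags EOP), so the
 * 16-byte test frame below takes two writes.
 */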
4886 static void bnx2x_lb_pckt(struct bnx2x *bp)
4887 {
4888         u32 wb_write[3];
4889
4890         /* Ethernet source and destination addresses */
4891         wb_write[0] = 0x55555555;
4892         wb_write[1] = 0x55555555;
4893         wb_write[2] = 0x20;             /* SOP */
4894         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4895
4896         /* NON-IP protocol */
4897         wb_write[0] = 0x09000000;
4898         wb_write[1] = 0x55555555;
4899         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4900         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4901 }
4902
4903 /* some of the internal memories
4904  * are not directly readable from the driver;
4905  * to test them we send debug packets
4906  */
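/* The test starves the parser of CFC search credits, pushes loopback
 * packets through, and checks the NIG byte counter and the PRS packet
 * counter at each stage, resetting BRB/PRS in between.
 */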
4907 static int bnx2x_int_mem_test(struct bnx2x *bp)
4908 {
4909         int factor;
4910         int count, i;
4911         u32 val = 0;
4912
4913         if (CHIP_REV_IS_FPGA(bp))
4914                 factor = 120;
4915         else if (CHIP_REV_IS_EMUL(bp))
4916                 factor = 200;
4917         else
4918                 factor = 1;
4919
4920         DP(NETIF_MSG_HW, "start part1\n");
4921
4922         /* Disable inputs of parser neighbor blocks */
4923         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4924         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4925         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4926         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4927
4928         /*  Write 0 to parser credits for CFC search request */
4929         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4930
4931         /* send Ethernet packet */
4932         bnx2x_lb_pckt(bp);
4933
4934         /* TODO: do we need to reset the NIG statistics? */
4935         /* Wait until NIG register shows 1 packet of size 0x10 */
4936         count = 1000 * factor;
4937         while (count) {
4938
4939                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4940                 val = *bnx2x_sp(bp, wb_data[0]);
4941                 if (val == 0x10)
4942                         break;
4943
4944                 msleep(10);
4945                 count--;
4946         }
4947         if (val != 0x10) {
4948                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4949                 return -1;
4950         }
4951
4952         /* Wait until PRS register shows 1 packet */
4953         count = 1000 * factor;
4954         while (count) {
4955                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4956                 if (val == 1)
4957                         break;
4958
4959                 msleep(10);
4960                 count--;
4961         }
4962         if (val != 0x1) {
4963                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4964                 return -2;
4965         }
4966
4967         /* Reset and init BRB, PRS */
4968         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4969         msleep(50);
4970         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4971         msleep(50);
4972         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4973         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4974
4975         DP(NETIF_MSG_HW, "part2\n");
4976
4977         /* Disable inputs of parser neighbor blocks */
4978         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4979         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4980         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4981         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4982
4983         /* Write 0 to parser credits for CFC search request */
4984         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4985
4986         /* send 10 Ethernet packets */
4987         for (i = 0; i < 10; i++)
4988                 bnx2x_lb_pckt(bp);
4989
4990         /* Wait until NIG register shows 10 + 1
4991            packets of size 11*0x10 = 0xb0 */
4992         count = 1000 * factor;
4993         while (count) {
4994
4995                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4996                 val = *bnx2x_sp(bp, wb_data[0]);
4997                 if (val == 0xb0)
4998                         break;
4999
5000                 msleep(10);
5001                 count--;
5002         }
5003         if (val != 0xb0) {
5004                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5005                 return -3;
5006         }
5007
5008         /* Wait until PRS register shows 2 packets */
5009         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5010         if (val != 2)
5011                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5012
5013         /* Write 1 to parser credits for CFC search request */
5014         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5015
5016         /* Wait until PRS register shows 3 packets */
5017         msleep(10 * factor);
5018         /* Wait until NIG register shows 1 packet of size 0x10 */
5019         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5020         if (val != 3)
5021                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5022
5023         /* clear NIG EOP FIFO */
5024         for (i = 0; i < 11; i++)
5025                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5026         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5027         if (val != 1) {
5028                 BNX2X_ERR("clear of NIG failed\n");
5029                 return -4;
5030         }
5031
5032         /* Reset and init BRB, PRS, NIG */
5033         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5034         msleep(50);
5035         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5036         msleep(50);
5037         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5038         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5039 #ifndef BCM_ISCSI
5040         /* set NIC mode */
5041         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5042 #endif
5043
5044         /* Enable inputs of parser neighbor blocks */
5045         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5046         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5047         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5048         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5049
5050         DP(NETIF_MSG_HW, "done\n");
5051
5052         return 0; /* OK */
5053 }
5054
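/* Unmask the interrupt/attention sources of the HW blocks; the PBF mask
 * deliberately keeps bits 3 and 4 masked.
 */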
5055 static void enable_blocks_attention(struct bnx2x *bp)
5056 {
5057         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5058         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5059         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5060         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5061         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5062         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5063         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5064         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5065         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5066 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5067 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5068         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5069         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5070         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5071 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5072 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5073         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5074         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5075         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5076         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5077 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5078 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5079         if (CHIP_REV_IS_FPGA(bp))
5080                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5081         else
5082                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5083         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5084         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5085         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5086 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5087 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5088         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5089         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5090 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5091         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5092 }
5093
5094
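/* One-time chip-wide initialization, run by the instance that received
 * FW_MSG_CODE_DRV_LOAD_COMMON: take the blocks out of reset, bring up
 * PXP and DMAE, zero the storm memories and, on an E1 first power-up,
 * run the internal memory self test before the per-port init takes over.
 */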
5095 static int bnx2x_init_common(struct bnx2x *bp)
5096 {
5097         u32 val, i;
5098
5099         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5100
5101         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5102         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5103
5104         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5105         if (CHIP_IS_E1H(bp))
5106                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5107
5108         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5109         msleep(30);
5110         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5111
5112         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5113         if (CHIP_IS_E1(bp)) {
5114                 /* enable HW interrupt from PXP on USDM overflow
5115                    (bit 16 on INT_MASK_0) */
5116                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5117         }
5118
5119         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5120         bnx2x_init_pxp(bp);
5121
5122 #ifdef __BIG_ENDIAN
5123         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5124         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5125         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5126         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5127         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5128         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5129
5130 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5131         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5132         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5133         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5134         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5135 #endif
5136
5137         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5138 #ifdef BCM_ISCSI
5139         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5140         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5141         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5142 #endif
5143
5144         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5145                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5146
5147         /* let the HW do its magic ... */
5148         msleep(100);
5149         /* finish PXP init */
5150         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5151         if (val != 1) {
5152                 BNX2X_ERR("PXP2 CFG failed\n");
5153                 return -EBUSY;
5154         }
5155         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5156         if (val != 1) {
5157                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5158                 return -EBUSY;
5159         }
5160
5161         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5162         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5163
5164         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5165
5166         /* clean the DMAE memory */
5167         bp->dmae_ready = 1;
5168         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5169
5170         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5171         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5172         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5173         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5174
5175         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5176         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5177         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5178         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5179
5180         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5181         /* soft reset pulse */
5182         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5183         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5184
5185 #ifdef BCM_ISCSI
5186         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5187 #endif
5188
5189         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5190         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5191         if (!CHIP_REV_IS_SLOW(bp)) {
5192                 /* enable hw interrupt from doorbell Q */
5193                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5194         }
5195
5196         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5197         if (CHIP_REV_IS_SLOW(bp)) {
5198                 /* fix for emulation and FPGA so that no pause is asserted */
5199                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5200                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5201                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5202                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5203         }
5204
5205         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5206         /* set NIC mode */
5207         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5208         if (CHIP_IS_E1H(bp))
5209                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5210
5211         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5212         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5213         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5214         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5215
5216         if (CHIP_IS_E1H(bp)) {
5217                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5218                                 STORM_INTMEM_SIZE_E1H/2);
5219                 bnx2x_init_fill(bp,
5220                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5221                                 0, STORM_INTMEM_SIZE_E1H/2);
5222                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5223                                 STORM_INTMEM_SIZE_E1H/2);
5224                 bnx2x_init_fill(bp,
5225                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5226                                 0, STORM_INTMEM_SIZE_E1H/2);
5227                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5228                                 STORM_INTMEM_SIZE_E1H/2);
5229                 bnx2x_init_fill(bp,
5230                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5231                                 0, STORM_INTMEM_SIZE_E1H/2);
5232                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5233                                 STORM_INTMEM_SIZE_E1H/2);
5234                 bnx2x_init_fill(bp,
5235                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5236                                 0, STORM_INTMEM_SIZE_E1H/2);
5237         } else { /* E1 */
5238                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5239                                 STORM_INTMEM_SIZE_E1);
5240                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5241                                 STORM_INTMEM_SIZE_E1);
5242                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5243                                 STORM_INTMEM_SIZE_E1);
5244                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5245                                 STORM_INTMEM_SIZE_E1);
5246         }
5247
5248         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5249         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5250         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5251         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5252
5253         /* sync semi rtc */
5254         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5255                0x80000000);
5256         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5257                0x80000000);
5258
5259         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5260         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5261         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5262
5263         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5264         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5265                 REG_WR(bp, i, 0xc0cac01a);
5266                 /* TODO: replace with something meaningful */
5267         }
5268         if (CHIP_IS_E1H(bp))
5269                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5270         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5271
5272         if (sizeof(union cdu_context) != 1024)
5273                 /* we currently assume that a context is 1024 bytes */
5274                 printk(KERN_ALERT PFX "please adjust the size of"
5275                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5276
5277         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5278         val = (4 << 24) + (0 << 12) + 1024;
5279         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5280         if (CHIP_IS_E1(bp)) {
5281                 /* !!! fix pxp client credit until excel update */
5282                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5283                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5284         }
5285
5286         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5287         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5288
5289         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5290         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5291
5292         /* PXPCS COMMON comes here */
5293         /* Reset PCIE errors for debug */
5294         REG_WR(bp, 0x2814, 0xffffffff);
5295         REG_WR(bp, 0x3820, 0xffffffff);
5296
5297         /* EMAC0 COMMON comes here */
5298         /* EMAC1 COMMON comes here */
5299         /* DBU COMMON comes here */
5300         /* DBG COMMON comes here */
5301
5302         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5303         if (CHIP_IS_E1H(bp)) {
5304                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5305                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5306         }
5307
5308         if (CHIP_REV_IS_SLOW(bp))
5309                 msleep(200);
5310
5311         /* finish CFC init */
5312         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5313         if (val != 1) {
5314                 BNX2X_ERR("CFC LL_INIT failed\n");
5315                 return -EBUSY;
5316         }
5317         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5318         if (val != 1) {
5319                 BNX2X_ERR("CFC AC_INIT failed\n");
5320                 return -EBUSY;
5321         }
5322         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5323         if (val != 1) {
5324                 BNX2X_ERR("CFC CAM_INIT failed\n");
5325                 return -EBUSY;
5326         }
5327         REG_WR(bp, CFC_REG_DEBUG0, 0);
5328
5329         /* read NIG statistic
5330            to see if this is our first time up since power-up */
5331         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5332         val = *bnx2x_sp(bp, wb_data[0]);
5333
5334         /* do internal memory self test */
5335         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5336                 BNX2X_ERR("internal mem self test failed\n");
5337                 return -EBUSY;
5338         }
5339
5340         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5341         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5342                 /* Fan failure is indicated by SPIO 5 */
5343                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5344                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5345
5346                 /* set to active low mode */
5347                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5348                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5349                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5350                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5351
5352                 /* enable interrupt to signal the IGU */
5353                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5354                 val |= (1 << MISC_REGISTERS_SPIO_5);
5355                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5356                 break;
5357
5358         default:
5359                 break;
5360         }
5361
5362         /* clear PXP2 attentions */
5363         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5364
5365         enable_blocks_attention(bp);
5366
5367         if (bp->flags & TPA_ENABLE_FLAG) {
5368                 struct tstorm_eth_tpa_exist tmp = {0};
5369
5370                 tmp.tpa_exist = 1;
5371
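     		/* copy the 8-byte tstorm_eth_tpa_exist struct into TSTORM
     		   internal memory as two 32-bit writes */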
5372                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5373                        ((u32 *)&tmp)[0]);
5374                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5375                        ((u32 *)&tmp)[1]);
5376         }
5377
5378         if (!BP_NOMCP(bp)) {
5379                 bnx2x_acquire_phy_lock(bp);
5380                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5381                 bnx2x_release_phy_lock(bp);
5382         } else
5383                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5384
5385         return 0;
5386 }
5387
5388 static int bnx2x_init_port(struct bnx2x *bp)
5389 {
5390         int port = BP_PORT(bp);
5391         u32 val;
#ifdef BCM_ISCSI
     	/* the BCM_ISCSI blocks below use these; i is the ILT line index,
     	   starting at this function's base (384 ILT lines per function -
     	   see the Port0/Port1 comments at each use) */
     	int func = BP_FUNC(bp);
     	u32 wb_write[2];
     	int i = port ? 384 : 0;
#endif
5392
5393         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5394
5395         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5396
5397         /* Port PXP comes here */
5398         /* Port PXP2 comes here */
5399 #ifdef BCM_ISCSI
5400         /* Port0  1
5401          * Port1  385 */
5402         i++;
5403         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5404         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5405         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5406         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5407
5408         /* Port0  2
5409          * Port1  386 */
5410         i++;
5411         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5412         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5413         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5414         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5415
5416         /* Port0  3
5417          * Port1  387 */
5418         i++;
5419         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5420         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5421         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5422         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5423 #endif
5424         /* Port CMs come here */
5425
5426         /* Port QM comes here */
5427 #ifdef BCM_ISCSI
5428         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5429         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5430
5431         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5432                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5433 #endif
5434         /* Port DQ comes here */
5435         /* Port BRB1 comes here */
5436         /* Port PRS comes here */
5437         /* Port TSDM comes here */
5438         /* Port CSDM comes here */
5439         /* Port USDM comes here */
5440         /* Port XSDM comes here */
5441         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5442                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5443         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5444                              port ? USEM_PORT1_END : USEM_PORT0_END);
5445         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5446                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5447         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5448                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5449         /* Port UPB comes here */
5450         /* Port XPB comes here */
5451
5452         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5453                              port ? PBF_PORT1_END : PBF_PORT0_END);
5454
5455         /* configure PBF to work without PAUSE, MTU 9000 */
5456         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5457
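     	/* threshold and init credit are presumably in 16-byte units:
     	   9040/16 = 565 units covers a 9000-byte MTU frame plus overhead */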
5458         /* update threshold */
5459         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5460         /* update init credit */
5461         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5462
5463         /* probe changes */
5464         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5465         msleep(5);
5466         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5467
5468 #ifdef BCM_ISCSI
5469         /* tell the searcher where the T2 table is */
5470         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5471
5472         wb_write[0] = U64_LO(bp->t2_mapping);
5473         wb_write[1] = U64_HI(bp->t2_mapping);
5474         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5475         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5476         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5477         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5478
5479         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5480         /* Port SRCH comes here */
5481 #endif
5482         /* Port CDU comes here */
5483         /* Port CFC comes here */
5484
5485         if (CHIP_IS_E1(bp)) {
5486                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5487                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5488         }
5489         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5490                              port ? HC_PORT1_END : HC_PORT0_END);
5491
5492         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5493                                     MISC_AEU_PORT0_START,
5494                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5495         /* init aeu_mask_attn_func_0/1:
5496          *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
5497          *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF;
5498          *             bits 4-7 are used for "per vn group attention" */
5499         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5500                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5501
5502         /* Port PXPCS comes here */
5503         /* Port EMAC0 comes here */
5504         /* Port EMAC1 comes here */
5505         /* Port DBU comes here */
5506         /* Port DBG comes here */
5507         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5508                              port ? NIG_PORT1_END : NIG_PORT0_END);
5509
5510         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5511
5512         if (CHIP_IS_E1H(bp)) {
5513                 u32 wsum;
5514                 struct cmng_struct_per_port m_cmng_port;
5515                 int vn;
5516
5517                 /* 0x2 disable e1hov, 0x1 enable */
5518                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5519                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5520
5521                 /* Init RATE SHAPING and FAIRNESS contexts.
5522                    Initialize as if there is a 10G link. */
5523                 wsum = bnx2x_calc_vn_wsum(bp);
5524                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5525                 if (IS_E1HMF(bp))
5526                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5527                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5528                                         wsum, 10000, &m_cmng_port);
5529         }
5530
5531         /* Port MCP comes here */
5532         /* Port DMAE comes here */
5533
5534         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5535         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5536                 /* add SPIO 5 to group 0 */
5537                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5538                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5539                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5540                 break;
5541
5542         default:
5543                 break;
5544         }
5545
5546         bnx2x__link_reset(bp);
5547
5548         return 0;
5549 }
5550
5551 #define ILT_PER_FUNC            (768/2)
5552 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
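/* e.g. with 768/2 = 384 lines per function, func 0 owns ILT lines 0-383
   and func 1 owns lines 384-767 */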
5553 /* the phys address is shifted right 12 bits and a valid bit (1)
5554    is added as the 53rd bit
5555    then since this is a wide register(TM)
5556    we split it into two 32 bit writes
5557  */
5558 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5559 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
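/* e.g. for addr 0x123456789000: ONCHIP_ADDR1 == 0x23456789 (bits 12-43),
   ONCHIP_ADDR2 == 0x100001 (valid bit 1<<20 | high address bits) */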
5560 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5561 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
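/* e.g. PXP_ONE_ILT(5) == 0x1405 (first == last == line 5),
   PXP_ILT_RANGE(0, 15) == 0x3c00 (lines 0-15) */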
5562
5563 #define CNIC_ILT_LINES          0
5564
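/* program a single ILT line; the on-chip address table sits at a
   different GRC offset on E1H than on E1 */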
5565 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5566 {
5567         int reg;
5568
5569         if (CHIP_IS_E1H(bp))
5570                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5571         else /* E1 */
5572                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5573
5574         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5575 }
5576
5577 static int bnx2x_init_func(struct bnx2x *bp)
5578 {
5579         int port = BP_PORT(bp);
5580         int func = BP_FUNC(bp);
5581         int i;
5582
5583         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5584
5585         i = FUNC_ILT_BASE(func);
5586
5587         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5588         if (CHIP_IS_E1H(bp)) {
5589                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5590                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5591         } else /* E1 */
5592                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5593                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5594
5595
5596         if (CHIP_IS_E1H(bp)) {
5597                 for (i = 0; i < 9; i++)
5598                         bnx2x_init_block(bp,
5599                                          cm_start[func][i], cm_end[func][i]);
5600
5601                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5602                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5603         }
5604
5605         /* HC init per function */
5606         if (CHIP_IS_E1H(bp)) {
5607                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5608
5609                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5610                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5611         }
5612         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5613
5614         if (CHIP_IS_E1H(bp))
5615                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5616
5617         /* Reset PCIE errors for debug */
5618         REG_WR(bp, 0x2114, 0xffffffff);
5619         REG_WR(bp, 0x2120, 0xffffffff);
5620
5621         return 0;
5622 }
5623
5624 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5625 {
5626         int i, rc = 0;
5627
5628         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5629            BP_FUNC(bp), load_code);
5630
5631         bp->dmae_ready = 0;
5632         mutex_init(&bp->dmae_mutex);
5633         bnx2x_gunzip_init(bp);
5634
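     	/* the cases below deliberately fall through: a COMMON load also
     	   runs the PORT and FUNCTION init stages, and a PORT load also
     	   runs the FUNCTION stage */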
5635         switch (load_code) {
5636         case FW_MSG_CODE_DRV_LOAD_COMMON:
5637                 rc = bnx2x_init_common(bp);
5638                 if (rc)
5639                         goto init_hw_err;
5640                 /* no break */
5641
5642         case FW_MSG_CODE_DRV_LOAD_PORT:
5643                 bp->dmae_ready = 1;
5644                 rc = bnx2x_init_port(bp);
5645                 if (rc)
5646                         goto init_hw_err;
5647                 /* no break */
5648
5649         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5650                 bp->dmae_ready = 1;
5651                 rc = bnx2x_init_func(bp);
5652                 if (rc)
5653                         goto init_hw_err;
5654                 break;
5655
5656         default:
5657                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5658                 break;
5659         }
5660
5661         if (!BP_NOMCP(bp)) {
5662                 int func = BP_FUNC(bp);
5663
5664                 bp->fw_drv_pulse_wr_seq =
5665                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5666                                  DRV_PULSE_SEQ_MASK);
5667                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5668                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5669                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5670         } else
5671                 bp->func_stx = 0;
5672
5673         /* this needs to be done before gunzip end */
5674         bnx2x_zero_def_sb(bp);
5675         for_each_queue(bp, i)
5676                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5677
5678 init_hw_err:
5679         bnx2x_gunzip_end(bp);
5680
5681         return rc;
5682 }
5683
5684 /* send the MCP a request, block until there is a reply */
5685 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5686 {
5687         int func = BP_FUNC(bp);
5688         u32 seq = ++bp->fw_seq;
5689         u32 rc = 0;
5690         u32 cnt = 1;
5691         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5692
5693         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5694         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5695
5696         do {
5697                 /* let the FW do its magic ... */
5698                 msleep(delay);
5699
5700                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5701
5702                 /* Give the FW up to 2 seconds (200*10ms); 20s when delay is 100ms */
5703         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5704
5705         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5706            cnt*delay, rc, seq);
5707
5708         /* is this a reply to our command? */
5709         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5710                 rc &= FW_MSG_CODE_MASK;
5711
5712         } else {
5713                 /* FW BUG! */
5714                 BNX2X_ERR("FW failed to respond!\n");
5715                 bnx2x_fw_dump(bp);
5716                 rc = 0;
5717         }
5718
5719         return rc;
5720 }
5721
5722 static void bnx2x_free_mem(struct bnx2x *bp)
5723 {
5724
5725 #define BNX2X_PCI_FREE(x, y, size) \
5726         do { \
5727                 if (x) { \
5728                         pci_free_consistent(bp->pdev, size, x, y); \
5729                         x = NULL; \
5730                         y = 0; \
5731                 } \
5732         } while (0)
5733
5734 #define BNX2X_FREE(x) \
5735         do { \
5736                 if (x) { \
5737                         vfree(x); \
5738                         x = NULL; \
5739                 } \
5740         } while (0)
5741
5742         int i;
5743
5744         /* fastpath */
5745         for_each_queue(bp, i) {
5746
5747                 /* Status blocks */
5748                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5749                                bnx2x_fp(bp, i, status_blk_mapping),
5750                                sizeof(struct host_status_block) +
5751                                sizeof(struct eth_tx_db_data));
5752
5753                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5754                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5755                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5756                                bnx2x_fp(bp, i, tx_desc_mapping),
5757                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5758
5759                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5760                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5761                                bnx2x_fp(bp, i, rx_desc_mapping),
5762                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5763
5764                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5765                                bnx2x_fp(bp, i, rx_comp_mapping),
5766                                sizeof(struct eth_fast_path_rx_cqe) *
5767                                NUM_RCQ_BD);
5768
5769                 /* SGE ring */
5770                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5771                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5772                                bnx2x_fp(bp, i, rx_sge_mapping),
5773                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5774         }
5775         /* end of fastpath */
5776
5777         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5778                        sizeof(struct host_def_status_block));
5779
5780         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5781                        sizeof(struct bnx2x_slowpath));
5782
5783 #ifdef BCM_ISCSI
5784         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5785         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5786         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5787         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5788 #endif
5789         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5790
5791 #undef BNX2X_PCI_FREE
5792 #undef BNX2X_FREE
5793 }
5794
5795 static int bnx2x_alloc_mem(struct bnx2x *bp)
5796 {
5797
5798 #define BNX2X_PCI_ALLOC(x, y, size) \
5799         do { \
5800                 x = pci_alloc_consistent(bp->pdev, size, y); \
5801                 if (x == NULL) \
5802                         goto alloc_mem_err; \
5803                 memset(x, 0, size); \
5804         } while (0)
5805
5806 #define BNX2X_ALLOC(x, size) \
5807         do { \
5808                 x = vmalloc(size); \
5809                 if (x == NULL) \
5810                         goto alloc_mem_err; \
5811                 memset(x, 0, size); \
5812         } while (0)
5813
5814         int i;
5815
5816         /* fastpath */
5817         for_each_queue(bp, i) {
5818                 bnx2x_fp(bp, i, bp) = bp;
5819
5820                 /* Status blocks */
5821                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5822                                 &bnx2x_fp(bp, i, status_blk_mapping),
5823                                 sizeof(struct host_status_block) +
5824                                 sizeof(struct eth_tx_db_data));
5825
5826                 bnx2x_fp(bp, i, hw_tx_prods) =
5827                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5828
5829                 bnx2x_fp(bp, i, tx_prods_mapping) =
5830                                 bnx2x_fp(bp, i, status_blk_mapping) +
5831                                 sizeof(struct host_status_block);
5832
5833                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5834                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5835                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5836                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5837                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5838                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5839
5840                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5841                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5842                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5843                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5844                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5845
5846                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5847                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5848                                 sizeof(struct eth_fast_path_rx_cqe) *
5849                                 NUM_RCQ_BD);
5850
5851                 /* SGE ring */
5852                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5853                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5854                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5855                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5856                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5857         }
5858         /* end of fastpath */
5859
5860         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5861                         sizeof(struct host_def_status_block));
5862
5863         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5864                         sizeof(struct bnx2x_slowpath));
5865
5866 #ifdef BCM_ISCSI
5867         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5868
5869         /* Initialize T1 */
5870         for (i = 0; i < 64*1024; i += 64) {
5871                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5872                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5873         }
5874
5875         /* allocate searcher T2 table;
5876            we allocate 1/4 of the T1 size (16K) for T2
5877           (which is not entered into the ILT) */
5878         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5879
5880         /* Initialize T2 */
5881         for (i = 0; i < 16*1024; i += 64)
5882                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5883
5884         /* now fix up the last line in the table to wrap back to the start */
5885         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5886
5887         /* Timer block array (MAX_CONN*8), physically uncached; for now 1024 connections */
5888         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5889
5890         /* QM queues (128*MAX_CONN) */
5891         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5892 #endif
5893
5894         /* Slow path ring */
5895         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5896
5897         return 0;
5898
5899 alloc_mem_err:
5900         bnx2x_free_mem(bp);
5901         return -ENOMEM;
5902
5903 #undef BNX2X_PCI_ALLOC
5904 #undef BNX2X_ALLOC
5905 }
5906
5907 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5908 {
5909         int i;
5910
5911         for_each_queue(bp, i) {
5912                 struct bnx2x_fastpath *fp = &bp->fp[i];
5913
5914                 u16 bd_cons = fp->tx_bd_cons;
5915                 u16 sw_prod = fp->tx_pkt_prod;
5916                 u16 sw_cons = fp->tx_pkt_cons;
5917
5918                 while (sw_cons != sw_prod) {
5919                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5920                         sw_cons++;
5921                 }
5922         }
5923 }
5924
5925 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5926 {
5927         int i, j;
5928
5929         for_each_queue(bp, j) {
5930                 struct bnx2x_fastpath *fp = &bp->fp[j];
5931
5932                 for (i = 0; i < NUM_RX_BD; i++) {
5933                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5934                         struct sk_buff *skb = rx_buf->skb;
5935
5936                         if (skb == NULL)
5937                                 continue;
5938
5939                         pci_unmap_single(bp->pdev,
5940                                          pci_unmap_addr(rx_buf, mapping),
5941                                          bp->rx_buf_use_size,
5942                                          PCI_DMA_FROMDEVICE);
5943
5944                         rx_buf->skb = NULL;
5945                         dev_kfree_skb(skb);
5946                 }
5947                 if (!fp->disable_tpa)
5948                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5949                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5950                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5951         }
5952 }
5953
5954 static void bnx2x_free_skbs(struct bnx2x *bp)
5955 {
5956         bnx2x_free_tx_skbs(bp);
5957         bnx2x_free_rx_skbs(bp);
5958 }
5959
5960 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5961 {
5962         int i, offset = 1;
5963
5964         free_irq(bp->msix_table[0].vector, bp->dev);
5965         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5966            bp->msix_table[0].vector);
5967
5968         for_each_queue(bp, i) {
5969                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5970                    "state %x\n", i, bp->msix_table[i + offset].vector,
5971                    bnx2x_fp(bp, i, state));
5972
5973                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5974                         BNX2X_ERR("IRQ of fp #%d being freed while "
5975                                   "state != closed\n", i);
5976
5977                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5978         }
5979 }
5980
5981 static void bnx2x_free_irq(struct bnx2x *bp)
5982 {
5983         if (bp->flags & USING_MSIX_FLAG) {
5984                 bnx2x_free_msix_irqs(bp);
5985                 pci_disable_msix(bp->pdev);
5986                 bp->flags &= ~USING_MSIX_FLAG;
5987
5988         } else
5989                 free_irq(bp->pdev->irq, bp->dev);
5990 }
5991
5992 static int bnx2x_enable_msix(struct bnx2x *bp)
5993 {
5994         int i, rc, offset;
5995
5996         bp->msix_table[0].entry = 0;
5997         offset = 1;
5998         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5999
6000         for_each_queue(bp, i) {
6001                 int igu_vec = offset + i + BP_L_ID(bp);
6002
6003                 bp->msix_table[i + offset].entry = igu_vec;
6004                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6005                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6006         }
6007
6008         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6009                              bp->num_queues + offset);
6010         if (rc) {
6011                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6012                 return -1;
6013         }
6014         bp->flags |= USING_MSIX_FLAG;
6015
6016         return 0;
6017 }
6018
6019 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6020 {
6021         int i, rc, offset = 1;
6022
6023         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6024                          bp->dev->name, bp->dev);
6025         if (rc) {
6026                 BNX2X_ERR("request sp irq failed\n");
6027                 return -EBUSY;
6028         }
6029
6030         for_each_queue(bp, i) {
6031                 rc = request_irq(bp->msix_table[i + offset].vector,
6032                                  bnx2x_msix_fp_int, 0,
6033                                  bp->dev->name, &bp->fp[i]);
6034                 if (rc) {
6035                         BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
6036                                   i + offset, -rc);
6037                         bnx2x_free_msix_irqs(bp);
6038                         return -EBUSY;
6039                 }
6040
6041                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6042         }
6043
6044         return 0;
6045 }
6046
6047 static int bnx2x_req_irq(struct bnx2x *bp)
6048 {
6049         int rc;
6050
6051         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6052                          bp->dev->name, bp->dev);
6053         if (!rc)
6054                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6055
6056         return rc;
6057 }
6058
6059 /*
6060  * Init service functions
6061  */
6062
6063 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6064 {
6065         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6066         int port = BP_PORT(bp);
6067
6068         /* CAM allocation
6069          * unicasts 0-31:port0 32-63:port1
6070          * multicast 64-127:port0 128-191:port1
6071          */
6072         config->hdr.length_6b = 2;
6073         config->hdr.offset = port ? 31 : 0;
6074         config->hdr.client_id = BP_CL_ID(bp);
6075         config->hdr.reserved1 = 0;
6076
6077         /* primary MAC */
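     	/* each CAM field holds two MAC bytes; on a little-endian host the
     	   swab16 calls turn e.g. 00:11:22:33:44:55 into msb 0x0011,
     	   middle 0x2233, lsb 0x4455 */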
6078         config->config_table[0].cam_entry.msb_mac_addr =
6079                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6080         config->config_table[0].cam_entry.middle_mac_addr =
6081                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6082         config->config_table[0].cam_entry.lsb_mac_addr =
6083                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6084         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6085         if (set)
6086                 config->config_table[0].target_table_entry.flags = 0;
6087         else
6088                 CAM_INVALIDATE(config->config_table[0]);
6089         config->config_table[0].target_table_entry.client_id = 0;
6090         config->config_table[0].target_table_entry.vlan_id = 0;
6091
6092         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6093            (set ? "setting" : "clearing"),
6094            config->config_table[0].cam_entry.msb_mac_addr,
6095            config->config_table[0].cam_entry.middle_mac_addr,
6096            config->config_table[0].cam_entry.lsb_mac_addr);
6097
6098         /* broadcast */
6099         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6100         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6101         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6102         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6103         if (set)
6104                 config->config_table[1].target_table_entry.flags =
6105                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6106         else
6107                 CAM_INVALIDATE(config->config_table[1]);
6108         config->config_table[1].target_table_entry.client_id = 0;
6109         config->config_table[1].target_table_entry.vlan_id = 0;
6110
6111         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6112                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6113                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6114 }
6115
6116 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6117 {
6118         struct mac_configuration_cmd_e1h *config =
6119                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6120
6121         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6122                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6123                 return;
6124         }
6125
6126         /* CAM allocation for E1H
6127          * unicasts: by func number
6128          * multicast: 20+FUNC*20, 20 each
6129          */
6130         config->hdr.length_6b = 1;
6131         config->hdr.offset = BP_FUNC(bp);
6132         config->hdr.client_id = BP_CL_ID(bp);
6133         config->hdr.reserved1 = 0;
6134
6135         /* primary MAC */
6136         config->config_table[0].msb_mac_addr =
6137                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6138         config->config_table[0].middle_mac_addr =
6139                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6140         config->config_table[0].lsb_mac_addr =
6141                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6142         config->config_table[0].client_id = BP_L_ID(bp);
6143         config->config_table[0].vlan_id = 0;
6144         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6145         if (set)
6146                 config->config_table[0].flags = BP_PORT(bp);
6147         else
6148                 config->config_table[0].flags =
6149                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6150
6151         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6152            (set ? "setting" : "clearing"),
6153            config->config_table[0].msb_mac_addr,
6154            config->config_table[0].middle_mac_addr,
6155            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6156
6157         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6158                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6159                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6160 }
6161
6162 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6163                              int *state_p, int poll)
6164 {
6165         /* can take a while if any port is running */
6166         int cnt = 500;
6167
6168         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6169            poll ? "polling" : "waiting", state, idx);
6170
6171         might_sleep();
6172         while (cnt--) {
6173                 if (poll) {
6174                         bnx2x_rx_int(bp->fp, 10);
6175                         /* if the index is different from 0,
6176                          * the reply for some commands will
6177                          * be on the non-default queue
6178                          */
6179                         if (idx)
6180                                 bnx2x_rx_int(&bp->fp[idx], 10);
6181                 }
6182
6183                 mb(); /* state is changed by bnx2x_sp_event() */
6184                 if (*state_p == state)
6185                         return 0;
6186
6187                 msleep(1);
6188         }
6189
6190         /* timeout! */
6191         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6192                   poll ? "polling" : "waiting", state, idx);
6193 #ifdef BNX2X_STOP_ON_ERROR
6194         bnx2x_panic();
6195 #endif
6196
6197         return -EBUSY;
6198 }
6199
6200 static int bnx2x_setup_leading(struct bnx2x *bp)
6201 {
6202         int rc;
6203
6204         /* reset IGU state */
6205         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6206
6207         /* SETUP ramrod */
6208         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6209
6210         /* Wait for completion */
6211         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6212
6213         return rc;
6214 }
6215
6216 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6217 {
6218         /* reset IGU state */
6219         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6220
6221         /* SETUP ramrod */
6222         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6223         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6224
6225         /* Wait for completion */
6226         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6227                                  &(bp->fp[index].state), 0);
6228 }
6229
6230 static int bnx2x_poll(struct napi_struct *napi, int budget);
6231 static void bnx2x_set_rx_mode(struct net_device *dev);
6232
6233 /* must be called with rtnl_lock */
6234 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6235 {
6236         u32 load_code;
6237         int i, rc;
6238 #ifdef BNX2X_STOP_ON_ERROR
6239         if (unlikely(bp->panic))
6240                 return -EPERM;
6241 #endif
6242
6243         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6244
6245         /* Send LOAD_REQUEST command to MCP.
6246            Returns the type of LOAD command:
6247            if it is the first port to be initialized,
6248            common blocks should be initialized, otherwise not
6249         */
6250         if (!BP_NOMCP(bp)) {
6251                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6252                 if (!load_code) {
6253                         BNX2X_ERR("MCP response failure, aborting\n");
6254                         return -EBUSY;
6255                 }
6256                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6257                         return -EBUSY; /* other port in diagnostic mode */
6258
6259         } else {
6260                 int port = BP_PORT(bp);
6261
6262                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6263                    load_count[0], load_count[1], load_count[2]);
6264                 load_count[0]++;
6265                 load_count[1 + port]++;
6266                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6267                    load_count[0], load_count[1], load_count[2]);
6268                 if (load_count[0] == 1)
6269                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6270                 else if (load_count[1 + port] == 1)
6271                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6272                 else
6273                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6274         }
6275
6276         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6277             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6278                 bp->port.pmf = 1;
6279         else
6280                 bp->port.pmf = 0;
6281         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6282
6283         /* if we can't use MSI-X we only need one fp,
6284          * so try to enable MSI-X with the requested number of fp's
6285          * and fall back to INT#A with one fp
6286          */
6287         if (use_inta) {
6288                 bp->num_queues = 1;
6289
6290         } else {
6291                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6292                         /* user requested number */
6293                         bp->num_queues = use_multi;
6294
6295                 else if (use_multi)
6296                         bp->num_queues = min_t(u32, num_online_cpus(),
6297                                                BP_MAX_QUEUES(bp));
6298                 else
6299                         bp->num_queues = 1;
6300
6301                 if (bnx2x_enable_msix(bp)) {
6302                         /* failed to enable MSI-X */
6303                         bp->num_queues = 1;
6304                         if (use_multi)
6305                                 BNX2X_ERR("Multi requested but failed"
6306                                           " to enable MSI-X\n");
6307                 }
6308         }
6309         DP(NETIF_MSG_IFUP,
6310            "set number of queues to %d\n", bp->num_queues);
6311
6312         if (bnx2x_alloc_mem(bp))
6313                 return -ENOMEM;
6314
6315         for_each_queue(bp, i)
6316                 bnx2x_fp(bp, i, disable_tpa) =
6317                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6318
6319         if (bp->flags & USING_MSIX_FLAG) {
6320                 rc = bnx2x_req_msix_irqs(bp);
6321                 if (rc) {
6322                         pci_disable_msix(bp->pdev);
6323                         goto load_error;
6324                 }
6325         } else {
6326                 bnx2x_ack_int(bp);
6327                 rc = bnx2x_req_irq(bp);
6328                 if (rc) {
6329                         BNX2X_ERR("IRQ request failed, aborting\n");
6330                         goto load_error;
6331                 }
6332         }
6333
6334         for_each_queue(bp, i)
6335                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6336                                bnx2x_poll, 128);
6337
6338         /* Initialize HW */
6339         rc = bnx2x_init_hw(bp, load_code);
6340         if (rc) {
6341                 BNX2X_ERR("HW init failed, aborting\n");
6342                 goto load_error;
6343         }
6344
6345         /* Setup NIC internals and enable interrupts */
6346         bnx2x_nic_init(bp, load_code);
6347
6348         /* Send LOAD_DONE command to MCP */
6349         if (!BP_NOMCP(bp)) {
6350                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6351                 if (!load_code) {
6352                         BNX2X_ERR("MCP response failure, aborting\n");
6353                         rc = -EBUSY;
6354                         goto load_int_disable;
6355                 }
6356         }
6357
6358         bnx2x_stats_init(bp);
6359
6360         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6361
6362         /* Enable Rx interrupt handling before sending the ramrod
6363            as it's completed on Rx FP queue */
6364         for_each_queue(bp, i)
6365                 napi_enable(&bnx2x_fp(bp, i, napi));
6366
6367         /* Enable interrupt handling */
6368         atomic_set(&bp->intr_sem, 0);
6369
6370         rc = bnx2x_setup_leading(bp);
6371         if (rc) {
6372                 BNX2X_ERR("Setup leading failed!\n");
6373                 goto load_stop_netif;
6374         }
6375
6376         if (CHIP_IS_E1H(bp))
6377                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6378                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6379                         bp->state = BNX2X_STATE_DISABLED;
6380                 }
6381
6382         if (bp->state == BNX2X_STATE_OPEN)
6383                 for_each_nondefault_queue(bp, i) {
6384                         rc = bnx2x_setup_multi(bp, i);
6385                         if (rc)
6386                                 goto load_stop_netif;
6387                 }
6388
6389         if (CHIP_IS_E1(bp))
6390                 bnx2x_set_mac_addr_e1(bp, 1);
6391         else
6392                 bnx2x_set_mac_addr_e1h(bp, 1);
6393
6394         if (bp->port.pmf)
6395                 bnx2x_initial_phy_init(bp);
6396
6397         /* Start fast path */
6398         switch (load_mode) {
6399         case LOAD_NORMAL:
6400                 /* Tx queue should only be re-enabled */
6401                 netif_wake_queue(bp->dev);
6402                 bnx2x_set_rx_mode(bp->dev);
6403                 break;
6404
6405         case LOAD_OPEN:
6406                 netif_start_queue(bp->dev);
6407                 bnx2x_set_rx_mode(bp->dev);
6408                 if (bp->flags & USING_MSIX_FLAG)
6409                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6410                                bp->dev->name);
6411                 break;
6412
6413         case LOAD_DIAG:
6414                 bnx2x_set_rx_mode(bp->dev);
6415                 bp->state = BNX2X_STATE_DIAG;
6416                 break;
6417
6418         default:
6419                 break;
6420         }
6421
6422         if (!bp->port.pmf)
6423                 bnx2x__link_status_update(bp);
6424
6425         /* start the timer */
6426         mod_timer(&bp->timer, jiffies + bp->current_interval);
6427
6428
6429         return 0;
6430
6431 load_stop_netif:
6432         for_each_queue(bp, i)
6433                 napi_disable(&bnx2x_fp(bp, i, napi));
6434
6435 load_int_disable:
6436         bnx2x_int_disable_sync(bp);
6437
6438         /* Release IRQs */
6439         bnx2x_free_irq(bp);
6440
6441         /* Free SKBs, SGEs, TPA pool and driver internals */
6442         bnx2x_free_skbs(bp);
6443         for_each_queue(bp, i)
6444                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6445 load_error:
6446         bnx2x_free_mem(bp);
6447
6448         /* TBD we really need to reset the chip
6449            if we want to recover from this */
6450         return rc;
6451 }
6452
6453 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6454 {
6455         int rc;
6456
6457         /* halt the connection */
6458         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6459         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6460
6461         /* Wait for completion */
6462         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6463                                &(bp->fp[index].state), 1);
6464         if (rc) /* timeout */
6465                 return rc;
6466
6467         /* delete cfc entry */
6468         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6469
6470         /* Wait for completion */
6471         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6472                                &(bp->fp[index].state), 1);
6473         return rc;
6474 }
6475
6476 static int bnx2x_stop_leading(struct bnx2x *bp)
6477 {
6478         u16 dsb_sp_prod_idx;
6479         /* if the other port is handling traffic,
6480            this can take a lot of time */
6481         int cnt = 500;
6482         int rc;
6483
6484         might_sleep();
6485
6486         /* Send HALT ramrod */
6487         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6488         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6489
6490         /* Wait for completion */
6491         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6492                                &(bp->fp[0].state), 1);
6493         if (rc) /* timeout */
6494                 return rc;
6495
6496         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6497
6498         /* Send PORT_DELETE ramrod */
6499         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6500
6501         /* Wait for completion to arrive on the default status block;
6502            we are going to reset the chip anyway,
6503            so there is not much to do if this times out
6504          */
6505         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6506                 if (!cnt) {
6507                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6508                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6509                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6510 #ifdef BNX2X_STOP_ON_ERROR
6511                         bnx2x_panic();
6512 #else
6513                         rc = -EBUSY;
6514 #endif
6515                         break;
6516                 }
6517                 cnt--;
6518                 msleep(1);
6519         }
6520         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6521         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6522
6523         return rc;
6524 }
6525
6526 static void bnx2x_reset_func(struct bnx2x *bp)
6527 {
6528         int port = BP_PORT(bp);
6529         int func = BP_FUNC(bp);
6530         int base, i;
6531
6532         /* Configure IGU */
6533         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6534         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6535
6536         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6537
6538         /* Clear ILT */
6539         base = FUNC_ILT_BASE(func);
6540         for (i = base; i < base + ILT_PER_FUNC; i++)
6541                 bnx2x_ilt_wr(bp, i, 0);
6542 }
6543
6544 static void bnx2x_reset_port(struct bnx2x *bp)
6545 {
6546         int port = BP_PORT(bp);
6547         u32 val;
6548
6549         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6550
6551         /* Do not rcv packets to BRB */
6552         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6553         /* Do not direct rcv packets that are not for MCP to the BRB */
6554         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6555                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6556
6557         /* Configure AEU */
6558         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6559
6560         msleep(100);
6561         /* Check for BRB port occupancy */
6562         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6563         if (val)
6564                 DP(NETIF_MSG_IFDOWN,
6565                    "BRB1 is not empty  %d blocks are occupied\n", val);
6566
6567         /* TODO: Close Doorbell port? */
6568 }
6569
6570 static void bnx2x_reset_common(struct bnx2x *bp)
6571 {
6572         /* reset_common */
6573         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6574                0xd3ffff7f);
6575         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6576 }
6577
6578 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6579 {
6580         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6581            BP_FUNC(bp), reset_code);
6582
6583         switch (reset_code) {
6584         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6585                 bnx2x_reset_port(bp);
6586                 bnx2x_reset_func(bp);
6587                 bnx2x_reset_common(bp);
6588                 break;
6589
6590         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6591                 bnx2x_reset_port(bp);
6592                 bnx2x_reset_func(bp);
6593                 break;
6594
6595         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6596                 bnx2x_reset_func(bp);
6597                 break;
6598
6599         default:
6600                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6601                 break;
6602         }
6603 }
6604
6605 /* must be called with rtnl_lock */
6606 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6607 {
6608         int port = BP_PORT(bp);
6609         u32 reset_code = 0;
6610         int i, cnt, rc;
6611
6612         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6613
6614         bp->rx_mode = BNX2X_RX_MODE_NONE;
6615         bnx2x_set_storm_rx_mode(bp);
6616
6617         if (netif_running(bp->dev)) {
6618                 netif_tx_disable(bp->dev);
6619                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6620         }
6621
6622         del_timer_sync(&bp->timer);
6623         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6624                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6625         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6626
6627         /* Wait until tx fast path tasks complete */
6628         for_each_queue(bp, i) {
6629                 struct bnx2x_fastpath *fp = &bp->fp[i];
6630
6631                 cnt = 1000;
6632                 smp_rmb();
6633                 while (BNX2X_HAS_TX_WORK(fp)) {
6634
6635                         if (!netif_running(bp->dev))
6636                                 bnx2x_tx_int(fp, 1000);
6637
6638                         if (!cnt) {
6639                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6640                                           i);
6641 #ifdef BNX2X_STOP_ON_ERROR
6642                                 bnx2x_panic();
6643                                 return -EBUSY;
6644 #else
6645                                 break;
6646 #endif
6647                         }
6648                         cnt--;
6649                         msleep(1);
6650                         smp_rmb();
6651                 }
6652         }
6653
6654         /* Give HW time to discard old tx messages */
6655         msleep(1);
6656
6657         for_each_queue(bp, i)
6658                 napi_disable(&bnx2x_fp(bp, i, napi));
6659         /* Disable interrupts after Tx and Rx are disabled on stack level */
6660         bnx2x_int_disable_sync(bp);
6661
6662         /* Release IRQs */
6663         bnx2x_free_irq(bp);
6664
6665         if (unload_mode == UNLOAD_NORMAL)
6666                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6667
6668         else if (bp->flags & NO_WOL_FLAG) {
6669                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6670                 if (CHIP_IS_E1H(bp))
6671                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6672
6673         } else if (bp->wol) {
6674                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6675                 u8 *mac_addr = bp->dev->dev_addr;
6676                 u32 val;
6677                 /* The mac address is written to entries 1-4 to
6678                    preserve entry 0 which is used by the PMF */
6679                 u8 entry = (BP_E1HVN(bp) + 1)*8;
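     		/* each MAC_MATCH entry is 8 bytes wide: the first word
     		   takes the top 2 MAC bytes, the second word the low 4 */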
6680
6681                 val = (mac_addr[0] << 8) | mac_addr[1];
6682                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6683
6684                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6685                       (mac_addr[4] << 8) | mac_addr[5];
6686                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6687
6688                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6689
6690         } else
6691                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6692
6693         if (CHIP_IS_E1(bp)) {
6694                 struct mac_configuration_cmd *config =
6695                                                 bnx2x_sp(bp, mcast_config);
6696
6697                 bnx2x_set_mac_addr_e1(bp, 0);
6698
6699                 for (i = 0; i < config->hdr.length_6b; i++)
6700                         CAM_INVALIDATE(config->config_table[i]);
6701
6702                 config->hdr.length_6b = i;
6703                 if (CHIP_REV_IS_SLOW(bp))
6704                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6705                 else
6706                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6707                 config->hdr.client_id = BP_CL_ID(bp);
6708                 config->hdr.reserved1 = 0;
6709
6710                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6711                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6712                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6713
6714         } else { /* E1H */
6715                 bnx2x_set_mac_addr_e1h(bp, 0);
6716
6717                 for (i = 0; i < MC_HASH_SIZE; i++)
6718                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6719         }
6720
6721         if (CHIP_IS_E1H(bp))
6722                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6723
6724         /* Close multi and leading connections;
6725            completions for ramrods are collected synchronously */
6726         for_each_nondefault_queue(bp, i)
6727                 if (bnx2x_stop_multi(bp, i))
6728                         goto unload_error;
6729
6730         rc = bnx2x_stop_leading(bp);
6731         if (rc) {
6732                 BNX2X_ERR("Stop leading failed!\n");
6733 #ifdef BNX2X_STOP_ON_ERROR
6734                 return -EBUSY;
6735 #else
6736                 goto unload_error;
6737 #endif
6738         }
6739
6740 unload_error:
6741         if (!BP_NOMCP(bp))
6742                 reset_code = bnx2x_fw_command(bp, reset_code);
6743         else {
6744                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6745                    load_count[0], load_count[1], load_count[2]);
6746                 load_count[0]--;
6747                 load_count[1 + port]--;
6748                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6749                    load_count[0], load_count[1], load_count[2]);
6750                 if (load_count[0] == 0)
6751                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6752                 else if (load_count[1 + port] == 0)
6753                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6754                 else
6755                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6756         }
6757
6758         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6759             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6760                 bnx2x__link_reset(bp);
6761
6762         /* Reset the chip */
6763         bnx2x_reset_chip(bp, reset_code);
6764
6765         /* Report UNLOAD_DONE to MCP */
6766         if (!BP_NOMCP(bp))
6767                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6768
6769         /* Free SKBs, SGEs, TPA pool and driver internals */
6770         bnx2x_free_skbs(bp);
6771         for_each_queue(bp, i)
6772                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6773         bnx2x_free_mem(bp);
6774
6775         bp->state = BNX2X_STATE_CLOSED;
6776
6777         netif_carrier_off(bp->dev);
6778
6779         return 0;
6780 }
6781
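     /* Recover from a fatal condition (e.g. a TX timeout) by unloading
      * and reloading the NIC; runs from the workqueue under rtnl_lock */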
6782 static void bnx2x_reset_task(struct work_struct *work)
6783 {
6784         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6785
6786 #ifdef BNX2X_STOP_ON_ERROR
6787         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
6788                   " so reset not done to allow debug dump\n"
6789          KERN_ERR " you will need to reboot when done\n");
6790         return;
6791 #endif
6792
6793         rtnl_lock();
6794
6795         if (!netif_running(bp->dev))
6796                 goto reset_task_exit;
6797
6798         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6799         bnx2x_nic_load(bp, LOAD_NORMAL);
6800
6801 reset_task_exit:
6802         rtnl_unlock();
6803 }
6804
6805 /* end of nic load/unload */
6806
6807 /* ethtool_ops */
6808
6809 /*
6810  * Init service functions
6811  */
6812
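     /* If a pre-boot (UNDI/PXE) driver left the device initialized,
      * quiesce the traffic paths and bring the chip to a clean reset
      * state before the regular load */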
6813 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6814 {
6815         u32 val;
6816
6817         /* Check if there is any driver already loaded */
6818         val = REG_RD(bp, MISC_REG_UNPREPARED);
6819         if (val == 0x1) {
6820                 /* Check if it is the UNDI driver:
6821                  * UNDI initializes the CID offset for the normal doorbell to 0x7
6822                  */
6823                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6824                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6825                 if (val == 0x7) {
6826                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6827                         /* save our func */
6828                         int func = BP_FUNC(bp);
6829                         u32 swap_en;
6830                         u32 swap_val;
6831
6832                         BNX2X_DEV_INFO("UNDI is active! resetting device\n");
6833
6834                         /* try to unload UNDI on port 0 */
6835                         bp->func = 0;
6836                         bp->fw_seq =
6837                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6838                                 DRV_MSG_SEQ_NUMBER_MASK);
6839                         reset_code = bnx2x_fw_command(bp, reset_code);
6840
6841                         /* if UNDI is loaded on the other port */
6842                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6843
6844                                 /* send "DONE" for previous unload */
6845                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6846
6847                                 /* unload UNDI on port 1 */
6848                                 bp->func = 1;
6849                                 bp->fw_seq =
6850                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6851                                         DRV_MSG_SEQ_NUMBER_MASK);
6852                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6853
6854                                 bnx2x_fw_command(bp, reset_code);
6855                         }
6856
6857                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6858                                     HC_REG_CONFIG_0), 0x1000);
6859
6860                         /* close input traffic and wait for it to drain */
6861                         /* Do not receive packets into the BRB */
6862                         REG_WR(bp,
6863                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6864                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6865                         /* Do not direct packets that are not for the MCP
6866                          * to the BRB */
6867                         REG_WR(bp,
6868                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6869                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6870                         /* clear AEU */
6871                         REG_WR(bp,
6872                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6873                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6874                         msleep(10);
6875
6876                         /* save NIG port swap info */
6877                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6878                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6879                         /* reset device */
6880                         REG_WR(bp,
6881                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6882                                0xd3ffffff);
6883                         REG_WR(bp,
6884                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6885                                0x1403);
6886                         /* take the NIG out of reset and restore swap values */
6887                         REG_WR(bp,
6888                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6889                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6890                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6891                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6892
6893                         /* send unload done to the MCP */
6894                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6895
6896                         /* restore our func and fw_seq */
6897                         bp->func = func;
6898                         bp->fw_seq =
6899                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6900                                 DRV_MSG_SEQ_NUMBER_MASK);
6901                 }
6902                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6903         }
6904 }
6905
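     /* Read the chip-common configuration: chip id, flash size, shmem
      * base, bootcode version and WoL capability */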
6906 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6907 {
6908         u32 val, val2, val3, val4, id;
6909         u16 pmc;
6910
6911         /* Get the chip revision id and number. */
6912         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6913         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6914         id = ((val & 0xffff) << 16);
6915         val = REG_RD(bp, MISC_REG_CHIP_REV);
6916         id |= ((val & 0xf) << 12);
6917         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6918         id |= ((val & 0xff) << 4);
6919         val = REG_RD(bp, MISC_REG_BOND_ID);
6920         id |= (val & 0xf);
6921         bp->common.chip_id = id;
6922         bp->link_params.chip_id = bp->common.chip_id;
6923         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6924
6925         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6926         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6927                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6928         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6929                        bp->common.flash_size, bp->common.flash_size);
6930
6931         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6932         bp->link_params.shmem_base = bp->common.shmem_base;
6933         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6934
6935         if (!bp->common.shmem_base ||
6936             (bp->common.shmem_base < 0xA0000) ||
6937             (bp->common.shmem_base >= 0xC0000)) {
6938                 BNX2X_DEV_INFO("MCP not active\n");
6939                 bp->flags |= NO_MCP_FLAG;
6940                 return;
6941         }
6942
6943         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6944         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6945                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6946                 BNX2X_ERR("BAD MCP validity signature\n");
6947
6948         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6949         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6950
6951         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6952                        bp->common.hw_config, bp->common.board);
6953
6954         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6955                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6956                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6957
6958         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6959         bp->common.bc_ver = val;
6960         BNX2X_DEV_INFO("bc_ver %X\n", val);
6961         if (val < BNX2X_BC_VER) {
6962                 /* for now only warn;
6963                  * later we might need to enforce this */
6964                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6965                           " please upgrade BC\n", BNX2X_BC_VER, val);
6966         }
6967
6968         if (BP_E1HVN(bp) == 0) {
6969                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6970                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6971         } else {
6972                 /* no WOL capability for E1HVN != 0 */
6973                 bp->flags |= NO_WOL_FLAG;
6974         }
6975         BNX2X_DEV_INFO("%sWoL capable\n",
6976                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6977
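             /* the 16-byte part number is read as four dwords */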
6978         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6979         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6980         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6981         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6982
6983         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6984                val, val2, val3, val4);
6985 }
6986
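     /* Build the ethtool 'supported' mask from the switch configuration
      * and the external PHY type, then trim it by speed_cap_mask */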
6987 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6988                                                     u32 switch_cfg)
6989 {
6990         int port = BP_PORT(bp);
6991         u32 ext_phy_type;
6992
6993         switch (switch_cfg) {
6994         case SWITCH_CFG_1G:
6995                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6996
6997                 ext_phy_type =
6998                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6999                 switch (ext_phy_type) {
7000                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7001                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7002                                        ext_phy_type);
7003
7004                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7005                                                SUPPORTED_10baseT_Full |
7006                                                SUPPORTED_100baseT_Half |
7007                                                SUPPORTED_100baseT_Full |
7008                                                SUPPORTED_1000baseT_Full |
7009                                                SUPPORTED_2500baseX_Full |
7010                                                SUPPORTED_TP |
7011                                                SUPPORTED_FIBRE |
7012                                                SUPPORTED_Autoneg |
7013                                                SUPPORTED_Pause |
7014                                                SUPPORTED_Asym_Pause);
7015                         break;
7016
7017                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7018                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7019                                        ext_phy_type);
7020
7021                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7022                                                SUPPORTED_10baseT_Full |
7023                                                SUPPORTED_100baseT_Half |
7024                                                SUPPORTED_100baseT_Full |
7025                                                SUPPORTED_1000baseT_Full |
7026                                                SUPPORTED_TP |
7027                                                SUPPORTED_FIBRE |
7028                                                SUPPORTED_Autoneg |
7029                                                SUPPORTED_Pause |
7030                                                SUPPORTED_Asym_Pause);
7031                         break;
7032
7033                 default:
7034                         BNX2X_ERR("NVRAM config error. "
7035                                   "BAD SerDes ext_phy_config 0x%x\n",
7036                                   bp->link_params.ext_phy_config);
7037                         return;
7038                 }
7039
7040                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7041                                            port*0x10);
7042                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7043                 break;
7044
7045         case SWITCH_CFG_10G:
7046                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7047
7048                 ext_phy_type =
7049                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7050                 switch (ext_phy_type) {
7051                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7052                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7053                                        ext_phy_type);
7054
7055                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7056                                                SUPPORTED_10baseT_Full |
7057                                                SUPPORTED_100baseT_Half |
7058                                                SUPPORTED_100baseT_Full |
7059                                                SUPPORTED_1000baseT_Full |
7060                                                SUPPORTED_2500baseX_Full |
7061                                                SUPPORTED_10000baseT_Full |
7062                                                SUPPORTED_TP |
7063                                                SUPPORTED_FIBRE |
7064                                                SUPPORTED_Autoneg |
7065                                                SUPPORTED_Pause |
7066                                                SUPPORTED_Asym_Pause);
7067                         break;
7068
7069                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7070                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7071                                        ext_phy_type);
7072
7073                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7074                                                SUPPORTED_FIBRE |
7075                                                SUPPORTED_Pause |
7076                                                SUPPORTED_Asym_Pause);
7077                         break;
7078
7079                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7080                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7081                                        ext_phy_type);
7082
7083                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7084                                                SUPPORTED_1000baseT_Full |
7085                                                SUPPORTED_FIBRE |
7086                                                SUPPORTED_Pause |
7087                                                SUPPORTED_Asym_Pause);
7088                         break;
7089
7090                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7091                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7092                                        ext_phy_type);
7093
7094                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7095                                                SUPPORTED_1000baseT_Full |
7096                                                SUPPORTED_FIBRE |
7097                                                SUPPORTED_Autoneg |
7098                                                SUPPORTED_Pause |
7099                                                SUPPORTED_Asym_Pause);
7100                         break;
7101
7102                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7103                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7104                                        ext_phy_type);
7105
7106                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7107                                                SUPPORTED_2500baseX_Full |
7108                                                SUPPORTED_1000baseT_Full |
7109                                                SUPPORTED_FIBRE |
7110                                                SUPPORTED_Autoneg |
7111                                                SUPPORTED_Pause |
7112                                                SUPPORTED_Asym_Pause);
7113                         break;
7114
7115                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7116                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7117                                        ext_phy_type);
7118
7119                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7120                                                SUPPORTED_TP |
7121                                                SUPPORTED_Autoneg |
7122                                                SUPPORTED_Pause |
7123                                                SUPPORTED_Asym_Pause);
7124                         break;
7125
7126                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7127                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7128                                   bp->link_params.ext_phy_config);
7129                         break;
7130
7131                 default:
7132                         BNX2X_ERR("NVRAM config error. "
7133                                   "BAD XGXS ext_phy_config 0x%x\n",
7134                                   bp->link_params.ext_phy_config);
7135                         return;
7136                 }
7137
7138                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7139                                            port*0x18);
7140                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7141
7142                 break;
7143
7144         default:
7145                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7146                           bp->port.link_config);
7147                 return;
7148         }
7149         bp->link_params.phy_addr = bp->port.phy_addr;
7150
7151         /* mask what we support according to speed_cap_mask */
7152         if (!(bp->link_params.speed_cap_mask &
7153                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7154                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7155
7156         if (!(bp->link_params.speed_cap_mask &
7157                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7158                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7159
7160         if (!(bp->link_params.speed_cap_mask &
7161                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7162                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7163
7164         if (!(bp->link_params.speed_cap_mask &
7165                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7166                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7167
7168         if (!(bp->link_params.speed_cap_mask &
7169                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7170                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7171                                         SUPPORTED_1000baseT_Full);
7172
7173         if (!(bp->link_params.speed_cap_mask &
7174                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7175                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7176
7177         if (!(bp->link_params.speed_cap_mask &
7178                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7179                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7180
7181         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7182 }
7183
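     /* Translate the NVRAM link_config into the requested speed, duplex,
      * flow control and the ethtool advertising mask */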
7184 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7185 {
7186         bp->link_params.req_duplex = DUPLEX_FULL;
7187
7188         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7189         case PORT_FEATURE_LINK_SPEED_AUTO:
7190                 if (bp->port.supported & SUPPORTED_Autoneg) {
7191                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7192                         bp->port.advertising = bp->port.supported;
7193                 } else {
7194                         u32 ext_phy_type =
7195                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7196
7197                         if ((ext_phy_type ==
7198                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7199                             (ext_phy_type ==
7200                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7201                                 /* force 10G, no AN */
7202                                 bp->link_params.req_line_speed = SPEED_10000;
7203                                 bp->port.advertising =
7204                                                 (ADVERTISED_10000baseT_Full |
7205                                                  ADVERTISED_FIBRE);
7206                                 break;
7207                         }
7208                         BNX2X_ERR("NVRAM config error. "
7209                                   "Invalid link_config 0x%x"
7210                                   "  Autoneg not supported\n",
7211                                   bp->port.link_config);
7212                         return;
7213                 }
7214                 break;
7215
7216         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7217                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7218                         bp->link_params.req_line_speed = SPEED_10;
7219                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7220                                                 ADVERTISED_TP);
7221                 } else {
7222                         BNX2X_ERR("NVRAM config error. "
7223                                   "Invalid link_config 0x%x"
7224                                   "  speed_cap_mask 0x%x\n",
7225                                   bp->port.link_config,
7226                                   bp->link_params.speed_cap_mask);
7227                         return;
7228                 }
7229                 break;
7230
7231         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7232                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7233                         bp->link_params.req_line_speed = SPEED_10;
7234                         bp->link_params.req_duplex = DUPLEX_HALF;
7235                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7236                                                 ADVERTISED_TP);
7237                 } else {
7238                         BNX2X_ERR("NVRAM config error. "
7239                                   "Invalid link_config 0x%x"
7240                                   "  speed_cap_mask 0x%x\n",
7241                                   bp->port.link_config,
7242                                   bp->link_params.speed_cap_mask);
7243                         return;
7244                 }
7245                 break;
7246
7247         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7248                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7249                         bp->link_params.req_line_speed = SPEED_100;
7250                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7251                                                 ADVERTISED_TP);
7252                 } else {
7253                         BNX2X_ERR("NVRAM config error. "
7254                                   "Invalid link_config 0x%x"
7255                                   "  speed_cap_mask 0x%x\n",
7256                                   bp->port.link_config,
7257                                   bp->link_params.speed_cap_mask);
7258                         return;
7259                 }
7260                 break;
7261
7262         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7263                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7264                         bp->link_params.req_line_speed = SPEED_100;
7265                         bp->link_params.req_duplex = DUPLEX_HALF;
7266                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7267                                                 ADVERTISED_TP);
7268                 } else {
7269                         BNX2X_ERR("NVRAM config error. "
7270                                   "Invalid link_config 0x%x"
7271                                   "  speed_cap_mask 0x%x\n",
7272                                   bp->port.link_config,
7273                                   bp->link_params.speed_cap_mask);
7274                         return;
7275                 }
7276                 break;
7277
7278         case PORT_FEATURE_LINK_SPEED_1G:
7279                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7280                         bp->link_params.req_line_speed = SPEED_1000;
7281                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7282                                                 ADVERTISED_TP);
7283                 } else {
7284                         BNX2X_ERR("NVRAM config error. "
7285                                   "Invalid link_config 0x%x"
7286                                   "  speed_cap_mask 0x%x\n",
7287                                   bp->port.link_config,
7288                                   bp->link_params.speed_cap_mask);
7289                         return;
7290                 }
7291                 break;
7292
7293         case PORT_FEATURE_LINK_SPEED_2_5G:
7294                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7295                         bp->link_params.req_line_speed = SPEED_2500;
7296                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7297                                                 ADVERTISED_TP);
7298                 } else {
7299                         BNX2X_ERR("NVRAM config error. "
7300                                   "Invalid link_config 0x%x"
7301                                   "  speed_cap_mask 0x%x\n",
7302                                   bp->port.link_config,
7303                                   bp->link_params.speed_cap_mask);
7304                         return;
7305                 }
7306                 break;
7307
7308         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7309         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7310         case PORT_FEATURE_LINK_SPEED_10G_KR:
7311                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7312                         bp->link_params.req_line_speed = SPEED_10000;
7313                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7314                                                 ADVERTISED_FIBRE);
7315                 } else {
7316                         BNX2X_ERR("NVRAM config error. "
7317                                   "Invalid link_config 0x%x"
7318                                   "  speed_cap_mask 0x%x\n",
7319                                   bp->port.link_config,
7320                                   bp->link_params.speed_cap_mask);
7321                         return;
7322                 }
7323                 break;
7324
7325         default:
7326                 BNX2X_ERR("NVRAM config error. "
7327                           "BAD link speed link_config 0x%x\n",
7328                           bp->port.link_config);
7329                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7330                 bp->port.advertising = bp->port.supported;
7331                 break;
7332         }
7333
7334         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7335                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7336         if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7337             !(bp->port.supported & SUPPORTED_Autoneg))
7338                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7339
7340         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7341                        "  advertising 0x%x\n",
7342                        bp->link_params.req_line_speed,
7343                        bp->link_params.req_duplex,
7344                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7345 }
7346
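     /* Read the per-port configuration (serdes/lane/PHY settings, speed
      * capabilities and the MAC address) from shared memory */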
7347 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7348 {
7349         int port = BP_PORT(bp);
7350         u32 val, val2;
7351
7352         bp->link_params.bp = bp;
7353         bp->link_params.port = port;
7354
7355         bp->link_params.serdes_config =
7356                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7357         bp->link_params.lane_config =
7358                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7359         bp->link_params.ext_phy_config =
7360                 SHMEM_RD(bp,
7361                          dev_info.port_hw_config[port].external_phy_config);
7362         bp->link_params.speed_cap_mask =
7363                 SHMEM_RD(bp,
7364                          dev_info.port_hw_config[port].speed_capability_mask);
7365
7366         bp->port.link_config =
7367                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7368
7369         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7370              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7371                        "  link_config 0x%08x\n",
7372                        bp->link_params.serdes_config,
7373                        bp->link_params.lane_config,
7374                        bp->link_params.ext_phy_config,
7375                        bp->link_params.speed_cap_mask, bp->port.link_config);
7376
7377         bp->link_params.switch_cfg = (bp->port.link_config &
7378                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7379         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7380
7381         bnx2x_link_settings_requested(bp);
7382
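             /* the MAC address is kept in shmem as 16 upper + 32 lower bits */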
7383         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7384         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7385         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7386         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7387         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7388         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7389         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7390         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7391         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7392         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7393 }
7394
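     /* Gather the common, function (E1H MF) and port HW configuration */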
7395 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7396 {
7397         int func = BP_FUNC(bp);
7398         u32 val, val2;
7399         int rc = 0;
7400
7401         bnx2x_get_common_hwinfo(bp);
7402
7403         bp->e1hov = 0;
7404         bp->e1hmf = 0;
7405         if (CHIP_IS_E1H(bp)) {
7406                 bp->mf_config =
7407                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7408
7409                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7410                        FUNC_MF_CFG_E1HOV_TAG_MASK);
7411                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7412
7413                         bp->e1hov = val;
7414                         bp->e1hmf = 1;
7415                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7416                                        "(0x%04x)\n",
7417                                        func, bp->e1hov, bp->e1hov);
7418                 } else {
7419                         BNX2X_DEV_INFO("Single function mode\n");
7420                         if (BP_E1HVN(bp)) {
7421                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7422                                           "  aborting\n", func);
7423                                 rc = -EPERM;
7424                         }
7425                 }
7426         }
7427
7428         if (!BP_NOMCP(bp)) {
7429                 bnx2x_get_port_hwinfo(bp);
7430
7431                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7432                               DRV_MSG_SEQ_NUMBER_MASK);
7433                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7434         }
7435
7436         if (IS_E1HMF(bp)) {
7437                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7438                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7439                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7440                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7441                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7442                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7443                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7444                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7445                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7446                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7447                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7448                                ETH_ALEN);
7449                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7450                                ETH_ALEN);
7451                 }
7452
7453                 return rc;
7454         }
7455
7456         if (BP_NOMCP(bp)) {
7457                 /* only supposed to happen on emulation/FPGA */
7458                 BNX2X_ERR("warning: random MAC workaround active\n");
7459                 random_ether_addr(bp->dev->dev_addr);
7460                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7461         }
7462
7463         return rc;
7464 }
7465
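     /* One-time driver-private initialization: work tasks, HW info,
      * TPA flags, default ring sizes, coalescing and the periodic timer */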
7466 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7467 {
7468         int func = BP_FUNC(bp);
7469         int rc;
7470
7471         /* Disable interrupt handling until HW is initialized */
7472         atomic_set(&bp->intr_sem, 1);
7473
7474         mutex_init(&bp->port.phy_mutex);
7475
7476         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7477         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7478
7479         rc = bnx2x_get_hwinfo(bp);
7480
7481         /* need to reset the chip if UNDI was active */
7482         if (!BP_NOMCP(bp))
7483                 bnx2x_undi_unload(bp);
7484
7485         if (CHIP_REV_IS_FPGA(bp))
7486                 printk(KERN_ERR PFX "FPGA detected\n");
7487
7488         if (BP_NOMCP(bp) && (func == 0))
7489                 printk(KERN_ERR PFX
7490                        "MCP disabled, must load devices in order!\n");
7491
7492         /* Set TPA flags */
7493         if (disable_tpa) {
7494                 bp->flags &= ~TPA_ENABLE_FLAG;
7495                 bp->dev->features &= ~NETIF_F_LRO;
7496         } else {
7497                 bp->flags |= TPA_ENABLE_FLAG;
7498                 bp->dev->features |= NETIF_F_LRO;
7499         }
7500
7501
7502         bp->tx_ring_size = MAX_TX_AVAIL;
7503         bp->rx_ring_size = MAX_RX_AVAIL;
7504
7505         bp->rx_csum = 1;
7506         bp->rx_offset = 0;
7507
7508         bp->tx_ticks = 50;
7509         bp->rx_ticks = 25;
7510
7511         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7512         bp->current_interval = (poll ? poll : bp->timer_interval);
7513
7514         init_timer(&bp->timer);
7515         bp->timer.expires = jiffies + bp->current_interval;
7516         bp->timer.data = (unsigned long) bp;
7517         bp->timer.function = bnx2x_timer;
7518
7519         return rc;
7520 }
7521
7522 /*
7523  * ethtool service functions
7524  */
7525
7526 /* All ethtool functions called with rtnl_lock */
7527
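     /* report the actual link parameters when the carrier is up,
      * otherwise the requested ones */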
7528 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7529 {
7530         struct bnx2x *bp = netdev_priv(dev);
7531
7532         cmd->supported = bp->port.supported;
7533         cmd->advertising = bp->port.advertising;
7534
7535         if (netif_carrier_ok(dev)) {
7536                 cmd->speed = bp->link_vars.line_speed;
7537                 cmd->duplex = bp->link_vars.duplex;
7538         } else {
7539                 cmd->speed = bp->link_params.req_line_speed;
7540                 cmd->duplex = bp->link_params.req_duplex;
7541         }
7542         if (IS_E1HMF(bp)) {
7543                 u16 vn_max_rate;
7544
7545                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7546                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7547                 if (vn_max_rate < cmd->speed)
7548                         cmd->speed = vn_max_rate;
7549         }
7550
7551         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7552                 u32 ext_phy_type =
7553                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7554
7555                 switch (ext_phy_type) {
7556                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7557                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7558                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7559                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7560                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7561                         cmd->port = PORT_FIBRE;
7562                         break;
7563
7564                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7565                         cmd->port = PORT_TP;
7566                         break;
7567
7568                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7569                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7570                                   bp->link_params.ext_phy_config);
7571                         break;
7572
7573                 default:
7574                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7575                            bp->link_params.ext_phy_config);
7576                         break;
7577                 }
7578         } else
7579                 cmd->port = PORT_TP;
7580
7581         cmd->phy_address = bp->port.phy_addr;
7582         cmd->transceiver = XCVR_INTERNAL;
7583
7584         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7585                 cmd->autoneg = AUTONEG_ENABLE;
7586         else
7587                 cmd->autoneg = AUTONEG_DISABLE;
7588
7589         cmd->maxtxpkt = 0;
7590         cmd->maxrxpkt = 0;
7591
7592         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7593            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7594            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7595            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7596            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7597            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7598            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7599
7600         return 0;
7601 }
7602
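     /* validate the requested autoneg/speed/duplex against the supported
      * mask and re-drive the link if the interface is running */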
7603 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7604 {
7605         struct bnx2x *bp = netdev_priv(dev);
7606         u32 advertising;
7607
7608         if (IS_E1HMF(bp))
7609                 return 0;
7610
7611         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7612            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7613            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7614            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7615            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7616            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7617            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7618
7619         if (cmd->autoneg == AUTONEG_ENABLE) {
7620                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7621                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7622                         return -EINVAL;
7623                 }
7624
7625                 /* advertise the requested speed and duplex if supported */
7626                 cmd->advertising &= bp->port.supported;
7627
7628                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7629                 bp->link_params.req_duplex = DUPLEX_FULL;
7630                 bp->port.advertising |= (ADVERTISED_Autoneg |
7631                                          cmd->advertising);
7632
7633         } else { /* forced speed */
7634                 /* advertise the requested speed and duplex if supported */
7635                 switch (cmd->speed) {
7636                 case SPEED_10:
7637                         if (cmd->duplex == DUPLEX_FULL) {
7638                                 if (!(bp->port.supported &
7639                                       SUPPORTED_10baseT_Full)) {
7640                                         DP(NETIF_MSG_LINK,
7641                                            "10M full not supported\n");
7642                                         return -EINVAL;
7643                                 }
7644
7645                                 advertising = (ADVERTISED_10baseT_Full |
7646                                                ADVERTISED_TP);
7647                         } else {
7648                                 if (!(bp->port.supported &
7649                                       SUPPORTED_10baseT_Half)) {
7650                                         DP(NETIF_MSG_LINK,
7651                                            "10M half not supported\n");
7652                                         return -EINVAL;
7653                                 }
7654
7655                                 advertising = (ADVERTISED_10baseT_Half |
7656                                                ADVERTISED_TP);
7657                         }
7658                         break;
7659
7660                 case SPEED_100:
7661                         if (cmd->duplex == DUPLEX_FULL) {
7662                                 if (!(bp->port.supported &
7663                                                 SUPPORTED_100baseT_Full)) {
7664                                         DP(NETIF_MSG_LINK,
7665                                            "100M full not supported\n");
7666                                         return -EINVAL;
7667                                 }
7668
7669                                 advertising = (ADVERTISED_100baseT_Full |
7670                                                ADVERTISED_TP);
7671                         } else {
7672                                 if (!(bp->port.supported &
7673                                                 SUPPORTED_100baseT_Half)) {
7674                                         DP(NETIF_MSG_LINK,
7675                                            "100M half not supported\n");
7676                                         return -EINVAL;
7677                                 }
7678
7679                                 advertising = (ADVERTISED_100baseT_Half |
7680                                                ADVERTISED_TP);
7681                         }
7682                         break;
7683
7684                 case SPEED_1000:
7685                         if (cmd->duplex != DUPLEX_FULL) {
7686                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7687                                 return -EINVAL;
7688                         }
7689
7690                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7691                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7692                                 return -EINVAL;
7693                         }
7694
7695                         advertising = (ADVERTISED_1000baseT_Full |
7696                                        ADVERTISED_TP);
7697                         break;
7698
7699                 case SPEED_2500:
7700                         if (cmd->duplex != DUPLEX_FULL) {
7701                                 DP(NETIF_MSG_LINK,
7702                                    "2.5G half not supported\n");
7703                                 return -EINVAL;
7704                         }
7705
7706                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7707                                 DP(NETIF_MSG_LINK,
7708                                    "2.5G full not supported\n");
7709                                 return -EINVAL;
7710                         }
7711
7712                         advertising = (ADVERTISED_2500baseX_Full |
7713                                        ADVERTISED_TP);
7714                         break;
7715
7716                 case SPEED_10000:
7717                         if (cmd->duplex != DUPLEX_FULL) {
7718                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7719                                 return -EINVAL;
7720                         }
7721
7722                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7723                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7724                                 return -EINVAL;
7725                         }
7726
7727                         advertising = (ADVERTISED_10000baseT_Full |
7728                                        ADVERTISED_FIBRE);
7729                         break;
7730
7731                 default:
7732                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7733                         return -EINVAL;
7734                 }
7735
7736                 bp->link_params.req_line_speed = cmd->speed;
7737                 bp->link_params.req_duplex = cmd->duplex;
7738                 bp->port.advertising = advertising;
7739         }
7740
7741         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7742            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7743            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7744            bp->port.advertising);
7745
7746         if (netif_running(dev)) {
7747                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7748                 bnx2x_link_set(bp);
7749         }
7750
7751         return 0;
7752 }
7753
7754 #define PHY_FW_VER_LEN                  10
7755
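     /* report driver, bootcode and (when this function is the PMF)
      * external PHY firmware versions */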
7756 static void bnx2x_get_drvinfo(struct net_device *dev,
7757                               struct ethtool_drvinfo *info)
7758 {
7759         struct bnx2x *bp = netdev_priv(dev);
7760         u8 phy_fw_ver[PHY_FW_VER_LEN];
7761
7762         strcpy(info->driver, DRV_MODULE_NAME);
7763         strcpy(info->version, DRV_MODULE_VERSION);
7764
7765         phy_fw_ver[0] = '\0';
7766         if (bp->port.pmf) {
7767                 bnx2x_acquire_phy_lock(bp);
7768                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7769                                              (bp->state != BNX2X_STATE_CLOSED),
7770                                              phy_fw_ver, PHY_FW_VER_LEN);
7771                 bnx2x_release_phy_lock(bp);
7772         }
7773
7774         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7775                  (bp->common.bc_ver & 0xff0000) >> 16,
7776                  (bp->common.bc_ver & 0xff00) >> 8,
7777                  (bp->common.bc_ver & 0xff),
7778                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7779         strcpy(info->bus_info, pci_name(bp->pdev));
7780         info->n_stats = BNX2X_NUM_STATS;
7781         info->testinfo_len = BNX2X_NUM_TESTS;
7782         info->eedump_len = bp->common.flash_size;
7783         info->regdump_len = 0;
7784 }
7785
7786 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7787 {
7788         struct bnx2x *bp = netdev_priv(dev);
7789
7790         if (bp->flags & NO_WOL_FLAG) {
7791                 wol->supported = 0;
7792                 wol->wolopts = 0;
7793         } else {
7794                 wol->supported = WAKE_MAGIC;
7795                 if (bp->wol)
7796                         wol->wolopts = WAKE_MAGIC;
7797                 else
7798                         wol->wolopts = 0;
7799         }
7800         memset(&wol->sopass, 0, sizeof(wol->sopass));
7801 }
7802
7803 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7804 {
7805         struct bnx2x *bp = netdev_priv(dev);
7806
7807         if (wol->wolopts & ~WAKE_MAGIC)
7808                 return -EINVAL;
7809
7810         if (wol->wolopts & WAKE_MAGIC) {
7811                 if (bp->flags & NO_WOL_FLAG)
7812                         return -EINVAL;
7813
7814                 bp->wol = 1;
7815         } else
7816                 bp->wol = 0;
7817
7818         return 0;
7819 }
7820
7821 static u32 bnx2x_get_msglevel(struct net_device *dev)
7822 {
7823         struct bnx2x *bp = netdev_priv(dev);
7824
7825         return bp->msglevel;
7826 }
7827
7828 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7829 {
7830         struct bnx2x *bp = netdev_priv(dev);
7831
7832         if (capable(CAP_NET_ADMIN))
7833                 bp->msglevel = level;
7834 }
7835
7836 static int bnx2x_nway_reset(struct net_device *dev)
7837 {
7838         struct bnx2x *bp = netdev_priv(dev);
7839
7840         if (!bp->port.pmf)
7841                 return 0;
7842
7843         if (netif_running(dev)) {
7844                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7845                 bnx2x_link_set(bp);
7846         }
7847
7848         return 0;
7849 }
7850
7851 static int bnx2x_get_eeprom_len(struct net_device *dev)
7852 {
7853         struct bnx2x *bp = netdev_priv(dev);
7854
7855         return bp->common.flash_size;
7856 }
7857
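     /* NVRAM access is arbitrated per port through the MCP SW
      * arbitration register */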
7858 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7859 {
7860         int port = BP_PORT(bp);
7861         int count, i;
7862         u32 val = 0;
7863
7864         /* adjust timeout for emulation/FPGA */
7865         count = NVRAM_TIMEOUT_COUNT;
7866         if (CHIP_REV_IS_SLOW(bp))
7867                 count *= 100;
7868
7869         /* request access to nvram interface */
7870         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7871                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7872
7873         for (i = 0; i < count*10; i++) {
7874                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7875                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7876                         break;
7877
7878                 udelay(5);
7879         }
7880
7881         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7882                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7883                 return -EBUSY;
7884         }
7885
7886         return 0;
7887 }
7888
7889 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7890 {
7891         int port = BP_PORT(bp);
7892         int count, i;
7893         u32 val = 0;
7894
7895         /* adjust timeout for emulation/FPGA */
7896         count = NVRAM_TIMEOUT_COUNT;
7897         if (CHIP_REV_IS_SLOW(bp))
7898                 count *= 100;
7899
7900         /* relinquish nvram interface */
7901         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7902                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7903
7904         for (i = 0; i < count*10; i++) {
7905                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7906                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7907                         break;
7908
7909                 udelay(5);
7910         }
7911
7912         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7913                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7914                 return -EBUSY;
7915         }
7916
7917         return 0;
7918 }
7919
7920 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7921 {
7922         u32 val;
7923
7924         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7925
7926         /* enable both bits, even on read */
7927         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7928                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7929                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7930 }
7931
7932 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7933 {
7934         u32 val;
7935
7936         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7937
7938         /* disable both bits, even after read */
7939         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7940                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7941                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7942 }
7943
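     /* issue a single dword read and poll for the DONE bit */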
7944 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7945                                   u32 cmd_flags)
7946 {
7947         int count, i, rc;
7948         u32 val;
7949
7950         /* build the command word */
7951         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7952
7953         /* need to clear DONE bit separately */
7954         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7955
7956         /* address of the NVRAM to read from */
7957         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7958                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7959
7960         /* issue a read command */
7961         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7962
7963         /* adjust timeout for emulation/FPGA */
7964         count = NVRAM_TIMEOUT_COUNT;
7965         if (CHIP_REV_IS_SLOW(bp))
7966                 count *= 100;
7967
7968         /* wait for completion */
7969         *ret_val = 0;
7970         rc = -EBUSY;
7971         for (i = 0; i < count; i++) {
7972                 udelay(5);
7973                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7974
7975                 if (val & MCPR_NVM_COMMAND_DONE) {
7976                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7977                         /* we read nvram data in cpu order,
7978                          * but ethtool sees it as an array of bytes;
7979                          * converting to big-endian does the work */
7980                         val = cpu_to_be32(val);
7981                         *ret_val = val;
7982                         rc = 0;
7983                         break;
7984                 }
7985         }
7986
7987         return rc;
7988 }
7989
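     /* read a dword-aligned range using FIRST/LAST command framing */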
7990 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7991                             int buf_size)
7992 {
7993         int rc;
7994         u32 cmd_flags;
7995         u32 val;
7996
7997         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7998                 DP(BNX2X_MSG_NVM,
7999                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8000                    offset, buf_size);
8001                 return -EINVAL;
8002         }
8003
8004         if (offset + buf_size > bp->common.flash_size) {
8005                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8006                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8007                    offset, buf_size, bp->common.flash_size);
8008                 return -EINVAL;
8009         }
8010
8011         /* request access to nvram interface */
8012         rc = bnx2x_acquire_nvram_lock(bp);
8013         if (rc)
8014                 return rc;
8015
8016         /* enable access to nvram interface */
8017         bnx2x_enable_nvram_access(bp);
8018
8019         /* read the first word(s) */
8020         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8021         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8022                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8023                 memcpy(ret_buf, &val, 4);
8024
8025                 /* advance to the next dword */
8026                 offset += sizeof(u32);
8027                 ret_buf += sizeof(u32);
8028                 buf_size -= sizeof(u32);
8029                 cmd_flags = 0;
8030         }
8031
8032         if (rc == 0) {
8033                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8034                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8035                 memcpy(ret_buf, &val, 4);
8036         }
8037
8038         /* disable access to nvram interface */
8039         bnx2x_disable_nvram_access(bp);
8040         bnx2x_release_nvram_lock(bp);
8041
8042         return rc;
8043 }
8044
8045 static int bnx2x_get_eeprom(struct net_device *dev,
8046                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8047 {
8048         struct bnx2x *bp = netdev_priv(dev);
8049         int rc;
8050
8051         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8052            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8053            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8054            eeprom->len, eeprom->len);
8055
8056         /* parameters already validated in ethtool_get_eeprom */
8057
8058         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8059
8060         return rc;
8061 }
8062
8063 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8064                                    u32 cmd_flags)
8065 {
8066         int count, i, rc;
8067
8068         /* build the command word */
8069         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8070
8071         /* need to clear DONE bit separately */
8072         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8073
8074         /* write the data */
8075         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8076
8077         /* address of the NVRAM to write to */
8078         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8079                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8080
8081         /* issue the write command */
8082         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8083
8084         /* adjust timeout for emulation/FPGA */
8085         count = NVRAM_TIMEOUT_COUNT;
8086         if (CHIP_REV_IS_SLOW(bp))
8087                 count *= 100;
8088
8089         /* wait for completion */
8090         rc = -EBUSY;
8091         for (i = 0; i < count; i++) {
8092                 udelay(5);
8093                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8094                 if (val & MCPR_NVM_COMMAND_DONE) {
8095                         rc = 0;
8096                         break;
8097                 }
8098         }
8099
8100         return rc;
8101 }
8102
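     /* BYTE_OFFSET() turns a byte offset within a dword into a bit
      * shift, e.g. offset 0x12 -> byte 2 -> shift of 16.  It is used by
      * bnx2x_nvram_write1() below to splice a single byte into its
      * containing dword.
      */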
8103 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8104
8105 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8106                               int buf_size)
8107 {
8108         int rc;
8109         u32 cmd_flags;
8110         u32 align_offset;
8111         u32 val;
8112
8113         if (offset + buf_size > bp->common.flash_size) {
8114                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8115                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8116                    offset, buf_size, bp->common.flash_size);
8117                 return -EINVAL;
8118         }
8119
8120         /* request access to nvram interface */
8121         rc = bnx2x_acquire_nvram_lock(bp);
8122         if (rc)
8123                 return rc;
8124
8125         /* enable access to nvram interface */
8126         bnx2x_enable_nvram_access(bp);
8127
8128         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8129         align_offset = (offset & ~0x03);
8130         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8131
8132         if (rc == 0) {
8133                 val &= ~(0xff << BYTE_OFFSET(offset));
8134                 val |= (*data_buf << BYTE_OFFSET(offset));
8135
8136                 /* nvram data is returned as an array of bytes;
8137                  * convert it back to cpu order */
8138                 val = be32_to_cpu(val);
8139
8140                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8141                                              cmd_flags);
8142         }
8143
8144         /* disable access to nvram interface */
8145         bnx2x_disable_nvram_access(bp);
8146         bnx2x_release_nvram_lock(bp);
8147
8148         return rc;
8149 }
8150
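     /* Write buf_size bytes to NVRAM.  A single-byte request (as issued
      * by ethtool) takes the read-modify-write path in
      * bnx2x_nvram_write1(); everything else must be dword aligned.
      * The FIRST/LAST command flags are re-issued at NVRAM page
      * boundaries, so every page is programmed as its own burst.
      */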
8151 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8152                              int buf_size)
8153 {
8154         int rc;
8155         u32 cmd_flags;
8156         u32 val;
8157         u32 written_so_far;
8158
8159         if (buf_size == 1)      /* ethtool */
8160                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8161
8162         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8163                 DP(BNX2X_MSG_NVM,
8164                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8165                    offset, buf_size);
8166                 return -EINVAL;
8167         }
8168
8169         if (offset + buf_size > bp->common.flash_size) {
8170                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8171                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8172                    offset, buf_size, bp->common.flash_size);
8173                 return -EINVAL;
8174         }
8175
8176         /* request access to nvram interface */
8177         rc = bnx2x_acquire_nvram_lock(bp);
8178         if (rc)
8179                 return rc;
8180
8181         /* enable access to nvram interface */
8182         bnx2x_enable_nvram_access(bp);
8183
8184         written_so_far = 0;
8185         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8186         while ((written_so_far < buf_size) && (rc == 0)) {
8187                 if (written_so_far == (buf_size - sizeof(u32)))
8188                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8189                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8190                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8191                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8192                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8193
8194                 memcpy(&val, data_buf, 4);
8195
8196                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8197
8198                 /* advance to the next dword */
8199                 offset += sizeof(u32);
8200                 data_buf += sizeof(u32);
8201                 written_so_far += sizeof(u32);
8202                 cmd_flags = 0;
8203         }
8204
8205         /* disable access to nvram interface */
8206         bnx2x_disable_nvram_access(bp);
8207         bnx2x_release_nvram_lock(bp);
8208
8209         return rc;
8210 }
8211
8212 static int bnx2x_set_eeprom(struct net_device *dev,
8213                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8214 {
8215         struct bnx2x *bp = netdev_priv(dev);
8216         int rc;
8217
8218         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8219            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8220            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8221            eeprom->len, eeprom->len);
8222
8223         /* parameters already validated in ethtool_set_eeprom */
8224
8225         /* If the magic number is PHY (0x00504859), upgrade the PHY FW */
8226         if (eeprom->magic == 0x00504859)
8227                 if (bp->port.pmf) {
8228
8229                         bnx2x_acquire_phy_lock(bp);
8230                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8231                                              bp->link_params.ext_phy_config,
8232                                              (bp->state != BNX2X_STATE_CLOSED),
8233                                              eebuf, eeprom->len);
8234                         if ((bp->state == BNX2X_STATE_OPEN) ||
8235                             (bp->state == BNX2X_STATE_DISABLED)) {
8236                                 rc |= bnx2x_link_reset(&bp->link_params,
8237                                                        &bp->link_vars);
8238                                 rc |= bnx2x_phy_init(&bp->link_params,
8239                                                      &bp->link_vars);
8240                         }
8241                         bnx2x_release_phy_lock(bp);
8242
8243                 } else /* Only the PMF can access the PHY */
8244                         return -EINVAL;
8245         else
8246                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8247
8248         return rc;
8249 }
8250
8251 static int bnx2x_get_coalesce(struct net_device *dev,
8252                               struct ethtool_coalesce *coal)
8253 {
8254         struct bnx2x *bp = netdev_priv(dev);
8255
8256         memset(coal, 0, sizeof(struct ethtool_coalesce));
8257
8258         coal->rx_coalesce_usecs = bp->rx_ticks;
8259         coal->tx_coalesce_usecs = bp->tx_ticks;
8260
8261         return 0;
8262 }
8263
8264 static int bnx2x_set_coalesce(struct net_device *dev,
8265                               struct ethtool_coalesce *coal)
8266 {
8267         struct bnx2x *bp = netdev_priv(dev);
8268
8269         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8270         if (bp->rx_ticks > 3000)
8271                 bp->rx_ticks = 3000;
8272
8273         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8274         if (bp->tx_ticks > 0x3000)
8275                 bp->tx_ticks = 0x3000;
8276
8277         if (netif_running(dev))
8278                 bnx2x_update_coalesce(bp);
8279
8280         return 0;
8281 }
8282
8283 static void bnx2x_get_ringparam(struct net_device *dev,
8284                                 struct ethtool_ringparam *ering)
8285 {
8286         struct bnx2x *bp = netdev_priv(dev);
8287
8288         ering->rx_max_pending = MAX_RX_AVAIL;
8289         ering->rx_mini_max_pending = 0;
8290         ering->rx_jumbo_max_pending = 0;
8291
8292         ering->rx_pending = bp->rx_ring_size;
8293         ering->rx_mini_pending = 0;
8294         ering->rx_jumbo_pending = 0;
8295
8296         ering->tx_max_pending = MAX_TX_AVAIL;
8297         ering->tx_pending = bp->tx_ring_size;
8298 }
8299
8300 static int bnx2x_set_ringparam(struct net_device *dev,
8301                                struct ethtool_ringparam *ering)
8302 {
8303         struct bnx2x *bp = netdev_priv(dev);
8304         int rc = 0;
8305
8306         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8307             (ering->tx_pending > MAX_TX_AVAIL) ||
8308             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8309                 return -EINVAL;
8310
8311         bp->rx_ring_size = ering->rx_pending;
8312         bp->tx_ring_size = ering->tx_pending;
8313
8314         if (netif_running(dev)) {
8315                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8316                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8317         }
8318
8319         return rc;
8320 }
8321
8322 static void bnx2x_get_pauseparam(struct net_device *dev,
8323                                  struct ethtool_pauseparam *epause)
8324 {
8325         struct bnx2x *bp = netdev_priv(dev);
8326
8327         epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8328                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8329
8330         epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8331                             FLOW_CTRL_RX);
8332         epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8333                             FLOW_CTRL_TX);
8334
8335         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8336            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8337            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8338 }
8339
8340 static int bnx2x_set_pauseparam(struct net_device *dev,
8341                                 struct ethtool_pauseparam *epause)
8342 {
8343         struct bnx2x *bp = netdev_priv(dev);
8344
8345         if (IS_E1HMF(bp))
8346                 return 0;
8347
8348         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8349            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8350            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8351
8352         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8353
8354         if (epause->rx_pause)
8355                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8356
8357         if (epause->tx_pause)
8358                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8359
8360         if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8361                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8362
8363         if (epause->autoneg) {
8364                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8365                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8366                         return -EINVAL;
8367                 }
8368
8369                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8370                         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8371         }
8372
8373         DP(NETIF_MSG_LINK,
8374            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8375
8376         if (netif_running(dev)) {
8377                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8378                 bnx2x_link_set(bp);
8379         }
8380
8381         return 0;
8382 }
8383
8384 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8385 {
8386         struct bnx2x *bp = netdev_priv(dev);
8387         int changed = 0;
8388         int rc = 0;
8389
8390         /* TPA requires Rx CSUM offloading */
8391         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8392                 if (!(dev->features & NETIF_F_LRO)) {
8393                         dev->features |= NETIF_F_LRO;
8394                         bp->flags |= TPA_ENABLE_FLAG;
8395                         changed = 1;
8396                 }
8397
8398         } else if (dev->features & NETIF_F_LRO) {
8399                 dev->features &= ~NETIF_F_LRO;
8400                 bp->flags &= ~TPA_ENABLE_FLAG;
8401                 changed = 1;
8402         }
8403
8404         if (changed && netif_running(dev)) {
8405                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8406                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8407         }
8408
8409         return rc;
8410 }
8411
8412 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8413 {
8414         struct bnx2x *bp = netdev_priv(dev);
8415
8416         return bp->rx_csum;
8417 }
8418
8419 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8420 {
8421         struct bnx2x *bp = netdev_priv(dev);
8422         int rc = 0;
8423
8424         bp->rx_csum = data;
8425
8426         /* Disable TPA when Rx CSUM is disabled. Otherwise all
8427            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8428         if (!data) {
8429                 u32 flags = ethtool_op_get_flags(dev);
8430
8431                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8432         }
8433
8434         return rc;
8435 }
8436
8437 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8438 {
8439         if (data) {
8440                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8441                 dev->features |= NETIF_F_TSO6;
8442         } else {
8443                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8444                 dev->features &= ~NETIF_F_TSO6;
8445         }
8446
8447         return 0;
8448 }
8449
8450 static const struct {
8451         char string[ETH_GSTRING_LEN];
8452 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8453         { "register_test (offline)" },
8454         { "memory_test (offline)" },
8455         { "loopback_test (offline)" },
8456         { "nvram_test (online)" },
8457         { "interrupt_test (online)" },
8458         { "link_test (online)" },
8459         { "idle check (online)" },
8460         { "MC errors (online)" }
8461 };
8462
8463 static int bnx2x_self_test_count(struct net_device *dev)
8464 {
8465         return BNX2X_NUM_TESTS;
8466 }
8467
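     /* Walk reg_tbl[], writing 0x00000000 and then 0xffffffff to each
      * register and reading the value back.  offset0 is the port 0
      * address, offset1 the per-port stride and mask the bits expected
      * to be read/write; the original value is restored after each
      * check.
      */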
8468 static int bnx2x_test_registers(struct bnx2x *bp)
8469 {
8470         int idx, i, rc = -ENODEV;
8471         u32 wr_val = 0;
8472         int port = BP_PORT(bp);
8473         static const struct {
8474                 u32  offset0;
8475                 u32  offset1;
8476                 u32  mask;
8477         } reg_tbl[] = {
8478 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8479                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8480                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8481                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8482                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8483                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8484                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8485                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8486                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8487                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8488 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8489                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8490                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8491                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8492                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8493                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8494                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8495                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8496                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8497                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8498 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8499                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8500                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8501                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8502                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8503                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8504                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8505                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8506                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8507                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8508 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8509                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8510                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8511                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8512                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8513                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8514                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8515                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8516
8517                 { 0xffffffff, 0, 0x00000000 }
8518         };
8519
8520         if (!netif_running(bp->dev))
8521                 return rc;
8522
8523         /* Run the test twice:
8524            first by writing 0x00000000, then by writing 0xffffffff */
8525         for (idx = 0; idx < 2; idx++) {
8526
8527                 switch (idx) {
8528                 case 0:
8529                         wr_val = 0;
8530                         break;
8531                 case 1:
8532                         wr_val = 0xffffffff;
8533                         break;
8534                 }
8535
8536                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8537                         u32 offset, mask, save_val, val;
8538
8539                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8540                         mask = reg_tbl[i].mask;
8541
8542                         save_val = REG_RD(bp, offset);
8543
8544                         REG_WR(bp, offset, wr_val);
8545                         val = REG_RD(bp, offset);
8546
8547                         /* Restore the original register's value */
8548                         REG_WR(bp, offset, save_val);
8549
8550                         /* verify the value is as expected */
8551                         if ((val & mask) != (wr_val & mask))
8552                                 goto test_reg_exit;
8553                 }
8554         }
8555
8556         rc = 0;
8557
8558 test_reg_exit:
8559         return rc;
8560 }
8561
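     /* Read every location listed in mem_tbl[] (any parity error the
      * reads trigger is latched in the *_PRTY_STS registers) and then
      * check those status registers, ignoring the per-chip mask bits
      * given in prty_tbl[].
      */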
8562 static int bnx2x_test_memory(struct bnx2x *bp)
8563 {
8564         int i, j, rc = -ENODEV;
8565         u32 val;
8566         static const struct {
8567                 u32 offset;
8568                 int size;
8569         } mem_tbl[] = {
8570                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8571                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8572                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8573                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8574                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8575                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8576                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8577
8578                 { 0xffffffff, 0 }
8579         };
8580         static const struct {
8581                 char *name;
8582                 u32 offset;
8583                 u32 e1_mask;
8584                 u32 e1h_mask;
8585         } prty_tbl[] = {
8586                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8587                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8588                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8589                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8590                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8591                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8592
8593                 { NULL, 0xffffffff, 0, 0 }
8594         };
8595
8596         if (!netif_running(bp->dev))
8597                 return rc;
8598
8599         /* Go through all the memories */
8600         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8601                 for (j = 0; j < mem_tbl[i].size; j++)
8602                         REG_RD(bp, mem_tbl[i].offset + j*4);
8603
8604         /* Check the parity status */
8605         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8606                 val = REG_RD(bp, prty_tbl[i].offset);
8607                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8608                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8609                         DP(NETIF_MSG_HW,
8610                            "%s is 0x%x\n", prty_tbl[i].name, val);
8611                         goto test_mem_exit;
8612                 }
8613         }
8614
8615         rc = 0;
8616
8617 test_mem_exit:
8618         return rc;
8619 }
8620
8621 static void bnx2x_netif_start(struct bnx2x *bp)
8622 {
8623         int i;
8624
8625         if (atomic_dec_and_test(&bp->intr_sem)) {
8626                 if (netif_running(bp->dev)) {
8627                         bnx2x_int_enable(bp);
8628                         for_each_queue(bp, i)
8629                                 napi_enable(&bnx2x_fp(bp, i, napi));
8630                         if (bp->state == BNX2X_STATE_OPEN)
8631                                 netif_wake_queue(bp->dev);
8632                 }
8633         }
8634 }
8635
8636 static void bnx2x_netif_stop(struct bnx2x *bp)
8637 {
8638         int i;
8639
8640         if (netif_running(bp->dev)) {
8641                 netif_tx_disable(bp->dev);
8642                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8643                 for_each_queue(bp, i)
8644                         napi_disable(&bnx2x_fp(bp, i, napi));
8645         }
8646         bnx2x_int_disable_sync(bp);
8647 }
8648
8649 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8650 {
8651         int cnt = 1000;
8652
8653         if (link_up)
8654                 while (bnx2x_link_test(bp) && cnt--)
8655                         msleep(10);
8656 }
8657
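     /* Loopback test: configure MAC (BMAC) or PHY (XGXS) loopback,
      * hand-build a single 1514-byte frame on queue 0, ring the
      * doorbell and verify that exactly one packet shows up on both the
      * TX and RX completion rings with the expected length and payload.
      */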
8658 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8659 {
8660         unsigned int pkt_size, num_pkts, i;
8661         struct sk_buff *skb;
8662         unsigned char *packet;
8663         struct bnx2x_fastpath *fp = &bp->fp[0];
8664         u16 tx_start_idx, tx_idx;
8665         u16 rx_start_idx, rx_idx;
8666         u16 pkt_prod;
8667         struct sw_tx_bd *tx_buf;
8668         struct eth_tx_bd *tx_bd;
8669         dma_addr_t mapping;
8670         union eth_rx_cqe *cqe;
8671         u8 cqe_fp_flags;
8672         struct sw_rx_bd *rx_buf;
8673         u16 len;
8674         int rc = -ENODEV;
8675
8676         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8677                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8678                 bnx2x_acquire_phy_lock(bp);
8679                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8680                 bnx2x_release_phy_lock(bp);
8681
8682         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8683                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8684                 bnx2x_acquire_phy_lock(bp);
8685                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8686                 bnx2x_release_phy_lock(bp);
8687                 /* wait until link state is restored */
8688                 bnx2x_wait_for_link(bp, link_up);
8689
8690         } else
8691                 return -EINVAL;
8692
8693         pkt_size = 1514;
8694         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8695         if (!skb) {
8696                 rc = -ENOMEM;
8697                 goto test_loopback_exit;
8698         }
8699         packet = skb_put(skb, pkt_size);
8700         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8701         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8702         for (i = ETH_HLEN; i < pkt_size; i++)
8703                 packet[i] = (unsigned char) (i & 0xff);
8704
8705         num_pkts = 0;
8706         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8707         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8708
8709         pkt_prod = fp->tx_pkt_prod++;
8710         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8711         tx_buf->first_bd = fp->tx_bd_prod;
8712         tx_buf->skb = skb;
8713
8714         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8715         mapping = pci_map_single(bp->pdev, skb->data,
8716                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8717         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8718         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8719         tx_bd->nbd = cpu_to_le16(1);
8720         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8721         tx_bd->vlan = cpu_to_le16(pkt_prod);
8722         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8723                                        ETH_TX_BD_FLAGS_END_BD);
8724         tx_bd->general_data = ((UNICAST_ADDRESS <<
8725                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8726
8727         fp->hw_tx_prods->bds_prod =
8728                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8729         mb(); /* FW restriction: must not reorder writing nbd and packets */
8730         fp->hw_tx_prods->packets_prod =
8731                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8732         DOORBELL(bp, FP_IDX(fp), 0);
8733
8734         mmiowb();
8735
8736         num_pkts++;
8737         fp->tx_bd_prod++;
8738         bp->dev->trans_start = jiffies;
8739
8740         udelay(100);
8741
8742         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8743         if (tx_idx != tx_start_idx + num_pkts)
8744                 goto test_loopback_exit;
8745
8746         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8747         if (rx_idx != rx_start_idx + num_pkts)
8748                 goto test_loopback_exit;
8749
8750         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8751         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8752         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8753                 goto test_loopback_rx_exit;
8754
8755         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8756         if (len != pkt_size)
8757                 goto test_loopback_rx_exit;
8758
8759         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8760         skb = rx_buf->skb;
8761         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8762         for (i = ETH_HLEN; i < pkt_size; i++)
8763                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8764                         goto test_loopback_rx_exit;
8765
8766         rc = 0;
8767
8768 test_loopback_rx_exit:
8769         bp->dev->last_rx = jiffies;
8770
8771         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8772         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8773         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8774         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8775
8776         /* Update producers */
8777         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8778                              fp->rx_sge_prod);
8779         mmiowb(); /* keep prod updates ordered */
8780
8781 test_loopback_exit:
8782         bp->link_params.loopback_mode = LOOPBACK_NONE;
8783
8784         return rc;
8785 }
8786
8787 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8788 {
8789         int rc = 0;
8790
8791         if (!netif_running(bp->dev))
8792                 return BNX2X_LOOPBACK_FAILED;
8793
8794         bnx2x_netif_stop(bp);
8795
8796         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8797                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8798                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8799         }
8800
8801         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8802                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8803                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8804         }
8805
8806         bnx2x_netif_start(bp);
8807
8808         return rc;
8809 }
8810
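     /* Every NVRAM region in nvram_tbl[] below is stored together with
      * its CRC32; running ether_crc_le() over data + CRC of an intact
      * region always yields this constant residual.
      */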
8811 #define CRC32_RESIDUAL                  0xdebb20e3
8812
8813 static int bnx2x_test_nvram(struct bnx2x *bp)
8814 {
8815         static const struct {
8816                 int offset;
8817                 int size;
8818         } nvram_tbl[] = {
8819                 {     0,  0x14 }, /* bootstrap */
8820                 {  0x14,  0xec }, /* dir */
8821                 { 0x100, 0x350 }, /* manuf_info */
8822                 { 0x450,  0xf0 }, /* feature_info */
8823                 { 0x640,  0x64 }, /* upgrade_key_info */
8824                 { 0x6a4,  0x64 },
8825                 { 0x708,  0x70 }, /* manuf_key_info */
8826                 { 0x778,  0x70 },
8827                 {     0,     0 }
8828         };
8829         u32 buf[0x350 / 4];
8830         u8 *data = (u8 *)buf;
8831         int i, rc;
8832         u32 magic, csum;
8833
8834         rc = bnx2x_nvram_read(bp, 0, data, 4);
8835         if (rc) {
8836                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8837                 goto test_nvram_exit;
8838         }
8839
8840         magic = be32_to_cpu(buf[0]);
8841         if (magic != 0x669955aa) {
8842                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8843                 rc = -ENODEV;
8844                 goto test_nvram_exit;
8845         }
8846
8847         for (i = 0; nvram_tbl[i].size; i++) {
8848
8849                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8850                                       nvram_tbl[i].size);
8851                 if (rc) {
8852                         DP(NETIF_MSG_PROBE,
8853                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8854                         goto test_nvram_exit;
8855                 }
8856
8857                 csum = ether_crc_le(nvram_tbl[i].size, data);
8858                 if (csum != CRC32_RESIDUAL) {
8859                         DP(NETIF_MSG_PROBE,
8860                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8861                         rc = -ENODEV;
8862                         goto test_nvram_exit;
8863                 }
8864         }
8865
8866 test_nvram_exit:
8867         return rc;
8868 }
8869
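     /* Interrupt test: post an empty SET_MAC ramrod on the slowpath
      * queue and wait up to ~100ms for its completion, which can only
      * arrive through a working interrupt path.
      */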
8870 static int bnx2x_test_intr(struct bnx2x *bp)
8871 {
8872         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8873         int i, rc;
8874
8875         if (!netif_running(bp->dev))
8876                 return -ENODEV;
8877
8878         config->hdr.length_6b = 0;
8879         config->hdr.offset = 0;
8880         config->hdr.client_id = BP_CL_ID(bp);
8881         config->hdr.reserved1 = 0;
8882
8883         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8884                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8885                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8886         if (rc == 0) {
8887                 bp->set_mac_pending++;
8888                 for (i = 0; i < 10; i++) {
8889                         if (!bp->set_mac_pending)
8890                                 break;
8891                         msleep_interruptible(10);
8892                 }
8893                 if (i == 10)
8894                         rc = -ENODEV;
8895         }
8896
8897         return rc;
8898 }
8899
8900 static void bnx2x_self_test(struct net_device *dev,
8901                             struct ethtool_test *etest, u64 *buf)
8902 {
8903         struct bnx2x *bp = netdev_priv(dev);
8904
8905         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8906
8907         if (!netif_running(dev))
8908                 return;
8909
8910         /* offline tests are not supported in MF mode */
8911         if (IS_E1HMF(bp))
8912                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8913
8914         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8915                 u8 link_up;
8916
8917                 link_up = bp->link_vars.link_up;
8918                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8919                 bnx2x_nic_load(bp, LOAD_DIAG);
8920                 /* wait until link state is restored */
8921                 bnx2x_wait_for_link(bp, link_up);
8922
8923                 if (bnx2x_test_registers(bp) != 0) {
8924                         buf[0] = 1;
8925                         etest->flags |= ETH_TEST_FL_FAILED;
8926                 }
8927                 if (bnx2x_test_memory(bp) != 0) {
8928                         buf[1] = 1;
8929                         etest->flags |= ETH_TEST_FL_FAILED;
8930                 }
8931                 buf[2] = bnx2x_test_loopback(bp, link_up);
8932                 if (buf[2] != 0)
8933                         etest->flags |= ETH_TEST_FL_FAILED;
8934
8935                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8936                 bnx2x_nic_load(bp, LOAD_NORMAL);
8937                 /* wait until link state is restored */
8938                 bnx2x_wait_for_link(bp, link_up);
8939         }
8940         if (bnx2x_test_nvram(bp) != 0) {
8941                 buf[3] = 1;
8942                 etest->flags |= ETH_TEST_FL_FAILED;
8943         }
8944         if (bnx2x_test_intr(bp) != 0) {
8945                 buf[4] = 1;
8946                 etest->flags |= ETH_TEST_FL_FAILED;
8947         }
8948         if (bp->port.pmf)
8949                 if (bnx2x_link_test(bp) != 0) {
8950                         buf[5] = 1;
8951                         etest->flags |= ETH_TEST_FL_FAILED;
8952                 }
8953         buf[7] = bnx2x_mc_assert(bp);
8954         if (buf[7] != 0)
8955                 etest->flags |= ETH_TEST_FL_FAILED;
8956
8957 #ifdef BNX2X_EXTRA_DEBUG
8958         bnx2x_panic_dump(bp);
8959 #endif
8960 }
8961
8962 static const struct {
8963         long offset;
8964         int size;
8965         u32 flags;
8966 #define STATS_FLAGS_PORT                1
8967 #define STATS_FLAGS_FUNC                2
8968         u8 string[ETH_GSTRING_LEN];
8969 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8970 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8971                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
8972         { STATS_OFFSET32(error_bytes_received_hi),
8973                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8974         { STATS_OFFSET32(total_bytes_transmitted_hi),
8975                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
8976         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8977                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8978         { STATS_OFFSET32(total_unicast_packets_received_hi),
8979                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8980         { STATS_OFFSET32(total_multicast_packets_received_hi),
8981                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8982         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8983                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8984         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8985                                 8, STATS_FLAGS_FUNC, "tx_packets" },
8986         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8987                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8988 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8989                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8990         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8991                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8992         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8993                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
8994         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8995                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
8996         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8997                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
8998         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8999                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9000         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9001                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9002         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9003                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9004         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9005                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9006         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9007                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9008 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9009                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9010         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9011                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9012         { STATS_OFFSET32(jabber_packets_received),
9013                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9014         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9015                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9016         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9017                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9018         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9019                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9020         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9021                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9022         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9023                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9024         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9025                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9026         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9027                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9028 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9029                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9030         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9031                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9032         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9033                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9034         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9035                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9036         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9037                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9038         { STATS_OFFSET32(mac_filter_discard),
9039                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9040         { STATS_OFFSET32(no_buff_discard),
9041                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9042         { STATS_OFFSET32(xxoverflow_discard),
9043                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9044         { STATS_OFFSET32(brb_drop_hi),
9045                                 8, STATS_FLAGS_PORT, "brb_discard" },
9046         { STATS_OFFSET32(brb_truncate_hi),
9047                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9048 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9049                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9050         { STATS_OFFSET32(rx_skb_alloc_failed),
9051                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9052 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9053                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9054 };
9055
9056 #define IS_NOT_E1HMF_STAT(bp, i) \
9057                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9058
9059 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9060 {
9061         struct bnx2x *bp = netdev_priv(dev);
9062         int i, j;
9063
9064         switch (stringset) {
9065         case ETH_SS_STATS:
9066                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9067                         if (IS_NOT_E1HMF_STAT(bp, i))
9068                                 continue;
9069                         strcpy(buf + j*ETH_GSTRING_LEN,
9070                                bnx2x_stats_arr[i].string);
9071                         j++;
9072                 }
9073                 break;
9074
9075         case ETH_SS_TEST:
9076                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9077                 break;
9078         }
9079 }
9080
9081 static int bnx2x_get_stats_count(struct net_device *dev)
9082 {
9083         struct bnx2x *bp = netdev_priv(dev);
9084         int i, num_stats = 0;
9085
9086         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9087                 if (IS_NOT_E1HMF_STAT(bp, i))
9088                         continue;
9089                 num_stats++;
9090         }
9091         return num_stats;
9092 }
9093
9094 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9095                                     struct ethtool_stats *stats, u64 *buf)
9096 {
9097         struct bnx2x *bp = netdev_priv(dev);
9098         u32 *hw_stats = (u32 *)&bp->eth_stats;
9099         int i, j;
9100
9101         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9102                 if (IS_NOT_E1HMF_STAT(bp, i))
9103                         continue;
9104
9105                 if (bnx2x_stats_arr[i].size == 0) {
9106                         /* skip this counter */
9107                         buf[j] = 0;
9108                         j++;
9109                         continue;
9110                 }
9111                 if (bnx2x_stats_arr[i].size == 4) {
9112                         /* 4-byte counter */
9113                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9114                         j++;
9115                         continue;
9116                 }
9117                 /* 8-byte counter */
9118                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9119                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9120                 j++;
9121         }
9122 }
9123
9124 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9125 {
9126         struct bnx2x *bp = netdev_priv(dev);
9127         int port = BP_PORT(bp);
9128         int i;
9129
9130         if (!netif_running(dev))
9131                 return 0;
9132
9133         if (!bp->port.pmf)
9134                 return 0;
9135
9136         if (data == 0)
9137                 data = 2;
9138
9139         for (i = 0; i < (data * 2); i++) {
9140                 if ((i % 2) == 0)
9141                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9142                                       bp->link_params.hw_led_mode,
9143                                       bp->link_params.chip_id);
9144                 else
9145                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9146                                       bp->link_params.hw_led_mode,
9147                                       bp->link_params.chip_id);
9148
9149                 msleep_interruptible(500);
9150                 if (signal_pending(current))
9151                         break;
9152         }
9153
9154         if (bp->link_vars.link_up)
9155                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9156                               bp->link_vars.line_speed,
9157                               bp->link_params.hw_led_mode,
9158                               bp->link_params.chip_id);
9159
9160         return 0;
9161 }
9162
9163 static struct ethtool_ops bnx2x_ethtool_ops = {
9164         .get_settings           = bnx2x_get_settings,
9165         .set_settings           = bnx2x_set_settings,
9166         .get_drvinfo            = bnx2x_get_drvinfo,
9167         .get_wol                = bnx2x_get_wol,
9168         .set_wol                = bnx2x_set_wol,
9169         .get_msglevel           = bnx2x_get_msglevel,
9170         .set_msglevel           = bnx2x_set_msglevel,
9171         .nway_reset             = bnx2x_nway_reset,
9172         .get_link               = ethtool_op_get_link,
9173         .get_eeprom_len         = bnx2x_get_eeprom_len,
9174         .get_eeprom             = bnx2x_get_eeprom,
9175         .set_eeprom             = bnx2x_set_eeprom,
9176         .get_coalesce           = bnx2x_get_coalesce,
9177         .set_coalesce           = bnx2x_set_coalesce,
9178         .get_ringparam          = bnx2x_get_ringparam,
9179         .set_ringparam          = bnx2x_set_ringparam,
9180         .get_pauseparam         = bnx2x_get_pauseparam,
9181         .set_pauseparam         = bnx2x_set_pauseparam,
9182         .get_rx_csum            = bnx2x_get_rx_csum,
9183         .set_rx_csum            = bnx2x_set_rx_csum,
9184         .get_tx_csum            = ethtool_op_get_tx_csum,
9185         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9186         .set_flags              = bnx2x_set_flags,
9187         .get_flags              = ethtool_op_get_flags,
9188         .get_sg                 = ethtool_op_get_sg,
9189         .set_sg                 = ethtool_op_set_sg,
9190         .get_tso                = ethtool_op_get_tso,
9191         .set_tso                = bnx2x_set_tso,
9192         .self_test_count        = bnx2x_self_test_count,
9193         .self_test              = bnx2x_self_test,
9194         .get_strings            = bnx2x_get_strings,
9195         .phys_id                = bnx2x_phys_id,
9196         .get_stats_count        = bnx2x_get_stats_count,
9197         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9198 };
9199
9200 /* end of ethtool_ops */
9201
9202 /****************************************************************************
9203 * General service functions
9204 ****************************************************************************/
9205
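     /* Move the device between D0 and D3hot through the PCI PM control
      * register.  Entering D0 also clears the PME status bit; entering
      * D3hot arms PME generation when WoL is enabled.
      */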
9206 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9207 {
9208         u16 pmcsr;
9209
9210         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9211
9212         switch (state) {
9213         case PCI_D0:
9214                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9215                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9216                                        PCI_PM_CTRL_PME_STATUS));
9217
9218                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9219                         /* delay required during transition out of D3hot */
9220                         msleep(20);
9221                 break;
9222
9223         case PCI_D3hot:
9224                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9225                 pmcsr |= 3;
9226
9227                 if (bp->wol)
9228                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9229
9230                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9231                                       pmcsr);
9232
9233                 /* No more memory access after this point until
9234                  * the device is brought back to D0.
9235                  */
9236                 break;
9237
9238         default:
9239                 return -EINVAL;
9240         }
9241         return 0;
9242 }
9243
9244 /*
9245  * net_device service functions
9246  */
9247
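     /* NAPI poll: service TX completions and up to 'budget' RX packets,
      * then re-check the status block (the rmb() orders that check
      * against the work just done) and leave polling mode, re-enabling
      * the IGU interrupt, only when no work is left and the budget was
      * not exhausted.
      */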
9248 static int bnx2x_poll(struct napi_struct *napi, int budget)
9249 {
9250         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9251                                                  napi);
9252         struct bnx2x *bp = fp->bp;
9253         int work_done = 0;
9254
9255 #ifdef BNX2X_STOP_ON_ERROR
9256         if (unlikely(bp->panic))
9257                 goto poll_panic;
9258 #endif
9259
9260         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9261         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9262         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9263
9264         bnx2x_update_fpsb_idx(fp);
9265
9266         if (BNX2X_HAS_TX_WORK(fp))
9267                 bnx2x_tx_int(fp, budget);
9268
9269         if (BNX2X_HAS_RX_WORK(fp))
9270                 work_done = bnx2x_rx_int(fp, budget);
9271
9272         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9273
9274         /* must not complete if we consumed full budget */
9275         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9276
9277 #ifdef BNX2X_STOP_ON_ERROR
9278 poll_panic:
9279 #endif
9280                 netif_rx_complete(bp->dev, napi);
9281
9282                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9283                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9284                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9285                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9286         }
9287         return work_done;
9288 }
9289
9290
9291 /* we split the first BD into header and data BDs
9292  * to ease the pain of our fellow microcode engineers;
9293  * we use one mapping for both BDs.
9294  * So far this has only been observed to happen
9295  * in Other Operating Systems(TM)
9296  */
9297 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9298                                    struct bnx2x_fastpath *fp,
9299                                    struct eth_tx_bd **tx_bd, u16 hlen,
9300                                    u16 bd_prod, int nbd)
9301 {
9302         struct eth_tx_bd *h_tx_bd = *tx_bd;
9303         struct eth_tx_bd *d_tx_bd;
9304         dma_addr_t mapping;
9305         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9306
9307         /* first fix first BD */
9308         h_tx_bd->nbd = cpu_to_le16(nbd);
9309         h_tx_bd->nbytes = cpu_to_le16(hlen);
9310
9311         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9312            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9313            h_tx_bd->addr_lo, h_tx_bd->nbd);
9314
9315         /* now get a new data BD
9316          * (after the pbd) and fill it */
9317         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9318         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9319
9320         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9321                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9322
9323         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9324         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9325         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9326         d_tx_bd->vlan = 0;
9327         /* this marks the BD as one that has no individual mapping;
9328          * the FW ignores this flag in a BD not marked start
9329          */
9330         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9331         DP(NETIF_MSG_TX_QUEUED,
9332            "TSO split data size is %d (%x:%x)\n",
9333            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9334
9335         /* update tx_bd for marking the last BD flag */
9336         *tx_bd = d_tx_bd;
9337
9338         return bd_prod;
9339 }
9340
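     /* Adjust a checksum that was computed starting 'fix' bytes away
      * from the transport header: fold out (fix > 0) or fold in
      * (fix < 0) the partial sum over the difference, then byte-swap
      * the result.
      */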
9341 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9342 {
9343         if (fix > 0)
9344                 csum = (u16) ~csum_fold(csum_sub(csum,
9345                                 csum_partial(t_header - fix, fix, 0)));
9346
9347         else if (fix < 0)
9348                 csum = (u16) ~csum_fold(csum_add(csum,
9349                                 csum_partial(t_header, -fix, 0)));
9350
9351         return swab16(csum);
9352 }
9353
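     /* Classify an skb for transmission: XMIT_PLAIN or a combination of
      * XMIT_CSUM_V4/V6, XMIT_CSUM_TCP and XMIT_GSO_V4/V6, consumed by
      * bnx2x_pkt_req_lin() and bnx2x_start_xmit() below.
      */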
9354 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9355 {
9356         u32 rc;
9357
9358         if (skb->ip_summed != CHECKSUM_PARTIAL)
9359                 rc = XMIT_PLAIN;
9360
9361         else {
9362                 if (skb->protocol == htons(ETH_P_IPV6)) {
9363                         rc = XMIT_CSUM_V6;
9364                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9365                                 rc |= XMIT_CSUM_TCP;
9366
9367                 } else {
9368                         rc = XMIT_CSUM_V4;
9369                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9370                                 rc |= XMIT_CSUM_TCP;
9371                 }
9372         }
9373
9374         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9375                 rc |= XMIT_GSO_V4;
9376
9377         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9378                 rc |= XMIT_GSO_V6;
9379
9380         return rc;
9381 }
9382
9383 /* check if packet requires linearization (packet is too fragmented) */
9384 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9385                              u32 xmit_type)
9386 {
9387         int to_copy = 0;
9388         int hlen = 0;
9389         int first_bd_sz = 0;
9390
9391         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9392         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9393
9394                 if (xmit_type & XMIT_GSO) {
9395                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9396                         /* Check if LSO packet needs to be copied:
9397                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9398                         int wnd_size = MAX_FETCH_BD - 3;
9399                         /* Number of windows to check */
9400                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9401                         int wnd_idx = 0;
9402                         int frag_idx = 0;
9403                         u32 wnd_sum = 0;
9404
9405                         /* Headers length */
9406                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9407                                 tcp_hdrlen(skb);
9408
9409                         /* Amount of data (w/o headers) on the linear part of the SKB */
9410                         first_bd_sz = skb_headlen(skb) - hlen;
9411
9412                         wnd_sum  = first_bd_sz;
9413
9414                         /* Calculate the first sum - it's special */
9415                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9416                                 wnd_sum +=
9417                                         skb_shinfo(skb)->frags[frag_idx].size;
9418
9419                         /* If there was data on the linear part of the skb - check it */
9420                         if (first_bd_sz > 0) {
9421                                 if (unlikely(wnd_sum < lso_mss)) {
9422                                         to_copy = 1;
9423                                         goto exit_lbl;
9424                                 }
9425
9426                                 wnd_sum -= first_bd_sz;
9427                         }
9428
9429                         /* Others are easier: run through the frag list and
9430                            check all windows */
9431                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9432                                 wnd_sum +=
9433                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9434
9435                                 if (unlikely(wnd_sum < lso_mss)) {
9436                                         to_copy = 1;
9437                                         break;
9438                                 }
9439                                 wnd_sum -=
9440                                         skb_shinfo(skb)->frags[wnd_idx].size;
9441                         }
9442
9443                 } else {
9444                         /* a non-LSO packet that is too fragmented
9445                            should always be linearized */
9446                         to_copy = 1;
9447                 }
9448         }
9449
9450 exit_lbl:
9451         if (unlikely(to_copy))
9452                 DP(NETIF_MSG_TX_QUEUED,
9453                    "Linearization IS REQUIRED for %s packet. "
9454                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9455                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9456                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9457
9458         return to_copy;
9459 }
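
/* Editor's illustrative sketch (not driver code): the sliding-window test
 * above in miniature, ignoring the special handling of the linear (first
 * BD) data.  A window of wnd_size consecutive fragments whose byte sum
 * drops below the MSS forces linearization, since the FW cannot gather
 * one MSS worth of payload from more than wnd_size BDs.
 */
#if 0
static int example_window_check(const unsigned int *frag_sz, int nr_frags,
                                unsigned int lso_mss, int wnd_size)
{
        unsigned int wnd_sum = 0;
        int i;

        for (i = 0; i < wnd_size; i++)          /* first window */
                wnd_sum += frag_sz[i];

        for (i = 0; i <= nr_frags - wnd_size; i++) {
                if (wnd_sum < lso_mss)
                        return 1;               /* must linearize */
                wnd_sum -= frag_sz[i];          /* slide the window */
                if (i + wnd_size < nr_frags)
                        wnd_sum += frag_sz[i + wnd_size];
        }
        return 0;
}
#endif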
9460
9461 /* called with netif_tx_lock
9462  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9463  * netif_wake_queue()
9464  */
9465 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9466 {
9467         struct bnx2x *bp = netdev_priv(dev);
9468         struct bnx2x_fastpath *fp;
9469         struct sw_tx_bd *tx_buf;
9470         struct eth_tx_bd *tx_bd;
9471         struct eth_tx_parse_bd *pbd = NULL;
9472         u16 pkt_prod, bd_prod;
9473         int nbd, fp_index;
9474         dma_addr_t mapping;
9475         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9476         int vlan_off = (bp->e1hov ? 4 : 0);
9477         int i;
9478         u8 hlen = 0;
9479
9480 #ifdef BNX2X_STOP_ON_ERROR
9481         if (unlikely(bp->panic))
9482                 return NETDEV_TX_BUSY;
9483 #endif
9484
9485         fp_index = (smp_processor_id() % bp->num_queues);
9486         fp = &bp->fp[fp_index];
9487
9488         if (unlikely(bnx2x_tx_avail(fp) <
9489                                         (skb_shinfo(skb)->nr_frags + 3))) {
9490                 bp->eth_stats.driver_xoff++;
9491                 netif_stop_queue(dev);
9492                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9493                 return NETDEV_TX_BUSY;
9494         }
9495
9496         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9497            "  gso type %x  xmit_type %x\n",
9498            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9499            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9500
9501         /* First, check if we need to linearize the skb
9502            (due to FW restrictions) */
9503         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9504                 /* Statistics of linearization */
9505                 bp->lin_cnt++;
9506                 if (skb_linearize(skb) != 0) {
9507                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9508                            "silently dropping this SKB\n");
9509                         dev_kfree_skb_any(skb);
9510                         return NETDEV_TX_OK;
9511                 }
9512         }
9513
9514         /*
9515          * Please read carefully. First we use one BD which we mark as the
9516          * start, then for TSO or checksum offload we have a parsing info BD,
9517          * and only then we have the rest of the TSO BDs.
9518          * (don't forget to mark the last one as last,
9519          * and to unmap only AFTER you write to the BD ...)
9520          * And above all, all pbd sizes are in words - NOT DWORDS!
9521          */
9522
9523         pkt_prod = fp->tx_pkt_prod++;
9524         bd_prod = TX_BD(fp->tx_bd_prod);
9525
9526         /* get a tx_buf and first BD */
9527         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9528         tx_bd = &fp->tx_desc_ring[bd_prod];
9529
9530         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9531         tx_bd->general_data = (UNICAST_ADDRESS <<
9532                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9533         /* header nbd */
9534         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9535
9536         /* remember the first BD of the packet */
9537         tx_buf->first_bd = fp->tx_bd_prod;
9538         tx_buf->skb = skb;
9539
9540         DP(NETIF_MSG_TX_QUEUED,
9541            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9542            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9543
9544         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9545                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9546                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9547                 vlan_off += 4;
9548         } else
9549                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9550
9551         if (xmit_type) {
9552
9553                 /* turn on parsing and get a BD */
9554                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9555                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9556
9557                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9558         }
9559
9560         if (xmit_type & XMIT_CSUM) {
9561                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9562
9563                 /* for now NS flag is not used in Linux */
9564                 pbd->global_data = (hlen |
9565                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9566                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9567
9568                 pbd->ip_hlen = (skb_transport_header(skb) -
9569                                 skb_network_header(skb)) / 2;
9570
9571                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9572
9573                 pbd->total_hlen = cpu_to_le16(hlen);
9574                 hlen = hlen*2 - vlan_off;
9575
9576                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9577
9578                 if (xmit_type & XMIT_CSUM_V4)
9579                         tx_bd->bd_flags.as_bitfield |=
9580                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9581                 else
9582                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9583
9584                 if (xmit_type & XMIT_CSUM_TCP) {
9585                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9586
9587                 } else {
9588                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9589
9590                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9591                         pbd->cs_offset = fix / 2;
9592
9593                         DP(NETIF_MSG_TX_QUEUED,
9594                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9595                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9596                            SKB_CS(skb));
9597
9598                         /* HW bug: fixup the CSUM */
9599                         pbd->tcp_pseudo_csum =
9600                                 bnx2x_csum_fix(skb_transport_header(skb),
9601                                                SKB_CS(skb), fix);
9602
9603                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9604                            pbd->tcp_pseudo_csum);
9605                 }
9606         }
9607
9608         mapping = pci_map_single(bp->pdev, skb->data,
9609                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9610
9611         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9612         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9613         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9614         tx_bd->nbd = cpu_to_le16(nbd);
9615         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9616
9617         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9618            "  nbytes %d  flags %x  vlan %x\n",
9619            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9620            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9621            le16_to_cpu(tx_bd->vlan));
9622
9623         if (xmit_type & XMIT_GSO) {
9624
9625                 DP(NETIF_MSG_TX_QUEUED,
9626                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9627                    skb->len, hlen, skb_headlen(skb),
9628                    skb_shinfo(skb)->gso_size);
9629
9630                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9631
9632                 if (unlikely(skb_headlen(skb) > hlen))
9633                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9634                                                  bd_prod, ++nbd);
9635
9636                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9637                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9638                 pbd->tcp_flags = pbd_tcp_flags(skb);
9639
9640                 if (xmit_type & XMIT_GSO_V4) {
9641                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9642                         pbd->tcp_pseudo_csum =
9643                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9644                                                           ip_hdr(skb)->daddr,
9645                                                           0, IPPROTO_TCP, 0));
9646
9647                 } else
9648                         pbd->tcp_pseudo_csum =
9649                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9650                                                         &ipv6_hdr(skb)->daddr,
9651                                                         0, IPPROTO_TCP, 0));
9652
9653                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9654         }
9655
9656         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9657                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9658
9659                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9660                 tx_bd = &fp->tx_desc_ring[bd_prod];
9661
9662                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9663                                        frag->size, PCI_DMA_TODEVICE);
9664
9665                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9666                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9667                 tx_bd->nbytes = cpu_to_le16(frag->size);
9668                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9669                 tx_bd->bd_flags.as_bitfield = 0;
9670
9671                 DP(NETIF_MSG_TX_QUEUED,
9672                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9673                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9674                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9675         }
9676
9677         /* now at last mark the BD as the last BD */
9678         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9679
9680         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9681            tx_bd, tx_bd->bd_flags.as_bitfield);
9682
9683         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9684
9685         /* now send a tx doorbell, counting the next BD
9686          * if the packet contains or ends with it
9687          */
9688         if (TX_BD_POFF(bd_prod) < nbd)
9689                 nbd++;
9690
9691         if (pbd)
9692                 DP(NETIF_MSG_TX_QUEUED,
9693                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9694                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9695                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9696                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9697                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9698
9699         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9700
9701         fp->hw_tx_prods->bds_prod =
9702                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9703         mb(); /* FW restriction: must not reorder writing nbd and packets */
9704         fp->hw_tx_prods->packets_prod =
9705                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9706         DOORBELL(bp, FP_IDX(fp), 0);
9707
9708         mmiowb();
9709
9710         fp->tx_bd_prod += nbd;
9711         dev->trans_start = jiffies;
9712
9713         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9714                 netif_stop_queue(dev);
9715                 bp->eth_stats.driver_xoff++;
9716                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9717                         netif_wake_queue(dev);
9718         }
9719         fp->tx_pkt++;
9720
9721         return NETDEV_TX_OK;
9722 }
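
/* Editor's illustrative sketch (not driver code): the BD accounting used
 * above.  A packet consumes one BD for the linear data, one per page
 * fragment, one parsing BD when checksum/TSO is in use, and one more when
 * bnx2x_tx_split() separates the headers into their own BD.
 */
#if 0
static int example_nbd(const struct sk_buff *skb, int has_pbd, int split)
{
        return 1 + skb_shinfo(skb)->nr_frags +
               (has_pbd ? 1 : 0) + (split ? 1 : 0);
}
#endif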
9723
9724 /* called with rtnl_lock */
9725 static int bnx2x_open(struct net_device *dev)
9726 {
9727         struct bnx2x *bp = netdev_priv(dev);
9728
9729         bnx2x_set_power_state(bp, PCI_D0);
9730
9731         return bnx2x_nic_load(bp, LOAD_OPEN);
9732 }
9733
9734 /* called with rtnl_lock */
9735 static int bnx2x_close(struct net_device *dev)
9736 {
9737         struct bnx2x *bp = netdev_priv(dev);
9738
9739         /* Unload the driver, release IRQs */
9740         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9741         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9742                 if (!CHIP_REV_IS_SLOW(bp))
9743                         bnx2x_set_power_state(bp, PCI_D3hot);
9744
9745         return 0;
9746 }
9747
9748 /* called with netif_tx_lock from set_multicast */
9749 static void bnx2x_set_rx_mode(struct net_device *dev)
9750 {
9751         struct bnx2x *bp = netdev_priv(dev);
9752         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9753         int port = BP_PORT(bp);
9754
9755         if (bp->state != BNX2X_STATE_OPEN) {
9756                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9757                 return;
9758         }
9759
9760         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9761
9762         if (dev->flags & IFF_PROMISC)
9763                 rx_mode = BNX2X_RX_MODE_PROMISC;
9764
9765         else if ((dev->flags & IFF_ALLMULTI) ||
9766                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9767                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9768
9769         else { /* some multicasts */
9770                 if (CHIP_IS_E1(bp)) {
9771                         int i, old, offset;
9772                         struct dev_mc_list *mclist;
9773                         struct mac_configuration_cmd *config =
9774                                                 bnx2x_sp(bp, mcast_config);
9775
9776                         for (i = 0, mclist = dev->mc_list;
9777                              mclist && (i < dev->mc_count);
9778                              i++, mclist = mclist->next) {
9779
9780                                 config->config_table[i].
9781                                         cam_entry.msb_mac_addr =
9782                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9783                                 config->config_table[i].
9784                                         cam_entry.middle_mac_addr =
9785                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9786                                 config->config_table[i].
9787                                         cam_entry.lsb_mac_addr =
9788                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9789                                 config->config_table[i].cam_entry.flags =
9790                                                         cpu_to_le16(port);
9791                                 config->config_table[i].
9792                                         target_table_entry.flags = 0;
9793                                 config->config_table[i].
9794                                         target_table_entry.client_id = 0;
9795                                 config->config_table[i].
9796                                         target_table_entry.vlan_id = 0;
9797
9798                                 DP(NETIF_MSG_IFUP,
9799                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9800                                    config->config_table[i].
9801                                                 cam_entry.msb_mac_addr,
9802                                    config->config_table[i].
9803                                                 cam_entry.middle_mac_addr,
9804                                    config->config_table[i].
9805                                                 cam_entry.lsb_mac_addr);
9806                         }
9807                         old = config->hdr.length_6b;
9808                         if (old > i) {
9809                                 for (; i < old; i++) {
9810                                         if (CAM_IS_INVALID(config->
9811                                                            config_table[i])) {
9812                                                 i--; /* already invalidated */
9813                                                 break;
9814                                         }
9815                                         /* invalidate */
9816                                         CAM_INVALIDATE(config->
9817                                                        config_table[i]);
9818                                 }
9819                         }
9820
9821                         if (CHIP_REV_IS_SLOW(bp))
9822                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9823                         else
9824                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9825
9826                         config->hdr.length_6b = i;
9827                         config->hdr.offset = offset;
9828                         config->hdr.client_id = BP_CL_ID(bp);
9829                         config->hdr.reserved1 = 0;
9830
9831                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9832                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9833                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9834                                       0);
9835                 } else { /* E1H */
9836                         /* Accept one or more multicasts */
9837                         struct dev_mc_list *mclist;
9838                         u32 mc_filter[MC_HASH_SIZE];
9839                         u32 crc, bit, regidx;
9840                         int i;
9841
9842                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9843
9844                         for (i = 0, mclist = dev->mc_list;
9845                              mclist && (i < dev->mc_count);
9846                              i++, mclist = mclist->next) {
9847
9848                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9849                                    "%02x:%02x:%02x:%02x:%02x:%02x\n",
9850                                    mclist->dmi_addr[0], mclist->dmi_addr[1],
9851                                    mclist->dmi_addr[2], mclist->dmi_addr[3],
9852                                    mclist->dmi_addr[4], mclist->dmi_addr[5]);
9853
9854                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9855                                 bit = (crc >> 24) & 0xff;
9856                                 regidx = bit >> 5;
9857                                 bit &= 0x1f;
9858                                 mc_filter[regidx] |= (1 << bit);
9859                         }
9860
9861                         for (i = 0; i < MC_HASH_SIZE; i++)
9862                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9863                                        mc_filter[i]);
9864                 }
9865         }
9866
9867         bp->rx_mode = rx_mode;
9868         bnx2x_set_storm_rx_mode(bp);
9869 }
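
/* Editor's illustrative sketch (not driver code): the E1H multicast hash
 * used above.  The top byte of the CRC32c of the MAC address selects one
 * of 256 filter bits, split into a register index (bit >> 5) and a bit
 * position within that 32-bit register (bit & 0x1f).
 */
#if 0
static void example_mc_hash_set(const u8 *mac, u32 *mc_filter)
{
        u32 crc = crc32c_le(0, mac, ETH_ALEN);
        u32 bit = (crc >> 24) & 0xff;           /* 0..255 */

        mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
#endif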
9870
9871 /* called with rtnl_lock */
9872 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9873 {
9874         struct sockaddr *addr = p;
9875         struct bnx2x *bp = netdev_priv(dev);
9876
9877         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9878                 return -EINVAL;
9879
9880         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9881         if (netif_running(dev)) {
9882                 if (CHIP_IS_E1(bp))
9883                         bnx2x_set_mac_addr_e1(bp, 1);
9884                 else
9885                         bnx2x_set_mac_addr_e1h(bp, 1);
9886         }
9887
9888         return 0;
9889 }
9890
9891 /* called with rtnl_lock */
9892 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9893 {
9894         struct mii_ioctl_data *data = if_mii(ifr);
9895         struct bnx2x *bp = netdev_priv(dev);
9896         int port = BP_PORT(bp);
9897         int err;
9898
9899         switch (cmd) {
9900         case SIOCGMIIPHY:
9901                 data->phy_id = bp->port.phy_addr;
9902
9903                 /* fallthrough */
9904
9905         case SIOCGMIIREG: {
9906                 u16 mii_regval;
9907
9908                 if (!netif_running(dev))
9909                         return -EAGAIN;
9910
9911                 mutex_lock(&bp->port.phy_mutex);
9912                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
9913                                       DEFAULT_PHY_DEV_ADDR,
9914                                       (data->reg_num & 0x1f), &mii_regval);
9915                 data->val_out = mii_regval;
9916                 mutex_unlock(&bp->port.phy_mutex);
9917                 return err;
9918         }
9919
9920         case SIOCSMIIREG:
9921                 if (!capable(CAP_NET_ADMIN))
9922                         return -EPERM;
9923
9924                 if (!netif_running(dev))
9925                         return -EAGAIN;
9926
9927                 mutex_lock(&bp->port.phy_mutex);
9928                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
9929                                        DEFAULT_PHY_DEV_ADDR,
9930                                        (data->reg_num & 0x1f), data->val_in);
9931                 mutex_unlock(&bp->port.phy_mutex);
9932                 return err;
9933
9934         default:
9935                 /* do nothing */
9936                 break;
9937         }
9938
9939         return -EOPNOTSUPP;
9940 }
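
/* Editor's illustrative sketch (not driver code): how userspace reaches
 * the MII handlers above, in the mii-tool style.  The interface name
 * "eth0" and the already-open()ed socket fd are assumptions.
 */
#if 0
        struct ifreq ifr;
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
        ioctl(fd, SIOCGMIIPHY, &ifr);           /* fills mii->phy_id */
        mii->reg_num = 1;                       /* e.g. MII status register */
        ioctl(fd, SIOCGMIIREG, &ifr);           /* result in mii->val_out */
#endif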
9941
9942 /* called with rtnl_lock */
9943 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9944 {
9945         struct bnx2x *bp = netdev_priv(dev);
9946         int rc = 0;
9947
9948         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9949             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9950                 return -EINVAL;
9951
9952         /* This does not race with packet allocation
9953          * because the actual alloc size is
9954          * only updated as part of load
9955          */
9956         dev->mtu = new_mtu;
9957
9958         if (netif_running(dev)) {
9959                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9960                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9961         }
9962
9963         return rc;
9964 }
9965
9966 static void bnx2x_tx_timeout(struct net_device *dev)
9967 {
9968         struct bnx2x *bp = netdev_priv(dev);
9969
9970 #ifdef BNX2X_STOP_ON_ERROR
9971         if (!bp->panic)
9972                 bnx2x_panic();
9973 #endif
9974         /* This allows the netif to be shut down gracefully before resetting */
9975         schedule_work(&bp->reset_task);
9976 }
9977
9978 #ifdef BCM_VLAN
9979 /* called with rtnl_lock */
9980 static void bnx2x_vlan_rx_register(struct net_device *dev,
9981                                    struct vlan_group *vlgrp)
9982 {
9983         struct bnx2x *bp = netdev_priv(dev);
9984
9985         bp->vlgrp = vlgrp;
9986         if (netif_running(dev))
9987                 bnx2x_set_client_config(bp);
9988 }
9989
9990 #endif
9991
9992 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9993 static void poll_bnx2x(struct net_device *dev)
9994 {
9995         struct bnx2x *bp = netdev_priv(dev);
9996
9997         disable_irq(bp->pdev->irq);
9998         bnx2x_interrupt(bp->pdev->irq, dev);
9999         enable_irq(bp->pdev->irq);
10000 }
10001 #endif
10002
10003 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10004                                     struct net_device *dev)
10005 {
10006         struct bnx2x *bp;
10007         int rc;
10008
10009         SET_NETDEV_DEV(dev, &pdev->dev);
10010         bp = netdev_priv(dev);
10011
10012         bp->dev = dev;
10013         bp->pdev = pdev;
10014         bp->flags = 0;
10015         bp->func = PCI_FUNC(pdev->devfn);
10016
10017         rc = pci_enable_device(pdev);
10018         if (rc) {
10019                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10020                 goto err_out;
10021         }
10022
10023         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10024                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10025                        " aborting\n");
10026                 rc = -ENODEV;
10027                 goto err_out_disable;
10028         }
10029
10030         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10031                 printk(KERN_ERR PFX "Cannot find second PCI device"
10032                        " base address, aborting\n");
10033                 rc = -ENODEV;
10034                 goto err_out_disable;
10035         }
10036
10037         if (atomic_read(&pdev->enable_cnt) == 1) {
10038                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10039                 if (rc) {
10040                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10041                                " aborting\n");
10042                         goto err_out_disable;
10043                 }
10044
10045                 pci_set_master(pdev);
10046                 pci_save_state(pdev);
10047         }
10048
10049         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10050         if (bp->pm_cap == 0) {
10051                 printk(KERN_ERR PFX "Cannot find power management"
10052                        " capability, aborting\n");
10053                 rc = -EIO;
10054                 goto err_out_release;
10055         }
10056
10057         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10058         if (bp->pcie_cap == 0) {
10059                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10060                        " aborting\n");
10061                 rc = -EIO;
10062                 goto err_out_release;
10063         }
10064
10065         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10066                 bp->flags |= USING_DAC_FLAG;
10067                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10068                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10069                                " failed, aborting\n");
10070                         rc = -EIO;
10071                         goto err_out_release;
10072                 }
10073
10074         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10075                 printk(KERN_ERR PFX "System does not support DMA,"
10076                        " aborting\n");
10077                 rc = -EIO;
10078                 goto err_out_release;
10079         }
10080
10081         dev->mem_start = pci_resource_start(pdev, 0);
10082         dev->base_addr = dev->mem_start;
10083         dev->mem_end = pci_resource_end(pdev, 0);
10084
10085         dev->irq = pdev->irq;
10086
10087         bp->regview = ioremap_nocache(dev->base_addr,
10088                                       pci_resource_len(pdev, 0));
10089         if (!bp->regview) {
10090                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10091                 rc = -ENOMEM;
10092                 goto err_out_release;
10093         }
10094
10095         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10096                                         min_t(u64, BNX2X_DB_SIZE,
10097                                               pci_resource_len(pdev, 2)));
10098         if (!bp->doorbells) {
10099                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10100                 rc = -ENOMEM;
10101                 goto err_out_unmap;
10102         }
10103
10104         bnx2x_set_power_state(bp, PCI_D0);
10105
10106         /* clean indirect addresses */
10107         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10108                                PCICFG_VENDOR_ID_OFFSET);
10109         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10110         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10111         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10112         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10113
10114         dev->hard_start_xmit = bnx2x_start_xmit;
10115         dev->watchdog_timeo = TX_TIMEOUT;
10116
10117         dev->ethtool_ops = &bnx2x_ethtool_ops;
10118         dev->open = bnx2x_open;
10119         dev->stop = bnx2x_close;
10120         dev->set_multicast_list = bnx2x_set_rx_mode;
10121         dev->set_mac_address = bnx2x_change_mac_addr;
10122         dev->do_ioctl = bnx2x_ioctl;
10123         dev->change_mtu = bnx2x_change_mtu;
10124         dev->tx_timeout = bnx2x_tx_timeout;
10125 #ifdef BCM_VLAN
10126         dev->vlan_rx_register = bnx2x_vlan_rx_register;
10127 #endif
10128 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10129         dev->poll_controller = poll_bnx2x;
10130 #endif
10131         dev->features |= NETIF_F_SG;
10132         dev->features |= NETIF_F_HW_CSUM;
10133         if (bp->flags & USING_DAC_FLAG)
10134                 dev->features |= NETIF_F_HIGHDMA;
10135 #ifdef BCM_VLAN
10136         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10137 #endif
10138         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10139         dev->features |= NETIF_F_TSO6;
10140
10141         return 0;
10142
10143 err_out_unmap:
10144         if (bp->regview) {
10145                 iounmap(bp->regview);
10146                 bp->regview = NULL;
10147         }
10148         if (bp->doorbells) {
10149                 iounmap(bp->doorbells);
10150                 bp->doorbells = NULL;
10151         }
10152
10153 err_out_release:
10154         if (atomic_read(&pdev->enable_cnt) == 1)
10155                 pci_release_regions(pdev);
10156
10157 err_out_disable:
10158         pci_disable_device(pdev);
10159         pci_set_drvdata(pdev, NULL);
10160
10161 err_out:
10162         return rc;
10163 }
10164
10165 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10166 {
10167         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10168
10169         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10170         return val;
10171 }
10172
10173 /* return value: 1=2.5GHz, 2=5GHz */
10174 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10175 {
10176         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10177
10178         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10179         return val;
10180 }
10181
10182 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10183                                     const struct pci_device_id *ent)
10184 {
10185         static int version_printed;
10186         struct net_device *dev = NULL;
10187         struct bnx2x *bp;
10188         int rc;
10189         DECLARE_MAC_BUF(mac);
10190
10191         if (version_printed++ == 0)
10192                 printk(KERN_INFO "%s", version);
10193
10194         /* dev zeroed in init_etherdev */
10195         dev = alloc_etherdev(sizeof(*bp));
10196         if (!dev) {
10197                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10198                 return -ENOMEM;
10199         }
10200
10201         netif_carrier_off(dev);
10202
10203         bp = netdev_priv(dev);
10204         bp->msglevel = debug;
10205
10206         rc = bnx2x_init_dev(pdev, dev);
10207         if (rc < 0) {
10208                 free_netdev(dev);
10209                 return rc;
10210         }
10211
10212         rc = register_netdev(dev);
10213         if (rc) {
10214                 dev_err(&pdev->dev, "Cannot register net device\n");
10215                 goto init_one_exit;
10216         }
10217
10218         pci_set_drvdata(pdev, dev);
10219
10220         rc = bnx2x_init_bp(bp);
10221         if (rc) {
10222                 unregister_netdev(dev);
10223                 goto init_one_exit;
10224         }
10225
10226         bp->common.name = board_info[ent->driver_data].name;
10227         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10228                " IRQ %d, ", dev->name, bp->common.name,
10229                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10230                bnx2x_get_pcie_width(bp),
10231                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10232                dev->base_addr, bp->pdev->irq);
10233         printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10234         return 0;
10235
10236 init_one_exit:
10237         if (bp->regview)
10238                 iounmap(bp->regview);
10239
10240         if (bp->doorbells)
10241                 iounmap(bp->doorbells);
10242
10243         free_netdev(dev);
10244
10245         if (atomic_read(&pdev->enable_cnt) == 1)
10246                 pci_release_regions(pdev);
10247
10248         pci_disable_device(pdev);
10249         pci_set_drvdata(pdev, NULL);
10250
10251         return rc;
10252 }
10253
10254 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10255 {
10256         struct net_device *dev = pci_get_drvdata(pdev);
10257         struct bnx2x *bp;
10258
10259         if (!dev) {
10260                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10261                 return;
10262         }
10263         bp = netdev_priv(dev);
10264
10265         unregister_netdev(dev);
10266
10267         if (bp->regview)
10268                 iounmap(bp->regview);
10269
10270         if (bp->doorbells)
10271                 iounmap(bp->doorbells);
10272
10273         free_netdev(dev);
10274
10275         if (atomic_read(&pdev->enable_cnt) == 1)
10276                 pci_release_regions(pdev);
10277
10278         pci_disable_device(pdev);
10279         pci_set_drvdata(pdev, NULL);
10280 }
10281
10282 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10283 {
10284         struct net_device *dev = pci_get_drvdata(pdev);
10285         struct bnx2x *bp;
10286
10287         if (!dev) {
10288                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10289                 return -ENODEV;
10290         }
10291         bp = netdev_priv(dev);
10292
10293         rtnl_lock();
10294
10295         pci_save_state(pdev);
10296
10297         if (!netif_running(dev)) {
10298                 rtnl_unlock();
10299                 return 0;
10300         }
10301
10302         netif_device_detach(dev);
10303
10304         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10305
10306         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10307
10308         rtnl_unlock();
10309
10310         return 0;
10311 }
10312
10313 static int bnx2x_resume(struct pci_dev *pdev)
10314 {
10315         struct net_device *dev = pci_get_drvdata(pdev);
10316         struct bnx2x *bp;
10317         int rc;
10318
10319         if (!dev) {
10320                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10321                 return -ENODEV;
10322         }
10323         bp = netdev_priv(dev);
10324
10325         rtnl_lock();
10326
10327         pci_restore_state(pdev);
10328
10329         if (!netif_running(dev)) {
10330                 rtnl_unlock();
10331                 return 0;
10332         }
10333
10334         bnx2x_set_power_state(bp, PCI_D0);
10335         netif_device_attach(dev);
10336
10337         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10338
10339         rtnl_unlock();
10340
10341         return rc;
10342 }
10343
10344 /**
10345  * bnx2x_io_error_detected - called when PCI error is detected
10346  * @pdev: Pointer to PCI device
10347  * @state: The current pci connection state
10348  *
10349  * This function is called after a PCI bus error affecting
10350  * this device has been detected.
10351  */
10352 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10353                                                 pci_channel_state_t state)
10354 {
10355         struct net_device *dev = pci_get_drvdata(pdev);
10356         struct bnx2x *bp = netdev_priv(dev);
10357
10358         rtnl_lock();
10359
10360         netif_device_detach(dev);
10361
10362         if (netif_running(dev))
10363                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10364
10365         pci_disable_device(pdev);
10366
10367         rtnl_unlock();
10368
10369         /* Request a slot reset */
10370         return PCI_ERS_RESULT_NEED_RESET;
10371 }
10372
10373 /**
10374  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10375  * @pdev: Pointer to PCI device
10376  *
10377  * Restart the card from scratch, as if from a cold-boot.
10378  */
10379 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10380 {
10381         struct net_device *dev = pci_get_drvdata(pdev);
10382         struct bnx2x *bp = netdev_priv(dev);
10383
10384         rtnl_lock();
10385
10386         if (pci_enable_device(pdev)) {
10387                 dev_err(&pdev->dev,
10388                         "Cannot re-enable PCI device after reset\n");
10389                 rtnl_unlock();
10390                 return PCI_ERS_RESULT_DISCONNECT;
10391         }
10392
10393         pci_set_master(pdev);
10394         pci_restore_state(pdev);
10395
10396         if (netif_running(dev))
10397                 bnx2x_set_power_state(bp, PCI_D0);
10398
10399         rtnl_unlock();
10400
10401         return PCI_ERS_RESULT_RECOVERED;
10402 }
10403
10404 /**
10405  * bnx2x_io_resume - called when traffic can start flowing again
10406  * @pdev: Pointer to PCI device
10407  *
10408  * This callback is called when the error recovery driver tells us that
10409  * it's OK to resume normal operation.
10410  */
10411 static void bnx2x_io_resume(struct pci_dev *pdev)
10412 {
10413         struct net_device *dev = pci_get_drvdata(pdev);
10414         struct bnx2x *bp = netdev_priv(dev);
10415
10416         rtnl_lock();
10417
10418         if (netif_running(dev))
10419                 bnx2x_nic_load(bp, LOAD_OPEN);
10420
10421         netif_device_attach(dev);
10422
10423         rtnl_unlock();
10424 }
10425
10426 static struct pci_error_handlers bnx2x_err_handler = {
10427         .error_detected = bnx2x_io_error_detected,
10428         .slot_reset = bnx2x_io_slot_reset,
10429         .resume = bnx2x_io_resume,
10430 };
10431
10432 static struct pci_driver bnx2x_pci_driver = {
10433         .name        = DRV_MODULE_NAME,
10434         .id_table    = bnx2x_pci_tbl,
10435         .probe       = bnx2x_init_one,
10436         .remove      = __devexit_p(bnx2x_remove_one),
10437         .suspend     = bnx2x_suspend,
10438         .resume      = bnx2x_resume,
10439         .err_handler = &bnx2x_err_handler,
10440 };
10441
10442 static int __init bnx2x_init(void)
10443 {
10444         return pci_register_driver(&bnx2x_pci_driver);
10445 }
10446
10447 static void __exit bnx2x_cleanup(void)
10448 {
10449         pci_unregister_driver(&bnx2x_pci_driver);
10450 }
10451
10452 module_init(bnx2x_init);
10453 module_exit(bnx2x_cleanup);
10454