bnx2x: Not dropping packets with L3/L4 checksum error
[safe/jmp/linux-2.6] / drivers / net / bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2008 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #ifdef NETIF_F_HW_VLAN_TX
42         #include <linux/if_vlan.h>
43 #endif
44 #include <net/ip.h>
45 #include <net/tcp.h>
46 #include <net/checksum.h>
47 #include <linux/version.h>
48 #include <net/ip6_checksum.h>
49 #include <linux/workqueue.h>
50 #include <linux/crc32.h>
51 #include <linux/crc32c.h>
52 #include <linux/prefetch.h>
53 #include <linux/zlib.h>
54 #include <linux/io.h>
55
56 #include "bnx2x_reg.h"
57 #include "bnx2x_fw_defs.h"
58 #include "bnx2x_hsi.h"
59 #include "bnx2x_link.h"
60 #include "bnx2x.h"
61 #include "bnx2x_init.h"
62
63 #define DRV_MODULE_VERSION      "1.45.6"
64 #define DRV_MODULE_RELDATE      "2008/06/23"
65 #define BNX2X_BC_VER            0x040200
66
67 /* Time in jiffies before concluding the transmitter is hung */
68 #define TX_TIMEOUT              (5*HZ)
69
70 static char version[] __devinitdata =
71         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
72         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
73
74 MODULE_AUTHOR("Eliezer Tamir");
75 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
76 MODULE_LICENSE("GPL");
77 MODULE_VERSION(DRV_MODULE_VERSION);
78
79 static int disable_tpa;
80 static int use_inta;
81 static int poll;
82 static int debug;
83 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
84 static int use_multi;
85
86 module_param(disable_tpa, int, 0);
87 module_param(use_inta, int, 0);
88 module_param(poll, int, 0);
89 module_param(debug, int, 0);
90 MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
91 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
92 MODULE_PARM_DESC(poll, "use polling (for debug)");
93 MODULE_PARM_DESC(debug, "default debug msglevel");
94
95 #ifdef BNX2X_MULTI
96 module_param(use_multi, int, 0);
97 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
98 #endif
99
100 enum bnx2x_board_type {
101         BCM57710 = 0,
102         BCM57711 = 1,
103         BCM57711E = 2,
104 };
105
106 /* indexed by board_type, above */
107 static struct {
108         char *name;
109 } board_info[] __devinitdata = {
110         { "Broadcom NetXtreme II BCM57710 XGb" },
111         { "Broadcom NetXtreme II BCM57711 XGb" },
112         { "Broadcom NetXtreme II BCM57711E XGb" }
113 };
114
115
116 static const struct pci_device_id bnx2x_pci_tbl[] = {
117         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
119         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
120                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
121         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
122                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
123         { 0 }
124 };
125
126 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
127
128 /****************************************************************************
129 * General service functions
130 ****************************************************************************/
131
132 /* used only at init
133  * locking is done by mcp
134  */
135 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
136 {
137         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
138         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
139         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
140                                PCICFG_VENDOR_ID_OFFSET);
141 }
142
143 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
144 {
145         u32 val;
146
147         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
148         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
149         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
150                                PCICFG_VENDOR_ID_OFFSET);
151
152         return val;
153 }
154
155 static const u32 dmae_reg_go_c[] = {
156         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
157         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
158         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
159         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 };
161
162 /* copy command into DMAE command memory and set DMAE command go */
163 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
164                             int idx)
165 {
166         u32 cmd_offset;
167         int i;
168
169         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
170         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
171                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
172
173                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
175         }
176         REG_WR(bp, dmae_reg_go_c[idx], 1);
177 }
178
179 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180                       u32 len32)
181 {
182         struct dmae_command *dmae = &bp->init_dmae;
183         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
184         int cnt = 200;
185
186         if (!bp->dmae_ready) {
187                 u32 *data = bnx2x_sp(bp, wb_data[0]);
188
189                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
190                    "  using indirect\n", dst_addr, len32);
191                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
192                 return;
193         }
194
195         mutex_lock(&bp->dmae_mutex);
196
197         memset(dmae, 0, sizeof(struct dmae_command));
198
199         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
200                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
201                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
202 #ifdef __BIG_ENDIAN
203                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
204 #else
205                         DMAE_CMD_ENDIANITY_DW_SWAP |
206 #endif
207                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
208                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
209         dmae->src_addr_lo = U64_LO(dma_addr);
210         dmae->src_addr_hi = U64_HI(dma_addr);
211         dmae->dst_addr_lo = dst_addr >> 2;
212         dmae->dst_addr_hi = 0;
213         dmae->len = len32;
214         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
215         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
216         dmae->comp_val = DMAE_COMP_VAL;
217
218         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
219            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
220                     "dst_addr [%x:%08x (%08x)]\n"
221            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
222            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
225         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
226            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
228
229         *wb_comp = 0;
230
231         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
232
233         udelay(5);
234
235         while (*wb_comp != DMAE_COMP_VAL) {
236                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237
238                 /* adjust delay for emulation/FPGA */
239                 if (CHIP_REV_IS_SLOW(bp))
240                         msleep(100);
241                 else
242                         udelay(5);
243
244                 if (!cnt) {
245                         BNX2X_ERR("dmae timeout!\n");
246                         break;
247                 }
248                 cnt--;
249         }
250
251         mutex_unlock(&bp->dmae_mutex);
252 }
253
254 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
255 {
256         struct dmae_command *dmae = &bp->init_dmae;
257         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
258         int cnt = 200;
259
260         if (!bp->dmae_ready) {
261                 u32 *data = bnx2x_sp(bp, wb_data[0]);
262                 int i;
263
264                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
265                    "  using indirect\n", src_addr, len32);
266                 for (i = 0; i < len32; i++)
267                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
268                 return;
269         }
270
271         mutex_lock(&bp->dmae_mutex);
272
273         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
274         memset(dmae, 0, sizeof(struct dmae_command));
275
276         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
277                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
278                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
279 #ifdef __BIG_ENDIAN
280                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
281 #else
282                         DMAE_CMD_ENDIANITY_DW_SWAP |
283 #endif
284                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
285                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
286         dmae->src_addr_lo = src_addr >> 2;
287         dmae->src_addr_hi = 0;
288         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
289         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
290         dmae->len = len32;
291         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
292         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
293         dmae->comp_val = DMAE_COMP_VAL;
294
295         DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
296            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
297                     "dst_addr [%x:%08x (%08x)]\n"
298            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
299            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
300            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
301            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
302
303         *wb_comp = 0;
304
305         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
306
307         udelay(5);
308
309         while (*wb_comp != DMAE_COMP_VAL) {
310
311                 /* adjust delay for emulation/FPGA */
312                 if (CHIP_REV_IS_SLOW(bp))
313                         msleep(100);
314                 else
315                         udelay(5);
316
317                 if (!cnt) {
318                         BNX2X_ERR("dmae timeout!\n");
319                         break;
320                 }
321                 cnt--;
322         }
323         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
324            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
325            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
326
327         mutex_unlock(&bp->dmae_mutex);
328 }
329
330 /* used only for slowpath so not inlined */
331 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
332 {
333         u32 wb_write[2];
334
335         wb_write[0] = val_hi;
336         wb_write[1] = val_lo;
337         REG_WR_DMAE(bp, reg, wb_write, 2);
338 }
339
340 #ifdef USE_WB_RD
341 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
342 {
343         u32 wb_data[2];
344
345         REG_RD_DMAE(bp, reg, wb_data, 2);
346
347         return HILO_U64(wb_data[0], wb_data[1]);
348 }
349 #endif
350
351 static int bnx2x_mc_assert(struct bnx2x *bp)
352 {
353         char last_idx;
354         int i, rc = 0;
355         u32 row0, row1, row2, row3;
356
357         /* XSTORM */
358         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
359                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
360         if (last_idx)
361                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
362
363         /* print the asserts */
364         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
365
366                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367                               XSTORM_ASSERT_LIST_OFFSET(i));
368                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
370                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
372                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
373                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
374
375                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
376                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
377                                   " 0x%08x 0x%08x 0x%08x\n",
378                                   i, row3, row2, row1, row0);
379                         rc++;
380                 } else {
381                         break;
382                 }
383         }
384
385         /* TSTORM */
386         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
387                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
388         if (last_idx)
389                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
390
391         /* print the asserts */
392         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
393
394                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395                               TSTORM_ASSERT_LIST_OFFSET(i));
396                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
398                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
400                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
401                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
402
403                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
404                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
405                                   " 0x%08x 0x%08x 0x%08x\n",
406                                   i, row3, row2, row1, row0);
407                         rc++;
408                 } else {
409                         break;
410                 }
411         }
412
413         /* CSTORM */
414         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
415                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
416         if (last_idx)
417                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
418
419         /* print the asserts */
420         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
421
422                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423                               CSTORM_ASSERT_LIST_OFFSET(i));
424                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
426                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
428                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
429                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
430
431                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
432                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
433                                   " 0x%08x 0x%08x 0x%08x\n",
434                                   i, row3, row2, row1, row0);
435                         rc++;
436                 } else {
437                         break;
438                 }
439         }
440
441         /* USTORM */
442         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
443                            USTORM_ASSERT_LIST_INDEX_OFFSET);
444         if (last_idx)
445                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
446
447         /* print the asserts */
448         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
449
450                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
451                               USTORM_ASSERT_LIST_OFFSET(i));
452                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
453                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
454                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
455                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
456                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
457                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
458
459                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
460                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
461                                   " 0x%08x 0x%08x 0x%08x\n",
462                                   i, row3, row2, row1, row0);
463                         rc++;
464                 } else {
465                         break;
466                 }
467         }
468
469         return rc;
470 }
471
472 static void bnx2x_fw_dump(struct bnx2x *bp)
473 {
474         u32 mark, offset;
475         u32 data[9];
476         int word;
477
478         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
479         mark = ((mark + 0x3) & ~0x3);
480         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
481
482         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
483                 for (word = 0; word < 8; word++)
484                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
485                                                   offset + 4*word));
486                 data[8] = 0x0;
487                 printk(KERN_CONT "%s", (char *)data);
488         }
489         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
490                 for (word = 0; word < 8; word++)
491                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
492                                                   offset + 4*word));
493                 data[8] = 0x0;
494                 printk(KERN_CONT "%s", (char *)data);
495         }
496         printk("\n" KERN_ERR PFX "end of fw dump\n");
497 }
498
499 static void bnx2x_panic_dump(struct bnx2x *bp)
500 {
501         int i;
502         u16 j, start, end;
503
504         BNX2X_ERR("begin crash dump -----------------\n");
505
506         for_each_queue(bp, i) {
507                 struct bnx2x_fastpath *fp = &bp->fp[i];
508                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
509
510                 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
511                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
512                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
513                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
514                 BNX2X_ERR("          rx_comp_prod(%x)  rx_comp_cons(%x)"
515                           "  *rx_cons_sb(%x)  *rx_bd_cons_sb(%x)"
516                           "  rx_sge_prod(%x)  last_max_sge(%x)\n",
517                           fp->rx_comp_prod, fp->rx_comp_cons,
518                           le16_to_cpu(*fp->rx_cons_sb),
519                           le16_to_cpu(*fp->rx_bd_cons_sb),
520                           fp->rx_sge_prod, fp->last_max_sge);
521                 BNX2X_ERR("          fp_c_idx(%x)  fp_u_idx(%x)"
522                           "  bd data(%x,%x)  rx_alloc_failed(%lx)\n",
523                           fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
524                           hw_prods->bds_prod, fp->rx_alloc_failed);
525
526                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
527                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
528                 for (j = start; j < end; j++) {
529                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
530
531                         BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
532                                   sw_bd->skb, sw_bd->first_bd);
533                 }
534
535                 start = TX_BD(fp->tx_bd_cons - 10);
536                 end = TX_BD(fp->tx_bd_cons + 254);
537                 for (j = start; j < end; j++) {
538                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
539
540                         BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
541                                   j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
542                 }
543
544                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
545                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
546                 for (j = start; j < end; j++) {
547                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
548                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
549
550                         BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
551                                   j, rx_bd[1], rx_bd[0], sw_bd->skb);
552                 }
553
554                 start = 0;
555                 end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
556                 for (j = start; j < end; j++) {
557                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
558                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
559
560                         BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
561                                   j, rx_sge[1], rx_sge[0], sw_page->page);
562                 }
563
564                 start = RCQ_BD(fp->rx_comp_cons - 10);
565                 end = RCQ_BD(fp->rx_comp_cons + 503);
566                 for (j = start; j < end; j++) {
567                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
568
569                         BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
570                                   j, cqe[0], cqe[1], cqe[2], cqe[3]);
571                 }
572         }
573
574         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
575                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
576                   "  spq_prod_idx(%u)\n",
577                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
578                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
579
580         bnx2x_fw_dump(bp);
581         bnx2x_mc_assert(bp);
582         BNX2X_ERR("end crash dump -----------------\n");
583
584         bp->stats_state = STATS_STATE_DISABLED;
585         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
586 }
587
588 static void bnx2x_int_enable(struct bnx2x *bp)
589 {
590         int port = BP_PORT(bp);
591         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
592         u32 val = REG_RD(bp, addr);
593         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
594
595         if (msix) {
596                 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
597                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
598                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
599         } else {
600                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
601                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
602                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
603                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
604
605                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
606                    val, port, addr, msix);
607
608                 REG_WR(bp, addr, val);
609
610                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
611         }
612
613         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
614            val, port, addr, msix);
615
616         REG_WR(bp, addr, val);
617
618         if (CHIP_IS_E1H(bp)) {
619                 /* init leading/trailing edge */
620                 if (IS_E1HMF(bp)) {
621                         val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
622                         if (bp->port.pmf)
623                                 /* enable nig attention */
624                                 val |= 0x0100;
625                 } else
626                         val = 0xffff;
627
628                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
629                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
630         }
631 }
632
633 static void bnx2x_int_disable(struct bnx2x *bp)
634 {
635         int port = BP_PORT(bp);
636         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
637         u32 val = REG_RD(bp, addr);
638
639         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
641                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
642                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
643
644         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
645            val, port, addr);
646
647         REG_WR(bp, addr, val);
648         if (REG_RD(bp, addr) != val)
649                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
650 }
651
652 static void bnx2x_int_disable_sync(struct bnx2x *bp)
653 {
654         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
655         int i;
656
657         /* disable interrupt handling */
658         atomic_inc(&bp->intr_sem);
659         /* prevent the HW from sending interrupts */
660         bnx2x_int_disable(bp);
661
662         /* make sure all ISRs are done */
663         if (msix) {
664                 for_each_queue(bp, i)
665                         synchronize_irq(bp->msix_table[i].vector);
666
667                 /* one more for the Slow Path IRQ */
668                 synchronize_irq(bp->msix_table[i].vector);
669         } else
670                 synchronize_irq(bp->pdev->irq);
671
672         /* make sure sp_task is not running */
673         cancel_work_sync(&bp->sp_task);
674 }
675
676 /* fast path */
677
678 /*
679  * General service functions
680  */
681
682 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
683                                 u8 storm, u16 index, u8 op, u8 update)
684 {
685         u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
686         struct igu_ack_register igu_ack;
687
688         igu_ack.status_block_index = index;
689         igu_ack.sb_id_and_flags =
690                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
691                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
692                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
693                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
694
695         DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
696            (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
697         REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
698 }
699
700 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
701 {
702         struct host_status_block *fpsb = fp->status_blk;
703         u16 rc = 0;
704
705         barrier(); /* status block is written to by the chip */
706         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
707                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
708                 rc |= 1;
709         }
710         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
711                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
712                 rc |= 2;
713         }
714         return rc;
715 }
716
717 static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
718 {
719         u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
720
721         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
722                 rx_cons_sb++;
723
724         if ((fp->rx_comp_cons != rx_cons_sb) ||
725             (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
726             (fp->tx_pkt_prod != fp->tx_pkt_cons))
727                 return 1;
728
729         return 0;
730 }
731
732 static u16 bnx2x_ack_int(struct bnx2x *bp)
733 {
734         u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
735         u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
736
737         DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
738            result, BAR_IGU_INTMEM + igu_addr);
739
740 #ifdef IGU_DEBUG
741 #warning IGU_DEBUG active
742         if (result == 0) {
743                 BNX2X_ERR("read %x from IGU\n", result);
744                 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
745         }
746 #endif
747         return result;
748 }
749
750
751 /*
752  * fast path service functions
753  */
754
755 /* free skb in the packet ring at pos idx
756  * return idx of last bd freed
757  */
758 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
759                              u16 idx)
760 {
761         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
762         struct eth_tx_bd *tx_bd;
763         struct sk_buff *skb = tx_buf->skb;
764         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
765         int nbd;
766
767         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
768            idx, tx_buf, skb);
769
770         /* unmap first bd */
771         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
772         tx_bd = &fp->tx_desc_ring[bd_idx];
773         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
774                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
775
776         nbd = le16_to_cpu(tx_bd->nbd) - 1;
777         new_cons = nbd + tx_buf->first_bd;
778 #ifdef BNX2X_STOP_ON_ERROR
779         if (nbd > (MAX_SKB_FRAGS + 2)) {
780                 BNX2X_ERR("BAD nbd!\n");
781                 bnx2x_panic();
782         }
783 #endif
784
785         /* Skip a parse bd and the TSO split header bd
786            since they have no mapping */
787         if (nbd)
788                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
789
790         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
791                                            ETH_TX_BD_FLAGS_TCP_CSUM |
792                                            ETH_TX_BD_FLAGS_SW_LSO)) {
793                 if (--nbd)
794                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
795                 tx_bd = &fp->tx_desc_ring[bd_idx];
796                 /* is this a TSO split header bd? */
797                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
798                         if (--nbd)
799                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
800                 }
801         }
802
803         /* now free frags */
804         while (nbd > 0) {
805
806                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
807                 tx_bd = &fp->tx_desc_ring[bd_idx];
808                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
809                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
810                 if (--nbd)
811                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
812         }
813
814         /* release skb */
815         WARN_ON(!skb);
816         dev_kfree_skb(skb);
817         tx_buf->first_bd = 0;
818         tx_buf->skb = NULL;
819
820         return new_cons;
821 }
822
823 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
824 {
825         s16 used;
826         u16 prod;
827         u16 cons;
828
829         barrier(); /* Tell compiler that prod and cons can change */
830         prod = fp->tx_bd_prod;
831         cons = fp->tx_bd_cons;
832
833         /* NUM_TX_RINGS = number of "next-page" entries
834            It will be used as a threshold */
835         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
836
837 #ifdef BNX2X_STOP_ON_ERROR
838         WARN_ON(used < 0);
839         WARN_ON(used > fp->bp->tx_ring_size);
840         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
841 #endif
842
843         return (s16)(fp->bp->tx_ring_size) - used;
844 }
845
846 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
847 {
848         struct bnx2x *bp = fp->bp;
849         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
850         int done = 0;
851
852 #ifdef BNX2X_STOP_ON_ERROR
853         if (unlikely(bp->panic))
854                 return;
855 #endif
856
857         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
858         sw_cons = fp->tx_pkt_cons;
859
860         while (sw_cons != hw_cons) {
861                 u16 pkt_cons;
862
863                 pkt_cons = TX_BD(sw_cons);
864
865                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
866
867                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
868                    hw_cons, sw_cons, pkt_cons);
869
870 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
871                         rmb();
872                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
873                 }
874 */
875                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
876                 sw_cons++;
877                 done++;
878
879                 if (done == work)
880                         break;
881         }
882
883         fp->tx_pkt_cons = sw_cons;
884         fp->tx_bd_cons = bd_cons;
885
886         /* Need to make the tx_cons update visible to start_xmit()
887          * before checking for netif_queue_stopped().  Without the
888          * memory barrier, there is a small possibility that start_xmit()
889          * will miss it and cause the queue to be stopped forever.
890          */
891         smp_mb();
892
893         /* TBD need a thresh? */
894         if (unlikely(netif_queue_stopped(bp->dev))) {
895
896                 netif_tx_lock(bp->dev);
897
898                 if (netif_queue_stopped(bp->dev) &&
899                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
900                         netif_wake_queue(bp->dev);
901
902                 netif_tx_unlock(bp->dev);
903         }
904 }
905
906 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
907                            union eth_rx_cqe *rr_cqe)
908 {
909         struct bnx2x *bp = fp->bp;
910         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
911         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
912
913         DP(BNX2X_MSG_SP,
914            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
915            FP_IDX(fp), cid, command, bp->state,
916            rr_cqe->ramrod_cqe.ramrod_type);
917
918         bp->spq_left++;
919
920         if (FP_IDX(fp)) {
921                 switch (command | fp->state) {
922                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
923                                                 BNX2X_FP_STATE_OPENING):
924                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
925                            cid);
926                         fp->state = BNX2X_FP_STATE_OPEN;
927                         break;
928
929                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
930                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
931                            cid);
932                         fp->state = BNX2X_FP_STATE_HALTED;
933                         break;
934
935                 default:
936                         BNX2X_ERR("unexpected MC reply (%d)  "
937                                   "fp->state is %x\n", command, fp->state);
938                         break;
939                 }
940                 mb(); /* force bnx2x_wait_ramrod() to see the change */
941                 return;
942         }
943
944         switch (command | bp->state) {
945         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
946                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
947                 bp->state = BNX2X_STATE_OPEN;
948                 break;
949
950         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
951                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
952                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
953                 fp->state = BNX2X_FP_STATE_HALTED;
954                 break;
955
956         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
957                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
958                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
959                 break;
960
961         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
962         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
963                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
964                 bp->set_mac_pending = 0;
965                 break;
966
967         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
968                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
969                 break;
970
971         default:
972                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
973                           command, bp->state);
974                 break;
975         }
976         mb(); /* force bnx2x_wait_ramrod() to see the change */
977 }
978
979 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
980                                      struct bnx2x_fastpath *fp, u16 index)
981 {
982         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
983         struct page *page = sw_buf->page;
984         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
985
986         /* Skip "next page" elements */
987         if (!page)
988                 return;
989
990         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
991                        BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
992         __free_pages(page, PAGES_PER_SGE_SHIFT);
993
994         sw_buf->page = NULL;
995         sge->addr_hi = 0;
996         sge->addr_lo = 0;
997 }
998
999 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1000                                            struct bnx2x_fastpath *fp, int last)
1001 {
1002         int i;
1003
1004         for (i = 0; i < last; i++)
1005                 bnx2x_free_rx_sge(bp, fp, i);
1006 }
1007
1008 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1009                                      struct bnx2x_fastpath *fp, u16 index)
1010 {
1011         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1012         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1013         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1014         dma_addr_t mapping;
1015
1016         if (unlikely(page == NULL))
1017                 return -ENOMEM;
1018
1019         mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1020                                PCI_DMA_FROMDEVICE);
1021         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1022                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1023                 return -ENOMEM;
1024         }
1025
1026         sw_buf->page = page;
1027         pci_unmap_addr_set(sw_buf, mapping, mapping);
1028
1029         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1030         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1031
1032         return 0;
1033 }
1034
1035 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1036                                      struct bnx2x_fastpath *fp, u16 index)
1037 {
1038         struct sk_buff *skb;
1039         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1040         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1041         dma_addr_t mapping;
1042
1043         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1044         if (unlikely(skb == NULL))
1045                 return -ENOMEM;
1046
1047         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1048                                  PCI_DMA_FROMDEVICE);
1049         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1050                 dev_kfree_skb(skb);
1051                 return -ENOMEM;
1052         }
1053
1054         rx_buf->skb = skb;
1055         pci_unmap_addr_set(rx_buf, mapping, mapping);
1056
1057         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1058         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1059
1060         return 0;
1061 }
1062
1063 /* note that we are not allocating a new skb,
1064  * we are just moving one from cons to prod
1065  * we are not creating a new mapping,
1066  * so there is no need to check for dma_mapping_error().
1067  */
1068 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1069                                struct sk_buff *skb, u16 cons, u16 prod)
1070 {
1071         struct bnx2x *bp = fp->bp;
1072         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1073         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1074         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1075         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1076
1077         pci_dma_sync_single_for_device(bp->pdev,
1078                                        pci_unmap_addr(cons_rx_buf, mapping),
1079                                        bp->rx_offset + RX_COPY_THRESH,
1080                                        PCI_DMA_FROMDEVICE);
1081
1082         prod_rx_buf->skb = cons_rx_buf->skb;
1083         pci_unmap_addr_set(prod_rx_buf, mapping,
1084                            pci_unmap_addr(cons_rx_buf, mapping));
1085         *prod_bd = *cons_bd;
1086 }
1087
1088 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1089                                              u16 idx)
1090 {
1091         u16 last_max = fp->last_max_sge;
1092
1093         if (SUB_S16(idx, last_max) > 0)
1094                 fp->last_max_sge = idx;
1095 }
1096
1097 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1098 {
1099         int i, j;
1100
1101         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1102                 int idx = RX_SGE_CNT * i - 1;
1103
1104                 for (j = 0; j < 2; j++) {
1105                         SGE_MASK_CLEAR_BIT(fp, idx);
1106                         idx--;
1107                 }
1108         }
1109 }
1110
1111 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1112                                   struct eth_fast_path_rx_cqe *fp_cqe)
1113 {
1114         struct bnx2x *bp = fp->bp;
1115         u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1116                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1117                       BCM_PAGE_SHIFT;
1118         u16 last_max, last_elem, first_elem;
1119         u16 delta = 0;
1120         u16 i;
1121
1122         if (!sge_len)
1123                 return;
1124
1125         /* First mark all used pages */
1126         for (i = 0; i < sge_len; i++)
1127                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1128
1129         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1130            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1131
1132         /* Here we assume that the last SGE index is the biggest */
1133         prefetch((void *)(fp->sge_mask));
1134         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1135
1136         last_max = RX_SGE(fp->last_max_sge);
1137         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1138         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1139
1140         /* If ring is not full */
1141         if (last_elem + 1 != first_elem)
1142                 last_elem++;
1143
1144         /* Now update the prod */
1145         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1146                 if (likely(fp->sge_mask[i]))
1147                         break;
1148
1149                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1150                 delta += RX_SGE_MASK_ELEM_SZ;
1151         }
1152
1153         if (delta > 0) {
1154                 fp->rx_sge_prod += delta;
1155                 /* clear page-end entries */
1156                 bnx2x_clear_sge_mask_next_elems(fp);
1157         }
1158
1159         DP(NETIF_MSG_RX_STATUS,
1160            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1161            fp->last_max_sge, fp->rx_sge_prod);
1162 }
1163
1164 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1165 {
1166         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1167         memset(fp->sge_mask, 0xff,
1168                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1169
1170         /* Clear the two last indeces in the page to 1:
1171            these are the indeces that correspond to the "next" element,
1172            hence will never be indicated and should be removed from
1173            the calculations. */
1174         bnx2x_clear_sge_mask_next_elems(fp);
1175 }
1176
1177 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1178                             struct sk_buff *skb, u16 cons, u16 prod)
1179 {
1180         struct bnx2x *bp = fp->bp;
1181         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1182         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1183         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1184         dma_addr_t mapping;
1185
1186         /* move empty skb from pool to prod and map it */
1187         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1188         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1189                                  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1190         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1191
1192         /* move partial skb from cons to pool (don't unmap yet) */
1193         fp->tpa_pool[queue] = *cons_rx_buf;
1194
1195         /* mark bin state as start - print error if current state != stop */
1196         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1197                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1198
1199         fp->tpa_state[queue] = BNX2X_TPA_START;
1200
1201         /* point prod_bd to new skb */
1202         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1203         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1204
1205 #ifdef BNX2X_STOP_ON_ERROR
1206         fp->tpa_queue_used |= (1 << queue);
1207 #ifdef __powerpc64__
1208         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1209 #else
1210         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1211 #endif
1212            fp->tpa_queue_used);
1213 #endif
1214 }
1215
1216 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1217                                struct sk_buff *skb,
1218                                struct eth_fast_path_rx_cqe *fp_cqe,
1219                                u16 cqe_idx)
1220 {
1221         struct sw_rx_page *rx_pg, old_rx_pg;
1222         struct page *sge;
1223         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1224         u32 i, frag_len, frag_size, pages;
1225         int err;
1226         int j;
1227
1228         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1229         pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1230
1231         /* This is needed in order to enable forwarding support */
1232         if (frag_size)
1233                 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1234                                                max(frag_size, (u32)len_on_bd));
1235
1236 #ifdef BNX2X_STOP_ON_ERROR
1237         if (pages > 8*PAGES_PER_SGE) {
1238                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1239                           pages, cqe_idx);
1240                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1241                           fp_cqe->pkt_len, len_on_bd);
1242                 bnx2x_panic();
1243                 return -EINVAL;
1244         }
1245 #endif
1246
1247         /* Run through the SGL and compose the fragmented skb */
1248         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1249                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1250
1251                 /* FW gives the indices of the SGE as if the ring is an array
1252                    (meaning that "next" element will consume 2 indices) */
1253                 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1254                 rx_pg = &fp->rx_page_ring[sge_idx];
1255                 sge = rx_pg->page;
1256                 old_rx_pg = *rx_pg;
1257
1258                 /* If we fail to allocate a substitute page, we simply stop
1259                    where we are and drop the whole packet */
1260                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1261                 if (unlikely(err)) {
1262                         fp->rx_alloc_failed++;
1263                         return err;
1264                 }
1265
1266                 /* Unmap the page as we r going to pass it to the stack */
1267                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1268                               BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1269
1270                 /* Add one frag and update the appropriate fields in the skb */
1271                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1272
1273                 skb->data_len += frag_len;
1274                 skb->truesize += frag_len;
1275                 skb->len += frag_len;
1276
1277                 frag_size -= frag_len;
1278         }
1279
1280         return 0;
1281 }
1282
1283 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1284                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1285                            u16 cqe_idx)
1286 {
1287         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1288         struct sk_buff *skb = rx_buf->skb;
1289         /* alloc new skb */
1290         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1291
1292         /* Unmap skb in the pool anyway, as we are going to change
1293            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1294            fails. */
1295         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1296                          bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1297
1298         /* if alloc failed drop the packet and keep the buffer in the bin */
1299         if (likely(new_skb)) {
1300
1301                 prefetch(skb);
1302                 prefetch(((char *)(skb)) + 128);
1303
1304                 /* else fix ip xsum and give it to the stack */
1305                 /* (no need to map the new skb) */
1306 #ifdef BNX2X_STOP_ON_ERROR
1307                 if (pad + len > bp->rx_buf_size) {
1308                         BNX2X_ERR("skb_put is about to fail...  "
1309                                   "pad %d  len %d  rx_buf_size %d\n",
1310                                   pad, len, bp->rx_buf_size);
1311                         bnx2x_panic();
1312                         return;
1313                 }
1314 #endif
1315
1316                 skb_reserve(skb, pad);
1317                 skb_put(skb, len);
1318
1319                 skb->protocol = eth_type_trans(skb, bp->dev);
1320                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1321
1322                 {
1323                         struct iphdr *iph;
1324
1325                         iph = (struct iphdr *)skb->data;
1326                         iph->check = 0;
1327                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1328                 }
1329
1330                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1331                                          &cqe->fast_path_cqe, cqe_idx)) {
1332 #ifdef BCM_VLAN
1333                         if ((bp->vlgrp != NULL) &&
1334                             (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1335                              PARSING_FLAGS_VLAN))
1336                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1337                                                 le16_to_cpu(cqe->fast_path_cqe.
1338                                                             vlan_tag));
1339                         else
1340 #endif
1341                                 netif_receive_skb(skb);
1342                 } else {
1343                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1344                            " - dropping packet!\n");
1345                         dev_kfree_skb(skb);
1346                 }
1347
1348                 bp->dev->last_rx = jiffies;
1349
1350                 /* put new skb in bin */
1351                 fp->tpa_pool[queue].skb = new_skb;
1352
1353         } else {
1354                 DP(NETIF_MSG_RX_STATUS,
1355                    "Failed to allocate new skb - dropping packet!\n");
1356                 fp->rx_alloc_failed++;
1357         }
1358
1359         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1360 }
1361
1362 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1363                                         struct bnx2x_fastpath *fp,
1364                                         u16 bd_prod, u16 rx_comp_prod,
1365                                         u16 rx_sge_prod)
1366 {
1367         struct tstorm_eth_rx_producers rx_prods = {0};
1368         int i;
1369
1370         /* Update producers */
1371         rx_prods.bd_prod = bd_prod;
1372         rx_prods.cqe_prod = rx_comp_prod;
1373         rx_prods.sge_prod = rx_sge_prod;
1374
1375         for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1376                 REG_WR(bp, BAR_TSTRORM_INTMEM +
1377                        TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1378                        ((u32 *)&rx_prods)[i]);
1379
1380         DP(NETIF_MSG_RX_STATUS,
1381            "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
1382            bd_prod, rx_comp_prod, rx_sge_prod);
1383 }
1384
1385 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1386 {
1387         struct bnx2x *bp = fp->bp;
1388         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1389         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1390         int rx_pkt = 0;
1391         u16 queue;
1392
1393 #ifdef BNX2X_STOP_ON_ERROR
1394         if (unlikely(bp->panic))
1395                 return 0;
1396 #endif
1397
1398         /* CQ "next element" is of the size of the regular element,
1399            that's why it's ok here */
1400         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1401         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1402                 hw_comp_cons++;
1403
1404         bd_cons = fp->rx_bd_cons;
1405         bd_prod = fp->rx_bd_prod;
1406         bd_prod_fw = bd_prod;
1407         sw_comp_cons = fp->rx_comp_cons;
1408         sw_comp_prod = fp->rx_comp_prod;
1409
1410         /* Memory barrier necessary as speculative reads of the rx
1411          * buffer can be ahead of the index in the status block
1412          */
1413         rmb();
1414
1415         DP(NETIF_MSG_RX_STATUS,
1416            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1417            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1418
1419         while (sw_comp_cons != hw_comp_cons) {
1420                 struct sw_rx_bd *rx_buf = NULL;
1421                 struct sk_buff *skb;
1422                 union eth_rx_cqe *cqe;
1423                 u8 cqe_fp_flags;
1424                 u16 len, pad;
1425
1426                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1427                 bd_prod = RX_BD(bd_prod);
1428                 bd_cons = RX_BD(bd_cons);
1429
1430                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1431                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1432
1433                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1434                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1435                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1436                    cqe->fast_path_cqe.rss_hash_result,
1437                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1438                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1439
1440                 /* is this a slowpath msg? */
1441                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1442                         bnx2x_sp_event(fp, cqe);
1443                         goto next_cqe;
1444
1445                 /* this is an rx packet */
1446                 } else {
1447                         rx_buf = &fp->rx_buf_ring[bd_cons];
1448                         skb = rx_buf->skb;
1449                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1450                         pad = cqe->fast_path_cqe.placement_offset;
1451
1452                         /* If CQE is marked both TPA_START and TPA_END
1453                            it is a non-TPA CQE */
1454                         if ((!fp->disable_tpa) &&
1455                             (TPA_TYPE(cqe_fp_flags) !=
1456                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1457                                 queue = cqe->fast_path_cqe.queue_index;
1458
1459                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1460                                         DP(NETIF_MSG_RX_STATUS,
1461                                            "calling tpa_start on queue %d\n",
1462                                            queue);
1463
1464                                         bnx2x_tpa_start(fp, queue, skb,
1465                                                         bd_cons, bd_prod);
1466                                         goto next_rx;
1467                                 }
1468
1469                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1470                                         DP(NETIF_MSG_RX_STATUS,
1471                                            "calling tpa_stop on queue %d\n",
1472                                            queue);
1473
1474                                         if (!BNX2X_RX_SUM_FIX(cqe))
1475                                                 BNX2X_ERR("STOP on none TCP "
1476                                                           "data\n");
1477
1478                                         /* This is a size of the linear data
1479                                            on this skb */
1480                                         len = le16_to_cpu(cqe->fast_path_cqe.
1481                                                                 len_on_bd);
1482                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1483                                                     len, cqe, comp_ring_cons);
1484 #ifdef BNX2X_STOP_ON_ERROR
1485                                         if (bp->panic)
1486                                                 return -EINVAL;
1487 #endif
1488
1489                                         bnx2x_update_sge_prod(fp,
1490                                                         &cqe->fast_path_cqe);
1491                                         goto next_cqe;
1492                                 }
1493                         }
1494
1495                         pci_dma_sync_single_for_device(bp->pdev,
1496                                         pci_unmap_addr(rx_buf, mapping),
1497                                                        pad + RX_COPY_THRESH,
1498                                                        PCI_DMA_FROMDEVICE);
1499                         prefetch(skb);
1500                         prefetch(((char *)(skb)) + 128);
1501
1502                         /* is this an error packet? */
1503                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1504                                 DP(NETIF_MSG_RX_ERR,
1505                                    "ERROR  flags %x  rx packet %u\n",
1506                                    cqe_fp_flags, sw_comp_cons);
1507                                 /* TBD make sure MC counts this as a drop */
1508                                 goto reuse_rx;
1509                         }
1510
1511                         /* Since we don't have a jumbo ring
1512                          * copy small packets if mtu > 1500
1513                          */
1514                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1515                             (len <= RX_COPY_THRESH)) {
1516                                 struct sk_buff *new_skb;
1517
1518                                 new_skb = netdev_alloc_skb(bp->dev,
1519                                                            len + pad);
1520                                 if (new_skb == NULL) {
1521                                         DP(NETIF_MSG_RX_ERR,
1522                                            "ERROR  packet dropped "
1523                                            "because of alloc failure\n");
1524                                         fp->rx_alloc_failed++;
1525                                         goto reuse_rx;
1526                                 }
1527
1528                                 /* aligned copy */
1529                                 skb_copy_from_linear_data_offset(skb, pad,
1530                                                     new_skb->data + pad, len);
1531                                 skb_reserve(new_skb, pad);
1532                                 skb_put(new_skb, len);
1533
1534                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1535
1536                                 skb = new_skb;
1537
1538                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1539                                 pci_unmap_single(bp->pdev,
1540                                         pci_unmap_addr(rx_buf, mapping),
1541                                                  bp->rx_buf_use_size,
1542                                                  PCI_DMA_FROMDEVICE);
1543                                 skb_reserve(skb, pad);
1544                                 skb_put(skb, len);
1545
1546                         } else {
1547                                 DP(NETIF_MSG_RX_ERR,
1548                                    "ERROR  packet dropped because "
1549                                    "of alloc failure\n");
1550                                 fp->rx_alloc_failed++;
1551 reuse_rx:
1552                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1553                                 goto next_rx;
1554                         }
1555
1556                         skb->protocol = eth_type_trans(skb, bp->dev);
1557
1558                         skb->ip_summed = CHECKSUM_NONE;
1559                         if (bp->rx_csum)
1560                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1561                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1562
1563                 }
1564
1565 #ifdef BCM_VLAN
1566                 if ((bp->vlgrp != NULL) &&
1567                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1568                      PARSING_FLAGS_VLAN))
1569                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1570                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1571                 else
1572 #endif
1573                         netif_receive_skb(skb);
1574
1575                 bp->dev->last_rx = jiffies;
1576
1577 next_rx:
1578                 rx_buf->skb = NULL;
1579
1580                 bd_cons = NEXT_RX_IDX(bd_cons);
1581                 bd_prod = NEXT_RX_IDX(bd_prod);
1582                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1583                 rx_pkt++;
1584 next_cqe:
1585                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1586                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1587
1588                 if (rx_pkt == budget)
1589                         break;
1590         } /* while */
1591
1592         fp->rx_bd_cons = bd_cons;
1593         fp->rx_bd_prod = bd_prod_fw;
1594         fp->rx_comp_cons = sw_comp_cons;
1595         fp->rx_comp_prod = sw_comp_prod;
1596
1597         /* Update producers */
1598         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1599                              fp->rx_sge_prod);
1600         mmiowb(); /* keep prod updates ordered */
1601
1602         fp->rx_pkt += rx_pkt;
1603         fp->rx_calls++;
1604
1605         return rx_pkt;
1606 }
1607
1608 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1609 {
1610         struct bnx2x_fastpath *fp = fp_cookie;
1611         struct bnx2x *bp = fp->bp;
1612         struct net_device *dev = bp->dev;
1613         int index = FP_IDX(fp);
1614
1615         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1616            index, FP_SB_ID(fp));
1617         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1618
1619 #ifdef BNX2X_STOP_ON_ERROR
1620         if (unlikely(bp->panic))
1621                 return IRQ_HANDLED;
1622 #endif
1623
1624         prefetch(fp->rx_cons_sb);
1625         prefetch(fp->tx_cons_sb);
1626         prefetch(&fp->status_blk->c_status_block.status_block_index);
1627         prefetch(&fp->status_blk->u_status_block.status_block_index);
1628
1629         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1630
1631         return IRQ_HANDLED;
1632 }
1633
1634 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1635 {
1636         struct net_device *dev = dev_instance;
1637         struct bnx2x *bp = netdev_priv(dev);
1638         u16 status = bnx2x_ack_int(bp);
1639         u16 mask;
1640
1641         /* Return here if interrupt is shared and it's not for us */
1642         if (unlikely(status == 0)) {
1643                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1644                 return IRQ_NONE;
1645         }
1646         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1647
1648 #ifdef BNX2X_STOP_ON_ERROR
1649         if (unlikely(bp->panic))
1650                 return IRQ_HANDLED;
1651 #endif
1652
1653         /* Return here if interrupt is disabled */
1654         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1655                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1656                 return IRQ_HANDLED;
1657         }
1658
1659         mask = 0x2 << bp->fp[0].sb_id;
1660         if (status & mask) {
1661                 struct bnx2x_fastpath *fp = &bp->fp[0];
1662
1663                 prefetch(fp->rx_cons_sb);
1664                 prefetch(fp->tx_cons_sb);
1665                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1666                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1667
1668                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1669
1670                 status &= ~mask;
1671         }
1672
1673
1674         if (unlikely(status & 0x1)) {
1675                 schedule_work(&bp->sp_task);
1676
1677                 status &= ~0x1;
1678                 if (!status)
1679                         return IRQ_HANDLED;
1680         }
1681
1682         if (status)
1683                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1684                    status);
1685
1686         return IRQ_HANDLED;
1687 }
1688
1689 /* end of fast path */
1690
1691 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1692
1693 /* Link */
1694
1695 /*
1696  * General service functions
1697  */
1698
1699 static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
1700 {
1701         u32 lock_status;
1702         u32 resource_bit = (1 << resource);
1703         u8 port = BP_PORT(bp);
1704         int cnt;
1705
1706         /* Validating that the resource is within range */
1707         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1708                 DP(NETIF_MSG_HW,
1709                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1710                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1711                 return -EINVAL;
1712         }
1713
1714         /* Validating that the resource is not already taken */
1715         lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1716         if (lock_status & resource_bit) {
1717                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1718                    lock_status, resource_bit);
1719                 return -EEXIST;
1720         }
1721
1722         /* Try for 1 second every 5ms */
1723         for (cnt = 0; cnt < 200; cnt++) {
1724                 /* Try to acquire the lock */
1725                 REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
1726                        resource_bit);
1727                 lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1728                 if (lock_status & resource_bit)
1729                         return 0;
1730
1731                 msleep(5);
1732         }
1733         DP(NETIF_MSG_HW, "Timeout\n");
1734         return -EAGAIN;
1735 }
1736
1737 static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
1738 {
1739         u32 lock_status;
1740         u32 resource_bit = (1 << resource);
1741         u8 port = BP_PORT(bp);
1742
1743         /* Validating that the resource is within range */
1744         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1745                 DP(NETIF_MSG_HW,
1746                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1747                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1748                 return -EINVAL;
1749         }
1750
1751         /* Validating that the resource is currently taken */
1752         lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
1753         if (!(lock_status & resource_bit)) {
1754                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1755                    lock_status, resource_bit);
1756                 return -EFAULT;
1757         }
1758
1759         REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
1760         return 0;
1761 }
1762
1763 /* HW Lock for shared dual port PHYs */
1764 static void bnx2x_phy_hw_lock(struct bnx2x *bp)
1765 {
1766         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1767
1768         mutex_lock(&bp->port.phy_mutex);
1769
1770         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1771             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1772                 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1773 }
1774
1775 static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
1776 {
1777         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1778
1779         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1780             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1781                 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1782
1783         mutex_unlock(&bp->port.phy_mutex);
1784 }
1785
1786 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
1787 {
1788         /* The GPIO should be swapped if swap register is set and active */
1789         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1790                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
1791         int gpio_shift = gpio_num +
1792                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1793         u32 gpio_mask = (1 << gpio_shift);
1794         u32 gpio_reg;
1795
1796         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1797                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1798                 return -EINVAL;
1799         }
1800
1801         bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1802         /* read GPIO and mask except the float bits */
1803         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1804
1805         switch (mode) {
1806         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1807                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1808                    gpio_num, gpio_shift);
1809                 /* clear FLOAT and set CLR */
1810                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1811                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1812                 break;
1813
1814         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1815                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1816                    gpio_num, gpio_shift);
1817                 /* clear FLOAT and set SET */
1818                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1819                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1820                 break;
1821
1822         case MISC_REGISTERS_GPIO_INPUT_HI_Z :
1823                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1824                    gpio_num, gpio_shift);
1825                 /* set FLOAT */
1826                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1827                 break;
1828
1829         default:
1830                 break;
1831         }
1832
1833         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1834         bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
1835
1836         return 0;
1837 }
1838
1839 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1840 {
1841         u32 spio_mask = (1 << spio_num);
1842         u32 spio_reg;
1843
1844         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1845             (spio_num > MISC_REGISTERS_SPIO_7)) {
1846                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1847                 return -EINVAL;
1848         }
1849
1850         bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1851         /* read SPIO and mask except the float bits */
1852         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1853
1854         switch (mode) {
1855         case MISC_REGISTERS_SPIO_OUTPUT_LOW :
1856                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1857                 /* clear FLOAT and set CLR */
1858                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1859                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1860                 break;
1861
1862         case MISC_REGISTERS_SPIO_OUTPUT_HIGH :
1863                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1864                 /* clear FLOAT and set SET */
1865                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1866                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1867                 break;
1868
1869         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1870                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1871                 /* set FLOAT */
1872                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1873                 break;
1874
1875         default:
1876                 break;
1877         }
1878
1879         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1880         bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);
1881
1882         return 0;
1883 }
1884
1885 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1886 {
1887         switch (bp->link_vars.ieee_fc) {
1888         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1889                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1890                                           ADVERTISED_Pause);
1891                 break;
1892         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1893                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1894                                          ADVERTISED_Pause);
1895                 break;
1896         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1897                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1898                 break;
1899         default:
1900                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1901                                           ADVERTISED_Pause);
1902                 break;
1903         }
1904 }
1905
1906 static void bnx2x_link_report(struct bnx2x *bp)
1907 {
1908         if (bp->link_vars.link_up) {
1909                 if (bp->state == BNX2X_STATE_OPEN)
1910                         netif_carrier_on(bp->dev);
1911                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1912
1913                 printk("%d Mbps ", bp->link_vars.line_speed);
1914
1915                 if (bp->link_vars.duplex == DUPLEX_FULL)
1916                         printk("full duplex");
1917                 else
1918                         printk("half duplex");
1919
1920                 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1921                         if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1922                                 printk(", receive ");
1923                                 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1924                                         printk("& transmit ");
1925                         } else {
1926                                 printk(", transmit ");
1927                         }
1928                         printk("flow control ON");
1929                 }
1930                 printk("\n");
1931
1932         } else { /* link_down */
1933                 netif_carrier_off(bp->dev);
1934                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1935         }
1936 }
1937
1938 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1939 {
1940         if (!BP_NOMCP(bp)) {
1941                 u8 rc;
1942
1943                 /* Initialize link parameters structure variables */
1944                 bp->link_params.mtu = bp->dev->mtu;
1945
1946                 bnx2x_phy_hw_lock(bp);
1947                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1948                 bnx2x_phy_hw_unlock(bp);
1949
1950                 if (bp->link_vars.link_up)
1951                         bnx2x_link_report(bp);
1952
1953                 bnx2x_calc_fc_adv(bp);
1954
1955                 return rc;
1956         }
1957         BNX2X_ERR("Bootcode is missing -not initializing link\n");
1958         return -EINVAL;
1959 }
1960
1961 static void bnx2x_link_set(struct bnx2x *bp)
1962 {
1963         if (!BP_NOMCP(bp)) {
1964                 bnx2x_phy_hw_lock(bp);
1965                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1966                 bnx2x_phy_hw_unlock(bp);
1967
1968                 bnx2x_calc_fc_adv(bp);
1969         } else
1970                 BNX2X_ERR("Bootcode is missing -not setting link\n");
1971 }
1972
1973 static void bnx2x__link_reset(struct bnx2x *bp)
1974 {
1975         if (!BP_NOMCP(bp)) {
1976                 bnx2x_phy_hw_lock(bp);
1977                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1978                 bnx2x_phy_hw_unlock(bp);
1979         } else
1980                 BNX2X_ERR("Bootcode is missing -not resetting link\n");
1981 }
1982
1983 static u8 bnx2x_link_test(struct bnx2x *bp)
1984 {
1985         u8 rc;
1986
1987         bnx2x_phy_hw_lock(bp);
1988         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
1989         bnx2x_phy_hw_unlock(bp);
1990
1991         return rc;
1992 }
1993
1994 /* Calculates the sum of vn_min_rates.
1995    It's needed for further normalizing of the min_rates.
1996
1997    Returns:
1998      sum of vn_min_rates
1999        or
2000      0 - if all the min_rates are 0.
2001      In the later case fainess algorithm should be deactivated.
2002      If not all min_rates are zero then those that are zeroes will
2003      be set to 1.
2004  */
2005 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2006 {
2007         int i, port = BP_PORT(bp);
2008         u32 wsum = 0;
2009         int all_zero = 1;
2010
2011         for (i = 0; i < E1HVN_MAX; i++) {
2012                 u32 vn_cfg =
2013                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2014                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2015                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2016                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2017                         /* If min rate is zero - set it to 1 */
2018                         if (!vn_min_rate)
2019                                 vn_min_rate = DEF_MIN_RATE;
2020                         else
2021                                 all_zero = 0;
2022
2023                         wsum += vn_min_rate;
2024                 }
2025         }
2026
2027         /* ... only if all min rates are zeros - disable FAIRNESS */
2028         if (all_zero)
2029                 return 0;
2030
2031         return wsum;
2032 }
2033
2034 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2035                                    int en_fness,
2036                                    u16 port_rate,
2037                                    struct cmng_struct_per_port *m_cmng_port)
2038 {
2039         u32 r_param = port_rate / 8;
2040         int port = BP_PORT(bp);
2041         int i;
2042
2043         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2044
2045         /* Enable minmax only if we are in e1hmf mode */
2046         if (IS_E1HMF(bp)) {
2047                 u32 fair_periodic_timeout_usec;
2048                 u32 t_fair;
2049
2050                 /* Enable rate shaping and fairness */
2051                 m_cmng_port->flags.cmng_vn_enable = 1;
2052                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2053                 m_cmng_port->flags.rate_shaping_enable = 1;
2054
2055                 if (!en_fness)
2056                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2057                            "  fairness will be disabled\n");
2058
2059                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2060                 m_cmng_port->rs_vars.rs_periodic_timeout =
2061                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2062
2063                 /* this is the threshold below which no timer arming will occur
2064                    1.25 coefficient is for the threshold to be a little bigger
2065                    than the real time, to compensate for timer in-accuracy */
2066                 m_cmng_port->rs_vars.rs_threshold =
2067                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2068
2069                 /* resolution of fairness timer */
2070                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2071                 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2072                 t_fair = T_FAIR_COEF / port_rate;
2073
2074                 /* this is the threshold below which we won't arm
2075                    the timer anymore */
2076                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2077
2078                 /* we multiply by 1e3/8 to get bytes/msec.
2079                    We don't want the credits to pass a credit
2080                    of the T_FAIR*FAIR_MEM (algorithm resolution) */
2081                 m_cmng_port->fair_vars.upper_bound =
2082                                                 r_param * t_fair * FAIR_MEM;
2083                 /* since each tick is 4 usec */
2084                 m_cmng_port->fair_vars.fairness_timeout =
2085                                                 fair_periodic_timeout_usec / 4;
2086
2087         } else {
2088                 /* Disable rate shaping and fairness */
2089                 m_cmng_port->flags.cmng_vn_enable = 0;
2090                 m_cmng_port->flags.fairness_enable = 0;
2091                 m_cmng_port->flags.rate_shaping_enable = 0;
2092
2093                 DP(NETIF_MSG_IFUP,
2094                    "Single function mode  minmax will be disabled\n");
2095         }
2096
2097         /* Store it to internal memory */
2098         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2099                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2100                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2101                        ((u32 *)(m_cmng_port))[i]);
2102 }
2103
2104 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2105                                    u32 wsum, u16 port_rate,
2106                                  struct cmng_struct_per_port *m_cmng_port)
2107 {
2108         struct rate_shaping_vars_per_vn m_rs_vn;
2109         struct fairness_vars_per_vn m_fair_vn;
2110         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2111         u16 vn_min_rate, vn_max_rate;
2112         int i;
2113
2114         /* If function is hidden - set min and max to zeroes */
2115         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2116                 vn_min_rate = 0;
2117                 vn_max_rate = 0;
2118
2119         } else {
2120                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2121                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2122                 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2123                    if current min rate is zero - set it to 1.
2124                    This is a requirment of the algorithm. */
2125                 if ((vn_min_rate == 0) && wsum)
2126                         vn_min_rate = DEF_MIN_RATE;
2127                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2128                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2129         }
2130
2131         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2132            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2133
2134         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2135         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2136
2137         /* global vn counter - maximal Mbps for this vn */
2138         m_rs_vn.vn_counter.rate = vn_max_rate;
2139
2140         /* quota - number of bytes transmitted in this period */
2141         m_rs_vn.vn_counter.quota =
2142                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2143
2144 #ifdef BNX2X_PER_PROT_QOS
2145         /* per protocol counter */
2146         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2147                 /* maximal Mbps for this protocol */
2148                 m_rs_vn.protocol_counters[protocol].rate =
2149                                                 protocol_max_rate[protocol];
2150                 /* the quota in each timer period -
2151                    number of bytes transmitted in this period */
2152                 m_rs_vn.protocol_counters[protocol].quota =
2153                         (u32)(rs_periodic_timeout_usec *
2154                           ((double)m_rs_vn.
2155                                    protocol_counters[protocol].rate/8));
2156         }
2157 #endif
2158
2159         if (wsum) {
2160                 /* credit for each period of the fairness algorithm:
2161                    number of bytes in T_FAIR (the vn share the port rate).
2162                    wsum should not be larger than 10000, thus
2163                    T_FAIR_COEF / (8 * wsum) will always be grater than zero */
2164                 m_fair_vn.vn_credit_delta =
2165                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2166                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2167                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2168                    m_fair_vn.vn_credit_delta);
2169         }
2170
2171 #ifdef BNX2X_PER_PROT_QOS
2172         do {
2173                 u32 protocolWeightSum = 0;
2174
2175                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2176                         protocolWeightSum +=
2177                                         drvInit.protocol_min_rate[protocol];
2178                 /* per protocol counter -
2179                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2180                 if (protocolWeightSum > 0) {
2181                         for (protocol = 0;
2182                              protocol < NUM_OF_PROTOCOLS; protocol++)
2183                                 /* credit for each period of the
2184                                    fairness algorithm - number of bytes in
2185                                    T_FAIR (the protocol share the vn rate) */
2186                                 m_fair_vn.protocol_credit_delta[protocol] =
2187                                         (u32)((vn_min_rate / 8) * t_fair *
2188                                         protocol_min_rate / protocolWeightSum);
2189                 }
2190         } while (0);
2191 #endif
2192
2193         /* Store it to internal memory */
2194         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2195                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2196                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2197                        ((u32 *)(&m_rs_vn))[i]);
2198
2199         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2200                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2201                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2202                        ((u32 *)(&m_fair_vn))[i]);
2203 }
2204
2205 /* This function is called upon link interrupt */
2206 static void bnx2x_link_attn(struct bnx2x *bp)
2207 {
2208         int vn;
2209
2210         /* Make sure that we are synced with the current statistics */
2211         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2212
2213         bnx2x_phy_hw_lock(bp);
2214         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2215         bnx2x_phy_hw_unlock(bp);
2216
2217         if (bp->link_vars.link_up) {
2218
2219                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2220                         struct host_port_stats *pstats;
2221
2222                         pstats = bnx2x_sp(bp, port_stats);
2223                         /* reset old bmac stats */
2224                         memset(&(pstats->mac_stx[0]), 0,
2225                                sizeof(struct mac_stx));
2226                 }
2227                 if ((bp->state == BNX2X_STATE_OPEN) ||
2228                     (bp->state == BNX2X_STATE_DISABLED))
2229                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2230         }
2231
2232         /* indicate link status */
2233         bnx2x_link_report(bp);
2234
2235         if (IS_E1HMF(bp)) {
2236                 int func;
2237
2238                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2239                         if (vn == BP_E1HVN(bp))
2240                                 continue;
2241
2242                         func = ((vn << 1) | BP_PORT(bp));
2243
2244                         /* Set the attention towards other drivers
2245                            on the same port */
2246                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2247                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2248                 }
2249         }
2250
2251         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2252                 struct cmng_struct_per_port m_cmng_port;
2253                 u32 wsum;
2254                 int port = BP_PORT(bp);
2255
2256                 /* Init RATE SHAPING and FAIRNESS contexts */
2257                 wsum = bnx2x_calc_vn_wsum(bp);
2258                 bnx2x_init_port_minmax(bp, (int)wsum,
2259                                         bp->link_vars.line_speed,
2260                                         &m_cmng_port);
2261                 if (IS_E1HMF(bp))
2262                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2263                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2264                                         wsum, bp->link_vars.line_speed,
2265                                                      &m_cmng_port);
2266         }
2267 }
2268
2269 static void bnx2x__link_status_update(struct bnx2x *bp)
2270 {
2271         if (bp->state != BNX2X_STATE_OPEN)
2272                 return;
2273
2274         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2275
2276         if (bp->link_vars.link_up)
2277                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2278         else
2279                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2280
2281         /* indicate link status */
2282         bnx2x_link_report(bp);
2283 }
2284
2285 static void bnx2x_pmf_update(struct bnx2x *bp)
2286 {
2287         int port = BP_PORT(bp);
2288         u32 val;
2289
2290         bp->port.pmf = 1;
2291         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2292
2293         /* enable nig attention */
2294         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2295         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2296         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2297
2298         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2299 }
2300
2301 /* end of Link */
2302
2303 /* slow path */
2304
2305 /*
2306  * General service functions
2307  */
2308
2309 /* the slow path queue is odd since completions arrive on the fastpath ring */
2310 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2311                          u32 data_hi, u32 data_lo, int common)
2312 {
2313         int func = BP_FUNC(bp);
2314
2315         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2316            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2317            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2318            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2319            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2320
2321 #ifdef BNX2X_STOP_ON_ERROR
2322         if (unlikely(bp->panic))
2323                 return -EIO;
2324 #endif
2325
2326         spin_lock_bh(&bp->spq_lock);
2327
2328         if (!bp->spq_left) {
2329                 BNX2X_ERR("BUG! SPQ ring full!\n");
2330                 spin_unlock_bh(&bp->spq_lock);
2331                 bnx2x_panic();
2332                 return -EBUSY;
2333         }
2334
2335         /* CID needs port number to be encoded int it */
2336         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2337                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2338                                      HW_CID(bp, cid)));
2339         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2340         if (common)
2341                 bp->spq_prod_bd->hdr.type |=
2342                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2343
2344         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2345         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2346
2347         bp->spq_left--;
2348
2349         if (bp->spq_prod_bd == bp->spq_last_bd) {
2350                 bp->spq_prod_bd = bp->spq;
2351                 bp->spq_prod_idx = 0;
2352                 DP(NETIF_MSG_TIMER, "end of spq\n");
2353
2354         } else {
2355                 bp->spq_prod_bd++;
2356                 bp->spq_prod_idx++;
2357         }
2358
2359         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2360                bp->spq_prod_idx);
2361
2362         spin_unlock_bh(&bp->spq_lock);
2363         return 0;
2364 }
2365
2366 /* acquire split MCP access lock register */
2367 static int bnx2x_lock_alr(struct bnx2x *bp)
2368 {
2369         u32 i, j, val;
2370         int rc = 0;
2371
2372         might_sleep();
2373         i = 100;
2374         for (j = 0; j < i*10; j++) {
2375                 val = (1UL << 31);
2376                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2377                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2378                 if (val & (1L << 31))
2379                         break;
2380
2381                 msleep(5);
2382         }
2383         if (!(val & (1L << 31))) {
2384                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2385                 rc = -EBUSY;
2386         }
2387
2388         return rc;
2389 }
2390
2391 /* Release split MCP access lock register */
2392 static void bnx2x_unlock_alr(struct bnx2x *bp)
2393 {
2394         u32 val = 0;
2395
2396         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2397 }
2398
2399 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2400 {
2401         struct host_def_status_block *def_sb = bp->def_status_blk;
2402         u16 rc = 0;
2403
2404         barrier(); /* status block is written to by the chip */
2405
2406         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2407                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2408                 rc |= 1;
2409         }
2410         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2411                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2412                 rc |= 2;
2413         }
2414         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2415                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2416                 rc |= 4;
2417         }
2418         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2419                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2420                 rc |= 8;
2421         }
2422         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2423                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2424                 rc |= 16;
2425         }
2426         return rc;
2427 }
2428
2429 /*
2430  * slow path service functions
2431  */
2432
2433 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2434 {
2435         int port = BP_PORT(bp);
2436         int func = BP_FUNC(bp);
2437         u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
2438         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2439                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2440         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2441                                        NIG_REG_MASK_INTERRUPT_PORT0;
2442
2443         if (~bp->aeu_mask & (asserted & 0xff))
2444                 BNX2X_ERR("IGU ERROR\n");
2445         if (bp->attn_state & asserted)
2446                 BNX2X_ERR("IGU ERROR\n");
2447
2448         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2449            bp->aeu_mask, asserted);
2450         bp->aeu_mask &= ~(asserted & 0xff);
2451         DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2452
2453         REG_WR(bp, aeu_addr, bp->aeu_mask);
2454
2455         bp->attn_state |= asserted;
2456
2457         if (asserted & ATTN_HARD_WIRED_MASK) {
2458                 if (asserted & ATTN_NIG_FOR_FUNC) {
2459
2460                         /* save nig interrupt mask */
2461                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2462                         REG_WR(bp, nig_int_mask_addr, 0);
2463
2464                         bnx2x_link_attn(bp);
2465
2466                         /* handle unicore attn? */
2467                 }
2468                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2469                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2470
2471                 if (asserted & GPIO_2_FUNC)
2472                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2473
2474                 if (asserted & GPIO_3_FUNC)
2475                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2476
2477                 if (asserted & GPIO_4_FUNC)
2478                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2479
2480                 if (port == 0) {
2481                         if (asserted & ATTN_GENERAL_ATTN_1) {
2482                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2483                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2484                         }
2485                         if (asserted & ATTN_GENERAL_ATTN_2) {
2486                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2487                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2488                         }
2489                         if (asserted & ATTN_GENERAL_ATTN_3) {
2490                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2491                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2492                         }
2493                 } else {
2494                         if (asserted & ATTN_GENERAL_ATTN_4) {
2495                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2496                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2497                         }
2498                         if (asserted & ATTN_GENERAL_ATTN_5) {
2499                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2500                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2501                         }
2502                         if (asserted & ATTN_GENERAL_ATTN_6) {
2503                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2504                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2505                         }
2506                 }
2507
2508         } /* if hardwired */
2509
2510         DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2511            asserted, BAR_IGU_INTMEM + igu_addr);
2512         REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2513
2514         /* now set back the mask */
2515         if (asserted & ATTN_NIG_FOR_FUNC)
2516                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2517 }
2518
2519 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2520 {
2521         int port = BP_PORT(bp);
2522         int reg_offset;
2523         u32 val;
2524
2525         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2526                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2527
2528         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2529
2530                 val = REG_RD(bp, reg_offset);
2531                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2532                 REG_WR(bp, reg_offset, val);
2533
2534                 BNX2X_ERR("SPIO5 hw attention\n");
2535
2536                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2537                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2538                         /* Fan failure attention */
2539
2540                         /* The PHY reset is controled by GPIO 1 */
2541                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2542                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
2543                         /* Low power mode is controled by GPIO 2 */
2544                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2545                                        MISC_REGISTERS_GPIO_OUTPUT_LOW);
2546                         /* mark the failure */
2547                         bp->link_params.ext_phy_config &=
2548                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2549                         bp->link_params.ext_phy_config |=
2550                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2551                         SHMEM_WR(bp,
2552                                  dev_info.port_hw_config[port].
2553                                                         external_phy_config,
2554                                  bp->link_params.ext_phy_config);
2555                         /* log the failure */
2556                         printk(KERN_ERR PFX "Fan Failure on Network"
2557                                " Controller %s has caused the driver to"
2558                                " shutdown the card to prevent permanent"
2559                                " damage.  Please contact Dell Support for"
2560                                " assistance\n", bp->dev->name);
2561                         break;
2562
2563                 default:
2564                         break;
2565                 }
2566         }
2567
2568         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2569
2570                 val = REG_RD(bp, reg_offset);
2571                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2572                 REG_WR(bp, reg_offset, val);
2573
2574                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2575                           (attn & HW_INTERRUT_ASSERT_SET_0));
2576                 bnx2x_panic();
2577         }
2578 }
2579
2580 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2581 {
2582         u32 val;
2583
2584         if (attn & BNX2X_DOORQ_ASSERT) {
2585
2586                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2587                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2588                 /* DORQ discard attention */
2589                 if (val & 0x2)
2590                         BNX2X_ERR("FATAL error from DORQ\n");
2591         }
2592
2593         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2594
2595                 int port = BP_PORT(bp);
2596                 int reg_offset;
2597
2598                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2599                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2600
2601                 val = REG_RD(bp, reg_offset);
2602                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2603                 REG_WR(bp, reg_offset, val);
2604
2605                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2606                           (attn & HW_INTERRUT_ASSERT_SET_1));
2607                 bnx2x_panic();
2608         }
2609 }
2610
2611 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2612 {
2613         u32 val;
2614
2615         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2616
2617                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2618                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2619                 /* CFC error attention */
2620                 if (val & 0x2)
2621                         BNX2X_ERR("FATAL error from CFC\n");
2622         }
2623
2624         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2625
2626                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2627                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2628                 /* RQ_USDMDP_FIFO_OVERFLOW */
2629                 if (val & 0x18000)
2630                         BNX2X_ERR("FATAL error from PXP\n");
2631         }
2632
2633         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2634
2635                 int port = BP_PORT(bp);
2636                 int reg_offset;
2637
2638                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2639                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2640
2641                 val = REG_RD(bp, reg_offset);
2642                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2643                 REG_WR(bp, reg_offset, val);
2644
2645                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2646                           (attn & HW_INTERRUT_ASSERT_SET_2));
2647                 bnx2x_panic();
2648         }
2649 }
2650
2651 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2652 {
2653         u32 val;
2654
2655         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2656
2657                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2658                         int func = BP_FUNC(bp);
2659
2660                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2661                         bnx2x__link_status_update(bp);
2662                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2663                                                         DRV_STATUS_PMF)
2664                                 bnx2x_pmf_update(bp);
2665
2666                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2667
2668                         BNX2X_ERR("MC assert!\n");
2669                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2670                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2671                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2672                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2673                         bnx2x_panic();
2674
2675                 } else if (attn & BNX2X_MCP_ASSERT) {
2676
2677                         BNX2X_ERR("MCP assert!\n");
2678                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2679                         bnx2x_fw_dump(bp);
2680
2681                 } else
2682                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2683         }
2684
2685         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2686                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2687                 if (attn & BNX2X_GRC_TIMEOUT) {
2688                         val = CHIP_IS_E1H(bp) ?
2689                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2690                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2691                 }
2692                 if (attn & BNX2X_GRC_RSV) {
2693                         val = CHIP_IS_E1H(bp) ?
2694                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2695                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2696                 }
2697                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2698         }
2699 }
2700
2701 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2702 {
2703         struct attn_route attn;
2704         struct attn_route group_mask;
2705         int port = BP_PORT(bp);
2706         int index;
2707         u32 reg_addr;
2708         u32 val;
2709
2710         /* need to take HW lock because MCP or other port might also
2711            try to handle this event */
2712         bnx2x_lock_alr(bp);
2713
2714         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2715         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2716         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2717         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2718         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2719            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2720
2721         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2722                 if (deasserted & (1 << index)) {
2723                         group_mask = bp->attn_group[index];
2724
2725                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2726                            index, group_mask.sig[0], group_mask.sig[1],
2727                            group_mask.sig[2], group_mask.sig[3]);
2728
2729                         bnx2x_attn_int_deasserted3(bp,
2730                                         attn.sig[3] & group_mask.sig[3]);
2731                         bnx2x_attn_int_deasserted1(bp,
2732                                         attn.sig[1] & group_mask.sig[1]);
2733                         bnx2x_attn_int_deasserted2(bp,
2734                                         attn.sig[2] & group_mask.sig[2]);
2735                         bnx2x_attn_int_deasserted0(bp,
2736                                         attn.sig[0] & group_mask.sig[0]);
2737
2738                         if ((attn.sig[0] & group_mask.sig[0] &
2739                                                 HW_PRTY_ASSERT_SET_0) ||
2740                             (attn.sig[1] & group_mask.sig[1] &
2741                                                 HW_PRTY_ASSERT_SET_1) ||
2742                             (attn.sig[2] & group_mask.sig[2] &
2743                                                 HW_PRTY_ASSERT_SET_2))
2744                                BNX2X_ERR("FATAL HW block parity attention\n");
2745                 }
2746         }
2747
2748         bnx2x_unlock_alr(bp);
2749
2750         reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
2751
2752         val = ~deasserted;
2753 /*      DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
2754            val, BAR_IGU_INTMEM + reg_addr); */
2755         REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2756
2757         if (bp->aeu_mask & (deasserted & 0xff))
2758                 BNX2X_ERR("IGU BUG!\n");
2759         if (~bp->attn_state & deasserted)
2760                 BNX2X_ERR("IGU BUG!\n");
2761
2762         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2763                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2764
2765         DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2766         bp->aeu_mask |= (deasserted & 0xff);
2767
2768         DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2769         REG_WR(bp, reg_addr, bp->aeu_mask);
2770
2771         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2772         bp->attn_state &= ~deasserted;
2773         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2774 }
2775
2776 static void bnx2x_attn_int(struct bnx2x *bp)
2777 {
2778         /* read local copy of bits */
2779         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2780         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2781         u32 attn_state = bp->attn_state;
2782
2783         /* look for changed bits */
2784         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2785         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2786
2787         DP(NETIF_MSG_HW,
2788            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2789            attn_bits, attn_ack, asserted, deasserted);
2790
2791         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2792                 BNX2X_ERR("BAD attention state\n");
2793
2794         /* handle bits that were raised */
2795         if (asserted)
2796                 bnx2x_attn_int_asserted(bp, asserted);
2797
2798         if (deasserted)
2799                 bnx2x_attn_int_deasserted(bp, deasserted);
2800 }
2801
2802 static void bnx2x_sp_task(struct work_struct *work)
2803 {
2804         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2805         u16 status;
2806
2807
2808         /* Return here if interrupt is disabled */
2809         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2810                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2811                 return;
2812         }
2813
2814         status = bnx2x_update_dsb_idx(bp);
2815 /*      if (status == 0)                                     */
2816 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2817
2818         DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2819
2820         /* HW attentions */
2821         if (status & 0x1)
2822                 bnx2x_attn_int(bp);
2823
2824         /* CStorm events: query_stats, port delete ramrod */
2825         if (status & 0x2)
2826                 bp->stats_pending = 0;
2827
2828         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2829                      IGU_INT_NOP, 1);
2830         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2831                      IGU_INT_NOP, 1);
2832         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2833                      IGU_INT_NOP, 1);
2834         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2835                      IGU_INT_NOP, 1);
2836         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2837                      IGU_INT_ENABLE, 1);
2838
2839 }
2840
2841 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2842 {
2843         struct net_device *dev = dev_instance;
2844         struct bnx2x *bp = netdev_priv(dev);
2845
2846         /* Return here if interrupt is disabled */
2847         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2848                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2849                 return IRQ_HANDLED;
2850         }
2851
2852         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2853
2854 #ifdef BNX2X_STOP_ON_ERROR
2855         if (unlikely(bp->panic))
2856                 return IRQ_HANDLED;
2857 #endif
2858
2859         schedule_work(&bp->sp_task);
2860
2861         return IRQ_HANDLED;
2862 }
2863
2864 /* end of slow path */
2865
2866 /* Statistics */
2867
2868 /****************************************************************************
2869 * Macros
2870 ****************************************************************************/
2871
2872 /* sum[hi:lo] += add[hi:lo] */
2873 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2874         do { \
2875                 s_lo += a_lo; \
2876                 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
2877         } while (0)
2878
2879 /* difference = minuend - subtrahend */
2880 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2881         do { \
2882                 if (m_lo < s_lo) { \
2883                         /* underflow */ \
2884                         d_hi = m_hi - s_hi; \
2885                         if (d_hi > 0) { \
2886                         /* we can 'loan' 1 */ \
2887                                 d_hi--; \
2888                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2889                         } else { \
2890                         /* m_hi <= s_hi */ \
2891                                 d_hi = 0; \
2892                                 d_lo = 0; \
2893                         } \
2894                 } else { \
2895                         /* m_lo >= s_lo */ \
2896                         if (m_hi < s_hi) { \
2897                                 d_hi = 0; \
2898                                 d_lo = 0; \
2899                         } else { \
2900                         /* m_hi >= s_hi */ \
2901                                 d_hi = m_hi - s_hi; \
2902                                 d_lo = m_lo - s_lo; \
2903                         } \
2904                 } \
2905         } while (0)
2906
2907 #define UPDATE_STAT64(s, t) \
2908         do { \
2909                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2910                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2911                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2912                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2913                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2914                        pstats->mac_stx[1].t##_lo, diff.lo); \
2915         } while (0)
2916
2917 #define UPDATE_STAT64_NIG(s, t) \
2918         do { \
2919                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2920                         diff.lo, new->s##_lo, old->s##_lo); \
2921                 ADD_64(estats->t##_hi, diff.hi, \
2922                        estats->t##_lo, diff.lo); \
2923         } while (0)
2924
2925 /* sum[hi:lo] += add */
2926 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2927         do { \
2928                 s_lo += a; \
2929                 s_hi += (s_lo < a) ? 1 : 0; \
2930         } while (0)
2931
2932 #define UPDATE_EXTEND_STAT(s) \
2933         do { \
2934                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2935                               pstats->mac_stx[1].s##_lo, \
2936                               new->s); \
2937         } while (0)
2938
2939 #define UPDATE_EXTEND_TSTAT(s, t) \
2940         do { \
2941                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2942                 old_tclient->s = le32_to_cpu(tclient->s); \
2943                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2944         } while (0)
2945
2946 #define UPDATE_EXTEND_XSTAT(s, t) \
2947         do { \
2948                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2949                 old_xclient->s = le32_to_cpu(xclient->s); \
2950                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2951         } while (0)
2952
2953 /*
2954  * General service functions
2955  */
2956
2957 static inline long bnx2x_hilo(u32 *hiref)
2958 {
2959         u32 lo = *(hiref + 1);
2960 #if (BITS_PER_LONG == 64)
2961         u32 hi = *hiref;
2962
2963         return HILO_U64(hi, lo);
2964 #else
2965         return lo;
2966 #endif
2967 }
2968
2969 /*
2970  * Init service functions
2971  */
2972
2973 static void bnx2x_storm_stats_init(struct bnx2x *bp)
2974 {
2975         int func = BP_FUNC(bp);
2976
2977         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
2978         REG_WR(bp, BAR_XSTRORM_INTMEM +
2979                XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2980
2981         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
2982         REG_WR(bp, BAR_TSTRORM_INTMEM +
2983                TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2984
2985         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
2986         REG_WR(bp, BAR_CSTRORM_INTMEM +
2987                CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2988
2989         REG_WR(bp, BAR_XSTRORM_INTMEM +
2990                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2991                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2992         REG_WR(bp, BAR_XSTRORM_INTMEM +
2993                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2994                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2995
2996         REG_WR(bp, BAR_TSTRORM_INTMEM +
2997                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2998                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2999         REG_WR(bp, BAR_TSTRORM_INTMEM +
3000                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3001                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3002 }
3003
3004 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3005 {
3006         if (!bp->stats_pending) {
3007                 struct eth_query_ramrod_data ramrod_data = {0};
3008                 int rc;
3009
3010                 ramrod_data.drv_counter = bp->stats_counter++;
3011                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3012                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3013
3014                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3015                                    ((u32 *)&ramrod_data)[1],
3016                                    ((u32 *)&ramrod_data)[0], 0);
3017                 if (rc == 0) {
3018                         /* stats ramrod has it's own slot on the spq */
3019                         bp->spq_left++;
3020                         bp->stats_pending = 1;
3021                 }
3022         }
3023 }
3024
3025 static void bnx2x_stats_init(struct bnx2x *bp)
3026 {
3027         int port = BP_PORT(bp);
3028
3029         bp->executer_idx = 0;
3030         bp->stats_counter = 0;
3031
3032         /* port stats */
3033         if (!BP_NOMCP(bp))
3034                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3035         else
3036                 bp->port.port_stx = 0;
3037         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3038
3039         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3040         bp->port.old_nig_stats.brb_discard =
3041                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3042         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3043                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3044         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3045                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3046
3047         /* function stats */
3048         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3049         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3050         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3051         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3052
3053         bp->stats_state = STATS_STATE_DISABLED;
3054         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3055                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3056 }
3057
3058 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3059 {
3060         struct dmae_command *dmae = &bp->stats_dmae;
3061         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3062
3063         *stats_comp = DMAE_COMP_VAL;
3064
3065         /* loader */
3066         if (bp->executer_idx) {
3067                 int loader_idx = PMF_DMAE_C(bp);
3068
3069                 memset(dmae, 0, sizeof(struct dmae_command));
3070
3071                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3072                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3073                                 DMAE_CMD_DST_RESET |
3074 #ifdef __BIG_ENDIAN
3075                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3076 #else
3077                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3078 #endif
3079                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3080                                                DMAE_CMD_PORT_0) |
3081                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3082                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3083                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3084                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3085                                      sizeof(struct dmae_command) *
3086                                      (loader_idx + 1)) >> 2;
3087                 dmae->dst_addr_hi = 0;
3088                 dmae->len = sizeof(struct dmae_command) >> 2;
3089                 if (CHIP_IS_E1(bp))
3090                         dmae->len--;
3091                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3092                 dmae->comp_addr_hi = 0;
3093                 dmae->comp_val = 1;
3094
3095                 *stats_comp = 0;
3096                 bnx2x_post_dmae(bp, dmae, loader_idx);
3097
3098         } else if (bp->func_stx) {
3099                 *stats_comp = 0;
3100                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3101         }
3102 }
3103
3104 static int bnx2x_stats_comp(struct bnx2x *bp)
3105 {
3106         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3107         int cnt = 10;
3108
3109         might_sleep();
3110         while (*stats_comp != DMAE_COMP_VAL) {
3111                 msleep(1);
3112                 if (!cnt) {
3113                         BNX2X_ERR("timeout waiting for stats finished\n");
3114                         break;
3115                 }
3116                 cnt--;
3117         }
3118         return 1;
3119 }
3120
3121 /*
3122  * Statistics service functions
3123  */
3124
3125 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3126 {
3127         struct dmae_command *dmae;
3128         u32 opcode;
3129         int loader_idx = PMF_DMAE_C(bp);
3130         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3131
3132         /* sanity */
3133         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3134                 BNX2X_ERR("BUG!\n");
3135                 return;
3136         }
3137
3138         bp->executer_idx = 0;
3139
3140         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3141                   DMAE_CMD_C_ENABLE |
3142                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3143 #ifdef __BIG_ENDIAN
3144                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3145 #else
3146                   DMAE_CMD_ENDIANITY_DW_SWAP |
3147 #endif
3148                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3149                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3150
3151         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3152         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3153         dmae->src_addr_lo = bp->port.port_stx >> 2;
3154         dmae->src_addr_hi = 0;
3155         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3156         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3157         dmae->len = DMAE_LEN32_RD_MAX;
3158         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3159         dmae->comp_addr_hi = 0;
3160         dmae->comp_val = 1;
3161
3162         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3163         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3164         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3165         dmae->src_addr_hi = 0;
3166         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3167                                    DMAE_LEN32_RD_MAX * 4);
3168         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3169                                    DMAE_LEN32_RD_MAX * 4);
3170         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3171         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3172         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3173         dmae->comp_val = DMAE_COMP_VAL;
3174
3175         *stats_comp = 0;
3176         bnx2x_hw_stats_post(bp);
3177         bnx2x_stats_comp(bp);
3178 }
3179
3180 static void bnx2x_port_stats_init(struct bnx2x *bp)
3181 {
3182         struct dmae_command *dmae;
3183         int port = BP_PORT(bp);
3184         int vn = BP_E1HVN(bp);
3185         u32 opcode;
3186         int loader_idx = PMF_DMAE_C(bp);
3187         u32 mac_addr;
3188         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3189
3190         /* sanity */
3191         if (!bp->link_vars.link_up || !bp->port.pmf) {
3192                 BNX2X_ERR("BUG!\n");
3193                 return;
3194         }
3195
3196         bp->executer_idx = 0;
3197
3198         /* MCP */
3199         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3200                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3201                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3202 #ifdef __BIG_ENDIAN
3203                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3204 #else
3205                   DMAE_CMD_ENDIANITY_DW_SWAP |
3206 #endif
3207                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3208                   (vn << DMAE_CMD_E1HVN_SHIFT));
3209
3210         if (bp->port.port_stx) {
3211
3212                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3213                 dmae->opcode = opcode;
3214                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3215                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3216                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3217                 dmae->dst_addr_hi = 0;
3218                 dmae->len = sizeof(struct host_port_stats) >> 2;
3219                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3220                 dmae->comp_addr_hi = 0;
3221                 dmae->comp_val = 1;
3222         }
3223
3224         if (bp->func_stx) {
3225
3226                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3227                 dmae->opcode = opcode;
3228                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3229                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3230                 dmae->dst_addr_lo = bp->func_stx >> 2;
3231                 dmae->dst_addr_hi = 0;
3232                 dmae->len = sizeof(struct host_func_stats) >> 2;
3233                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3234                 dmae->comp_addr_hi = 0;
3235                 dmae->comp_val = 1;
3236         }
3237
3238         /* MAC */
3239         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3240                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3241                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3242 #ifdef __BIG_ENDIAN
3243                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3244 #else
3245                   DMAE_CMD_ENDIANITY_DW_SWAP |
3246 #endif
3247                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3248                   (vn << DMAE_CMD_E1HVN_SHIFT));
3249
3250         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3251
3252                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3253                                    NIG_REG_INGRESS_BMAC0_MEM);
3254
3255                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3256                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3257                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3258                 dmae->opcode = opcode;
3259                 dmae->src_addr_lo = (mac_addr +
3260                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3261                 dmae->src_addr_hi = 0;
3262                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3263                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3264                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3265                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3266                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3267                 dmae->comp_addr_hi = 0;
3268                 dmae->comp_val = 1;
3269
3270                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3271                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3272                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3273                 dmae->opcode = opcode;
3274                 dmae->src_addr_lo = (mac_addr +
3275                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3276                 dmae->src_addr_hi = 0;
3277                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3278                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3279                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3280                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3281                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3282                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3283                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3284                 dmae->comp_addr_hi = 0;
3285                 dmae->comp_val = 1;
3286
3287         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3288
3289                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3290
3291                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3292                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3293                 dmae->opcode = opcode;
3294                 dmae->src_addr_lo = (mac_addr +
3295                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3296                 dmae->src_addr_hi = 0;
3297                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3298                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3299                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3300                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3301                 dmae->comp_addr_hi = 0;
3302                 dmae->comp_val = 1;
3303
3304                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3305                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3306                 dmae->opcode = opcode;
3307                 dmae->src_addr_lo = (mac_addr +
3308                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3309                 dmae->src_addr_hi = 0;
3310                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3311                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3312                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3313                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3314                 dmae->len = 1;
3315                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3316                 dmae->comp_addr_hi = 0;
3317                 dmae->comp_val = 1;
3318
3319                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3320                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3321                 dmae->opcode = opcode;
3322                 dmae->src_addr_lo = (mac_addr +
3323                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3324                 dmae->src_addr_hi = 0;
3325                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3326                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3327                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3328                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3329                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3330                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3331                 dmae->comp_addr_hi = 0;
3332                 dmae->comp_val = 1;
3333         }
3334
3335         /* NIG */
3336         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3337         dmae->opcode = opcode;
3338         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3339                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3340         dmae->src_addr_hi = 0;
3341         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3342         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3343         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3344         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3345         dmae->comp_addr_hi = 0;
3346         dmae->comp_val = 1;
3347
3348         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3349         dmae->opcode = opcode;
3350         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3351                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3352         dmae->src_addr_hi = 0;
3353         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3354                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3355         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3356                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3357         dmae->len = (2*sizeof(u32)) >> 2;
3358         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3359         dmae->comp_addr_hi = 0;
3360         dmae->comp_val = 1;
3361
3362         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3363         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3364                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3365                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3366 #ifdef __BIG_ENDIAN
3367                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3368 #else
3369                         DMAE_CMD_ENDIANITY_DW_SWAP |
3370 #endif
3371                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3372                         (vn << DMAE_CMD_E1HVN_SHIFT));
3373         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3374                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3375         dmae->src_addr_hi = 0;
3376         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3377                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3378         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3379                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3380         dmae->len = (2*sizeof(u32)) >> 2;
3381         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3382         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3383         dmae->comp_val = DMAE_COMP_VAL;
3384
3385         *stats_comp = 0;
3386 }
3387
3388 static void bnx2x_func_stats_init(struct bnx2x *bp)
3389 {
3390         struct dmae_command *dmae = &bp->stats_dmae;
3391         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3392
3393         /* sanity */
3394         if (!bp->func_stx) {
3395                 BNX2X_ERR("BUG!\n");
3396                 return;
3397         }
3398
3399         bp->executer_idx = 0;
3400         memset(dmae, 0, sizeof(struct dmae_command));
3401
3402         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3403                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3404                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3405 #ifdef __BIG_ENDIAN
3406                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3407 #else
3408                         DMAE_CMD_ENDIANITY_DW_SWAP |
3409 #endif
3410                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3411                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3412         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3413         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3414         dmae->dst_addr_lo = bp->func_stx >> 2;
3415         dmae->dst_addr_hi = 0;
3416         dmae->len = sizeof(struct host_func_stats) >> 2;
3417         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3418         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3419         dmae->comp_val = DMAE_COMP_VAL;
3420
3421         *stats_comp = 0;
3422 }
3423
3424 static void bnx2x_stats_start(struct bnx2x *bp)
3425 {
3426         if (bp->port.pmf)
3427                 bnx2x_port_stats_init(bp);
3428
3429         else if (bp->func_stx)
3430                 bnx2x_func_stats_init(bp);
3431
3432         bnx2x_hw_stats_post(bp);
3433         bnx2x_storm_stats_post(bp);
3434 }
3435
3436 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3437 {
3438         bnx2x_stats_comp(bp);
3439         bnx2x_stats_pmf_update(bp);
3440         bnx2x_stats_start(bp);
3441 }
3442
3443 static void bnx2x_stats_restart(struct bnx2x *bp)
3444 {
3445         bnx2x_stats_comp(bp);
3446         bnx2x_stats_start(bp);
3447 }
3448
3449 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3450 {
3451         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3452         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3453         struct regpair diff;
3454
3455         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3456         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3457         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3458         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3459         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3460         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3461         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3462         UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
3463         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3464         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3465         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3466         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3467         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3468         UPDATE_STAT64(tx_stat_gt127,
3469                                 tx_stat_etherstatspkts65octetsto127octets);
3470         UPDATE_STAT64(tx_stat_gt255,
3471                                 tx_stat_etherstatspkts128octetsto255octets);
3472         UPDATE_STAT64(tx_stat_gt511,
3473                                 tx_stat_etherstatspkts256octetsto511octets);
3474         UPDATE_STAT64(tx_stat_gt1023,
3475                                 tx_stat_etherstatspkts512octetsto1023octets);
3476         UPDATE_STAT64(tx_stat_gt1518,
3477                                 tx_stat_etherstatspkts1024octetsto1522octets);
3478         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3479         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3480         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3481         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3482         UPDATE_STAT64(tx_stat_gterr,
3483                                 tx_stat_dot3statsinternalmactransmiterrors);
3484         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3485 }
3486
3487 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3488 {
3489         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3490         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3491
3492         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3493         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3494         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3495         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3496         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3497         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3498         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3499         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3500         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3501         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3502         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3503         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3504         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3505         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3506         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3507         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3508         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3509         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3510         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3511         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3512         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3513         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3514         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3515         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3516         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3517         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3518         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3519         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3520         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3521         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3522         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3523 }
3524
3525 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3526 {
3527         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3528         struct nig_stats *old = &(bp->port.old_nig_stats);
3529         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3530         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3531         struct regpair diff;
3532
3533         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3534                 bnx2x_bmac_stats_update(bp);
3535
3536         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3537                 bnx2x_emac_stats_update(bp);
3538
3539         else { /* unreached */
3540                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3541                 return -1;
3542         }
3543
3544         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3545                       new->brb_discard - old->brb_discard);
3546
3547         UPDATE_STAT64_NIG(egress_mac_pkt0,
3548                                         etherstatspkts1024octetsto1522octets);
3549         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3550
3551         memcpy(old, new, sizeof(struct nig_stats));
3552
3553         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3554                sizeof(struct mac_stx));
3555         estats->brb_drop_hi = pstats->brb_drop_hi;
3556         estats->brb_drop_lo = pstats->brb_drop_lo;
3557
3558         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3559
3560         return 0;
3561 }
3562
3563 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3564 {
3565         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3566         int cl_id = BP_CL_ID(bp);
3567         struct tstorm_per_port_stats *tport =
3568                                 &stats->tstorm_common.port_statistics;
3569         struct tstorm_per_client_stats *tclient =
3570                         &stats->tstorm_common.client_statistics[cl_id];
3571         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3572         struct xstorm_per_client_stats *xclient =
3573                         &stats->xstorm_common.client_statistics[cl_id];
3574         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3575         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3576         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3577         u32 diff;
3578
3579         /* are storm stats valid? */
3580         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3581                                                         bp->stats_counter) {
3582                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3583                    "  tstorm counter (%d) != stats_counter (%d)\n",
3584                    tclient->stats_counter, bp->stats_counter);
3585                 return -1;
3586         }
3587         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3588                                                         bp->stats_counter) {
3589                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3590                    "  xstorm counter (%d) != stats_counter (%d)\n",
3591                    xclient->stats_counter, bp->stats_counter);
3592                 return -2;
3593         }
3594
3595         fstats->total_bytes_received_hi =
3596         fstats->valid_bytes_received_hi =
3597                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3598         fstats->total_bytes_received_lo =
3599         fstats->valid_bytes_received_lo =
3600                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3601
3602         estats->error_bytes_received_hi =
3603                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3604         estats->error_bytes_received_lo =
3605                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3606         ADD_64(estats->error_bytes_received_hi,
3607                estats->rx_stat_ifhcinbadoctets_hi,
3608                estats->error_bytes_received_lo,
3609                estats->rx_stat_ifhcinbadoctets_lo);
3610
3611         ADD_64(fstats->total_bytes_received_hi,
3612                estats->error_bytes_received_hi,
3613                fstats->total_bytes_received_lo,
3614                estats->error_bytes_received_lo);
3615
3616         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3617         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3618                                 total_multicast_packets_received);
3619         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3620                                 total_broadcast_packets_received);
3621
3622         fstats->total_bytes_transmitted_hi =
3623                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3624         fstats->total_bytes_transmitted_lo =
3625                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3626
3627         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3628                                 total_unicast_packets_transmitted);
3629         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3630                                 total_multicast_packets_transmitted);
3631         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3632                                 total_broadcast_packets_transmitted);
3633
3634         memcpy(estats, &(fstats->total_bytes_received_hi),
3635                sizeof(struct host_func_stats) - 2*sizeof(u32));
3636
3637         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3638         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3639         estats->brb_truncate_discard =
3640                                 le32_to_cpu(tport->brb_truncate_discard);
3641         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3642
3643         old_tclient->rcv_unicast_bytes.hi =
3644                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3645         old_tclient->rcv_unicast_bytes.lo =
3646                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3647         old_tclient->rcv_broadcast_bytes.hi =
3648                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3649         old_tclient->rcv_broadcast_bytes.lo =
3650                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3651         old_tclient->rcv_multicast_bytes.hi =
3652                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3653         old_tclient->rcv_multicast_bytes.lo =
3654                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3655         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3656
3657         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3658         old_tclient->packets_too_big_discard =
3659                                 le32_to_cpu(tclient->packets_too_big_discard);
3660         estats->no_buff_discard =
3661         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3662         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3663
3664         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3665         old_xclient->unicast_bytes_sent.hi =
3666                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3667         old_xclient->unicast_bytes_sent.lo =
3668                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3669         old_xclient->multicast_bytes_sent.hi =
3670                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3671         old_xclient->multicast_bytes_sent.lo =
3672                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3673         old_xclient->broadcast_bytes_sent.hi =
3674                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3675         old_xclient->broadcast_bytes_sent.lo =
3676                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3677
3678         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3679
3680         return 0;
3681 }
3682
3683 static void bnx2x_net_stats_update(struct bnx2x *bp)
3684 {
3685         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3686         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3687         struct net_device_stats *nstats = &bp->dev->stats;
3688
3689         nstats->rx_packets =
3690                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3691                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3692                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3693
3694         nstats->tx_packets =
3695                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3696                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3697                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3698
3699         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3700
3701         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3702
3703         nstats->rx_dropped = old_tclient->checksum_discard +
3704                              estats->mac_discard;
3705         nstats->tx_dropped = 0;
3706
3707         nstats->multicast =
3708                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3709
3710         nstats->collisions =
3711                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3712                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3713                         estats->tx_stat_dot3statslatecollisions_lo +
3714                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3715
3716         estats->jabber_packets_received =
3717                                 old_tclient->packets_too_big_discard +
3718                                 estats->rx_stat_dot3statsframestoolong_lo;
3719
3720         nstats->rx_length_errors =
3721                                 estats->rx_stat_etherstatsundersizepkts_lo +
3722                                 estats->jabber_packets_received;
3723         nstats->rx_over_errors = estats->brb_drop_lo +
3724                                  estats->brb_truncate_discard;
3725         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3726         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3727         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3728         nstats->rx_missed_errors = estats->xxoverflow_discard;
3729
3730         nstats->rx_errors = nstats->rx_length_errors +
3731                             nstats->rx_over_errors +
3732                             nstats->rx_crc_errors +
3733                             nstats->rx_frame_errors +
3734                             nstats->rx_fifo_errors +
3735                             nstats->rx_missed_errors;
3736
3737         nstats->tx_aborted_errors =
3738                         estats->tx_stat_dot3statslatecollisions_lo +
3739                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3740         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3741         nstats->tx_fifo_errors = 0;
3742         nstats->tx_heartbeat_errors = 0;
3743         nstats->tx_window_errors = 0;
3744
3745         nstats->tx_errors = nstats->tx_aborted_errors +
3746                             nstats->tx_carrier_errors;
3747 }
3748
3749 static void bnx2x_stats_update(struct bnx2x *bp)
3750 {
3751         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3752         int update = 0;
3753
3754         if (*stats_comp != DMAE_COMP_VAL)
3755                 return;
3756
3757         if (bp->port.pmf)
3758                 update = (bnx2x_hw_stats_update(bp) == 0);
3759
3760         update |= (bnx2x_storm_stats_update(bp) == 0);
3761
3762         if (update)
3763                 bnx2x_net_stats_update(bp);
3764
3765         else {
3766                 if (bp->stats_pending) {
3767                         bp->stats_pending++;
3768                         if (bp->stats_pending == 3) {
3769                                 BNX2X_ERR("stats not updated for 3 times\n");
3770                                 bnx2x_panic();
3771                                 return;
3772                         }
3773                 }
3774         }
3775
3776         if (bp->msglevel & NETIF_MSG_TIMER) {
3777                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3778                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3779                 struct net_device_stats *nstats = &bp->dev->stats;
3780                 int i;
3781
3782                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3783                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3784                                   "  tx pkt (%lx)\n",
3785                        bnx2x_tx_avail(bp->fp),
3786                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3787                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3788                                   "  rx pkt (%lx)\n",
3789                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3790                              bp->fp->rx_comp_cons),
3791                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3792                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3793                        netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
3794                        estats->driver_xoff, estats->brb_drop_lo);
3795                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3796                         "packets_too_big_discard %u  no_buff_discard %u  "
3797                         "mac_discard %u  mac_filter_discard %u  "
3798                         "xxovrflow_discard %u  brb_truncate_discard %u  "
3799                         "ttl0_discard %u\n",
3800                        old_tclient->checksum_discard,
3801                        old_tclient->packets_too_big_discard,
3802                        old_tclient->no_buff_discard, estats->mac_discard,
3803                        estats->mac_filter_discard, estats->xxoverflow_discard,
3804                        estats->brb_truncate_discard,
3805                        old_tclient->ttl0_discard);
3806
3807                 for_each_queue(bp, i) {
3808                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3809                                bnx2x_fp(bp, i, tx_pkt),
3810                                bnx2x_fp(bp, i, rx_pkt),
3811                                bnx2x_fp(bp, i, rx_calls));
3812                 }
3813         }
3814
3815         bnx2x_hw_stats_post(bp);
3816         bnx2x_storm_stats_post(bp);
3817 }
3818
3819 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3820 {
3821         struct dmae_command *dmae;
3822         u32 opcode;
3823         int loader_idx = PMF_DMAE_C(bp);
3824         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3825
3826         bp->executer_idx = 0;
3827
3828         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3829                   DMAE_CMD_C_ENABLE |
3830                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3831 #ifdef __BIG_ENDIAN
3832                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3833 #else
3834                   DMAE_CMD_ENDIANITY_DW_SWAP |
3835 #endif
3836                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3837                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3838
3839         if (bp->port.port_stx) {
3840
3841                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3842                 if (bp->func_stx)
3843                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3844                 else
3845                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3846                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3847                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3848                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3849                 dmae->dst_addr_hi = 0;
3850                 dmae->len = sizeof(struct host_port_stats) >> 2;
3851                 if (bp->func_stx) {
3852                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3853                         dmae->comp_addr_hi = 0;
3854                         dmae->comp_val = 1;
3855                 } else {
3856                         dmae->comp_addr_lo =
3857                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3858                         dmae->comp_addr_hi =
3859                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3860                         dmae->comp_val = DMAE_COMP_VAL;
3861
3862                         *stats_comp = 0;
3863                 }
3864         }
3865
3866         if (bp->func_stx) {
3867
3868                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3869                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3870                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3871                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3872                 dmae->dst_addr_lo = bp->func_stx >> 2;
3873                 dmae->dst_addr_hi = 0;
3874                 dmae->len = sizeof(struct host_func_stats) >> 2;
3875                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3876                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3877                 dmae->comp_val = DMAE_COMP_VAL;
3878
3879                 *stats_comp = 0;
3880         }
3881 }
3882
3883 static void bnx2x_stats_stop(struct bnx2x *bp)
3884 {
3885         int update = 0;
3886
3887         bnx2x_stats_comp(bp);
3888
3889         if (bp->port.pmf)
3890                 update = (bnx2x_hw_stats_update(bp) == 0);
3891
3892         update |= (bnx2x_storm_stats_update(bp) == 0);
3893
3894         if (update) {
3895                 bnx2x_net_stats_update(bp);
3896
3897                 if (bp->port.pmf)
3898                         bnx2x_port_stats_stop(bp);
3899
3900                 bnx2x_hw_stats_post(bp);
3901                 bnx2x_stats_comp(bp);
3902         }
3903 }
3904
3905 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3906 {
3907 }
3908
3909 static const struct {
3910         void (*action)(struct bnx2x *bp);
3911         enum bnx2x_stats_state next_state;
3912 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3913 /* state        event   */
3914 {
3915 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3916 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3917 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3918 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3919 },
3920 {
3921 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3922 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3923 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3924 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3925 }
3926 };
3927
3928 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3929 {
3930         enum bnx2x_stats_state state = bp->stats_state;
3931
3932         bnx2x_stats_stm[state][event].action(bp);
3933         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3934
3935         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3936                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3937                    state, event, bp->stats_state);
3938 }
3939
3940 static void bnx2x_timer(unsigned long data)
3941 {
3942         struct bnx2x *bp = (struct bnx2x *) data;
3943
3944         if (!netif_running(bp->dev))
3945                 return;
3946
3947         if (atomic_read(&bp->intr_sem) != 0)
3948                 goto timer_restart;
3949
3950         if (poll) {
3951                 struct bnx2x_fastpath *fp = &bp->fp[0];
3952                 int rc;
3953
3954                 bnx2x_tx_int(fp, 1000);
3955                 rc = bnx2x_rx_int(fp, 1000);
3956         }
3957
3958         if (!BP_NOMCP(bp)) {
3959                 int func = BP_FUNC(bp);
3960                 u32 drv_pulse;
3961                 u32 mcp_pulse;
3962
3963                 ++bp->fw_drv_pulse_wr_seq;
3964                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3965                 /* TBD - add SYSTEM_TIME */
3966                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3967                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3968
3969                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3970                              MCP_PULSE_SEQ_MASK);
3971                 /* The delta between driver pulse and mcp response
3972                  * should be 1 (before mcp response) or 0 (after mcp response)
3973                  */
3974                 if ((drv_pulse != mcp_pulse) &&
3975                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3976                         /* someone lost a heartbeat... */
3977                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3978                                   drv_pulse, mcp_pulse);
3979                 }
3980         }
3981
3982         if ((bp->state == BNX2X_STATE_OPEN) ||
3983             (bp->state == BNX2X_STATE_DISABLED))
3984                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3985
3986 timer_restart:
3987         mod_timer(&bp->timer, jiffies + bp->current_interval);
3988 }
3989
3990 /* end of Statistics */
3991
3992 /* nic init */
3993
3994 /*
3995  * nic init service functions
3996  */
3997
3998 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3999 {
4000         int port = BP_PORT(bp);
4001
4002         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4003                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4004                         sizeof(struct ustorm_def_status_block)/4);
4005         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4006                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4007                         sizeof(struct cstorm_def_status_block)/4);
4008 }
4009
4010 static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
4011                           struct host_status_block *sb, dma_addr_t mapping)
4012 {
4013         int port = BP_PORT(bp);
4014         int func = BP_FUNC(bp);
4015         int index;
4016         u64 section;
4017
4018         /* USTORM */
4019         section = ((u64)mapping) + offsetof(struct host_status_block,
4020                                             u_status_block);
4021         sb->u_status_block.status_block_id = sb_id;
4022
4023         REG_WR(bp, BAR_USTRORM_INTMEM +
4024                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4025         REG_WR(bp, BAR_USTRORM_INTMEM +
4026                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4027                U64_HI(section));
4028         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4029                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4030
4031         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4032                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4033                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4034
4035         /* CSTORM */
4036         section = ((u64)mapping) + offsetof(struct host_status_block,
4037                                             c_status_block);
4038         sb->c_status_block.status_block_id = sb_id;
4039
4040         REG_WR(bp, BAR_CSTRORM_INTMEM +
4041                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4042         REG_WR(bp, BAR_CSTRORM_INTMEM +
4043                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4044                U64_HI(section));
4045         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4046                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4047
4048         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4049                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4050                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4051
4052         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4053 }
4054
4055 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4056 {
4057         int func = BP_FUNC(bp);
4058
4059         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4060                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4061                         sizeof(struct ustorm_def_status_block)/4);
4062         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4063                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4064                         sizeof(struct cstorm_def_status_block)/4);
4065         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4066                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4067                         sizeof(struct xstorm_def_status_block)/4);
4068         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4069                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4070                         sizeof(struct tstorm_def_status_block)/4);
4071 }
4072
4073 static void bnx2x_init_def_sb(struct bnx2x *bp,
4074                               struct host_def_status_block *def_sb,
4075                               dma_addr_t mapping, int sb_id)
4076 {
4077         int port = BP_PORT(bp);
4078         int func = BP_FUNC(bp);
4079         int index, val, reg_offset;
4080         u64 section;
4081
4082         /* ATTN */
4083         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4084                                             atten_status_block);
4085         def_sb->atten_status_block.status_block_id = sb_id;
4086
4087         bp->def_att_idx = 0;
4088         bp->attn_state = 0;
4089
4090         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4091                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4092
4093         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4094                 bp->attn_group[index].sig[0] = REG_RD(bp,
4095                                                      reg_offset + 0x10*index);
4096                 bp->attn_group[index].sig[1] = REG_RD(bp,
4097                                                reg_offset + 0x4 + 0x10*index);
4098                 bp->attn_group[index].sig[2] = REG_RD(bp,
4099                                                reg_offset + 0x8 + 0x10*index);
4100                 bp->attn_group[index].sig[3] = REG_RD(bp,
4101                                                reg_offset + 0xc + 0x10*index);
4102         }
4103
4104         bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4105                                           MISC_REG_AEU_MASK_ATTN_FUNC_0));
4106
4107         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4108                              HC_REG_ATTN_MSG0_ADDR_L);
4109
4110         REG_WR(bp, reg_offset, U64_LO(section));
4111         REG_WR(bp, reg_offset + 4, U64_HI(section));
4112
4113         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4114
4115         val = REG_RD(bp, reg_offset);
4116         val |= sb_id;
4117         REG_WR(bp, reg_offset, val);
4118
4119         /* USTORM */
4120         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4121                                             u_def_status_block);
4122         def_sb->u_def_status_block.status_block_id = sb_id;
4123
4124         bp->def_u_idx = 0;
4125
4126         REG_WR(bp, BAR_USTRORM_INTMEM +
4127                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4128         REG_WR(bp, BAR_USTRORM_INTMEM +
4129                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4130                U64_HI(section));
4131         REG_WR8(bp, BAR_USTRORM_INTMEM +  DEF_USB_FUNC_OFF +
4132                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4133         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
4134                BNX2X_BTR);
4135
4136         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4137                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4138                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4139
4140         /* CSTORM */
4141         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4142                                             c_def_status_block);
4143         def_sb->c_def_status_block.status_block_id = sb_id;
4144
4145         bp->def_c_idx = 0;
4146
4147         REG_WR(bp, BAR_CSTRORM_INTMEM +
4148                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4149         REG_WR(bp, BAR_CSTRORM_INTMEM +
4150                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4151                U64_HI(section));
4152         REG_WR8(bp, BAR_CSTRORM_INTMEM +  DEF_CSB_FUNC_OFF +
4153                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4154         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
4155                BNX2X_BTR);
4156
4157         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4158                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4159                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4160
4161         /* TSTORM */
4162         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4163                                             t_def_status_block);
4164         def_sb->t_def_status_block.status_block_id = sb_id;
4165
4166         bp->def_t_idx = 0;
4167
4168         REG_WR(bp, BAR_TSTRORM_INTMEM +
4169                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4170         REG_WR(bp, BAR_TSTRORM_INTMEM +
4171                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4172                U64_HI(section));
4173         REG_WR8(bp, BAR_TSTRORM_INTMEM +  DEF_TSB_FUNC_OFF +
4174                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4175         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
4176                BNX2X_BTR);
4177
4178         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4179                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4180                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4181
4182         /* XSTORM */
4183         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4184                                             x_def_status_block);
4185         def_sb->x_def_status_block.status_block_id = sb_id;
4186
4187         bp->def_x_idx = 0;
4188
4189         REG_WR(bp, BAR_XSTRORM_INTMEM +
4190                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4191         REG_WR(bp, BAR_XSTRORM_INTMEM +
4192                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4193                U64_HI(section));
4194         REG_WR8(bp, BAR_XSTRORM_INTMEM +  DEF_XSB_FUNC_OFF +
4195                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4196         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
4197                BNX2X_BTR);
4198
4199         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4200                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4201                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4202
4203         bp->stats_pending = 0;
4204
4205         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4206 }
4207
4208 static void bnx2x_update_coalesce(struct bnx2x *bp)
4209 {
4210         int port = BP_PORT(bp);
4211         int i;
4212
4213         for_each_queue(bp, i) {
4214                 int sb_id = bp->fp[i].sb_id;
4215
4216                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4217                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4218                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4219                                                    HC_INDEX_U_ETH_RX_CQ_CONS),
4220                         bp->rx_ticks/12);
4221                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4222                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4223                                                    HC_INDEX_U_ETH_RX_CQ_CONS),
4224                          bp->rx_ticks ? 0 : 1);
4225
4226                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4227                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4228                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4229                                                    HC_INDEX_C_ETH_TX_CQ_CONS),
4230                         bp->tx_ticks/12);
4231                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4232                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4233                                                    HC_INDEX_C_ETH_TX_CQ_CONS),
4234                          bp->tx_ticks ? 0 : 1);
4235         }
4236 }
4237
4238 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4239                                        struct bnx2x_fastpath *fp, int last)
4240 {
4241         int i;
4242
4243         for (i = 0; i < last; i++) {
4244                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4245                 struct sk_buff *skb = rx_buf->skb;
4246
4247                 if (skb == NULL) {
4248                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4249                         continue;
4250                 }
4251
4252                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4253                         pci_unmap_single(bp->pdev,
4254                                          pci_unmap_addr(rx_buf, mapping),
4255                                          bp->rx_buf_use_size,
4256                                          PCI_DMA_FROMDEVICE);
4257
4258                 dev_kfree_skb(skb);
4259                 rx_buf->skb = NULL;
4260         }
4261 }
4262
4263 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4264 {
4265         int func = BP_FUNC(bp);
4266         u16 ring_prod, cqe_ring_prod = 0;
4267         int i, j;
4268
4269         bp->rx_buf_use_size = bp->dev->mtu;
4270         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4271         bp->rx_buf_size = bp->rx_buf_use_size + 64;
4272
4273         if (bp->flags & TPA_ENABLE_FLAG) {
4274                 DP(NETIF_MSG_IFUP,
4275                    "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
4276                    bp->rx_buf_use_size, bp->rx_buf_size,
4277                    bp->dev->mtu + ETH_OVREHEAD);
4278
4279                 for_each_queue(bp, j) {
4280                         for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
4281                                 struct bnx2x_fastpath *fp = &bp->fp[j];
4282
4283                                 fp->tpa_pool[i].skb =
4284                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4285                                 if (!fp->tpa_pool[i].skb) {
4286                                         BNX2X_ERR("Failed to allocate TPA "
4287                                                   "skb pool for queue[%d] - "
4288                                                   "disabling TPA on this "
4289                                                   "queue!\n", j);
4290                                         bnx2x_free_tpa_pool(bp, fp, i);
4291                                         fp->disable_tpa = 1;
4292                                         break;
4293                                 }
4294                                 pci_unmap_addr_set((struct sw_rx_bd *)
4295                                                         &bp->fp->tpa_pool[i],
4296                                                    mapping, 0);
4297                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4298                         }
4299                 }
4300         }
4301
4302         for_each_queue(bp, j) {
4303                 struct bnx2x_fastpath *fp = &bp->fp[j];
4304
4305                 fp->rx_bd_cons = 0;
4306                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4307                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4308
4309                 /* "next page" elements initialization */
4310                 /* SGE ring */
4311                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4312                         struct eth_rx_sge *sge;
4313
4314                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4315                         sge->addr_hi =
4316                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4317                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4318                         sge->addr_lo =
4319                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4320                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4321                 }
4322
4323                 bnx2x_init_sge_ring_bit_mask(fp);
4324
4325                 /* RX BD ring */
4326                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4327                         struct eth_rx_bd *rx_bd;
4328
4329                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4330                         rx_bd->addr_hi =
4331                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4332                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4333                         rx_bd->addr_lo =
4334                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4335                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4336                 }
4337
4338                 /* CQ ring */
4339                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4340                         struct eth_rx_cqe_next_page *nextpg;
4341
4342                         nextpg = (struct eth_rx_cqe_next_page *)
4343                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4344                         nextpg->addr_hi =
4345                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4346                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4347                         nextpg->addr_lo =
4348                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4349                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4350                 }
4351
4352                 /* Allocate SGEs and initialize the ring elements */
4353                 for (i = 0, ring_prod = 0;
4354                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4355
4356                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4357                                 BNX2X_ERR("was only able to allocate "
4358                                           "%d rx sges\n", i);
4359                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4360                                 /* Cleanup already allocated elements */
4361                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4362                                 bnx2x_free_tpa_pool(bp, fp,
4363                                               ETH_MAX_AGGREGATION_QUEUES_E1H);
4364                                 fp->disable_tpa = 1;
4365                                 ring_prod = 0;
4366                                 break;
4367                         }
4368                         ring_prod = NEXT_SGE_IDX(ring_prod);
4369                 }
4370                 fp->rx_sge_prod = ring_prod;
4371
4372                 /* Allocate BDs and initialize BD ring */
4373                 fp->rx_comp_cons = fp->rx_alloc_failed = 0;
4374                 cqe_ring_prod = ring_prod = 0;
4375                 for (i = 0; i < bp->rx_ring_size; i++) {
4376                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4377                                 BNX2X_ERR("was only able to allocate "
4378                                           "%d rx skbs\n", i);
4379                                 fp->rx_alloc_failed++;
4380                                 break;
4381                         }
4382                         ring_prod = NEXT_RX_IDX(ring_prod);
4383                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4384                         WARN_ON(ring_prod <= i);
4385                 }
4386
4387                 fp->rx_bd_prod = ring_prod;
4388                 /* must not have more available CQEs than BDs */
4389                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4390                                        cqe_ring_prod);
4391                 fp->rx_pkt = fp->rx_calls = 0;
4392
4393                 /* Warning!
4394                  * this will generate an interrupt (to the TSTORM)
4395                  * must only be done after chip is initialized
4396                  */
4397                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4398                                      fp->rx_sge_prod);
4399                 if (j != 0)
4400                         continue;
4401
4402                 REG_WR(bp, BAR_USTRORM_INTMEM +
4403                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4404                        U64_LO(fp->rx_comp_mapping));
4405                 REG_WR(bp, BAR_USTRORM_INTMEM +
4406                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4407                        U64_HI(fp->rx_comp_mapping));
4408         }
4409 }
4410
4411 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4412 {
4413         int i, j;
4414
4415         for_each_queue(bp, j) {
4416                 struct bnx2x_fastpath *fp = &bp->fp[j];
4417
4418                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4419                         struct eth_tx_bd *tx_bd =
4420                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4421
4422                         tx_bd->addr_hi =
4423                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4424                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4425                         tx_bd->addr_lo =
4426                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4427                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4428                 }
4429
4430                 fp->tx_pkt_prod = 0;
4431                 fp->tx_pkt_cons = 0;
4432                 fp->tx_bd_prod = 0;
4433                 fp->tx_bd_cons = 0;
4434                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4435                 fp->tx_pkt = 0;
4436         }
4437 }
4438
4439 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4440 {
4441         int func = BP_FUNC(bp);
4442
4443         spin_lock_init(&bp->spq_lock);
4444
4445         bp->spq_left = MAX_SPQ_PENDING;
4446         bp->spq_prod_idx = 0;
4447         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4448         bp->spq_prod_bd = bp->spq;
4449         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4450
4451         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4452                U64_LO(bp->spq_mapping));
4453         REG_WR(bp,
4454                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4455                U64_HI(bp->spq_mapping));
4456
4457         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4458                bp->spq_prod_idx);
4459 }
4460
4461 static void bnx2x_init_context(struct bnx2x *bp)
4462 {
4463         int i;
4464
4465         for_each_queue(bp, i) {
4466                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4467                 struct bnx2x_fastpath *fp = &bp->fp[i];
4468                 u8 sb_id = FP_SB_ID(fp);
4469
4470                 context->xstorm_st_context.tx_bd_page_base_hi =
4471                                                 U64_HI(fp->tx_desc_mapping);
4472                 context->xstorm_st_context.tx_bd_page_base_lo =
4473                                                 U64_LO(fp->tx_desc_mapping);
4474                 context->xstorm_st_context.db_data_addr_hi =
4475                                                 U64_HI(fp->tx_prods_mapping);
4476                 context->xstorm_st_context.db_data_addr_lo =
4477                                                 U64_LO(fp->tx_prods_mapping);
4478                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4479                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4480
4481                 context->ustorm_st_context.common.sb_index_numbers =
4482                                                 BNX2X_RX_SB_INDEX_NUM;
4483                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4484                 context->ustorm_st_context.common.status_block_id = sb_id;
4485                 context->ustorm_st_context.common.flags =
4486                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4487                 context->ustorm_st_context.common.mc_alignment_size = 64;
4488                 context->ustorm_st_context.common.bd_buff_size =
4489                                                 bp->rx_buf_use_size;
4490                 context->ustorm_st_context.common.bd_page_base_hi =
4491                                                 U64_HI(fp->rx_desc_mapping);
4492                 context->ustorm_st_context.common.bd_page_base_lo =
4493                                                 U64_LO(fp->rx_desc_mapping);
4494                 if (!fp->disable_tpa) {
4495                         context->ustorm_st_context.common.flags |=
4496                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4497                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4498                         context->ustorm_st_context.common.sge_buff_size =
4499                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4500                         context->ustorm_st_context.common.sge_page_base_hi =
4501                                                 U64_HI(fp->rx_sge_mapping);
4502                         context->ustorm_st_context.common.sge_page_base_lo =
4503                                                 U64_LO(fp->rx_sge_mapping);
4504                 }
4505
4506                 context->cstorm_st_context.sb_index_number =
4507                                                 HC_INDEX_C_ETH_TX_CQ_CONS;
4508                 context->cstorm_st_context.status_block_id = sb_id;
4509
4510                 context->xstorm_ag_context.cdu_reserved =
4511                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4512                                                CDU_REGION_NUMBER_XCM_AG,
4513                                                ETH_CONNECTION_TYPE);
4514                 context->ustorm_ag_context.cdu_usage =
4515                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4516                                                CDU_REGION_NUMBER_UCM_AG,
4517                                                ETH_CONNECTION_TYPE);
4518         }
4519 }
4520
4521 static void bnx2x_init_ind_table(struct bnx2x *bp)
4522 {
4523         int port = BP_PORT(bp);
4524         int i;
4525
4526         if (!is_multi(bp))
4527                 return;
4528
4529         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4530         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4531                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4532                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4533                         i % bp->num_queues);
4534
4535         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4536 }
4537
4538 static void bnx2x_set_client_config(struct bnx2x *bp)
4539 {
4540         struct tstorm_eth_client_config tstorm_client = {0};
4541         int port = BP_PORT(bp);
4542         int i;
4543
4544         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4545         tstorm_client.statistics_counter_id = 0;
4546         tstorm_client.config_flags =
4547                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4548 #ifdef BCM_VLAN
4549         if (bp->rx_mode && bp->vlgrp) {
4550                 tstorm_client.config_flags |=
4551                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4552                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4553         }
4554 #endif
4555
4556         if (bp->flags & TPA_ENABLE_FLAG) {
4557                 tstorm_client.max_sges_for_packet =
4558                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4559                 tstorm_client.max_sges_for_packet =
4560                         ((tstorm_client.max_sges_for_packet +
4561                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4562                         PAGES_PER_SGE_SHIFT;
4563
4564                 tstorm_client.config_flags |=
4565                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4566         }
4567
4568         for_each_queue(bp, i) {
4569                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4570                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4571                        ((u32 *)&tstorm_client)[0]);
4572                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4573                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4574                        ((u32 *)&tstorm_client)[1]);
4575         }
4576
4577         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4578            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4579 }
4580
4581 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4582 {
4583         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4584         int mode = bp->rx_mode;
4585         int mask = (1 << BP_L_ID(bp));
4586         int func = BP_FUNC(bp);
4587         int i;
4588
4589         DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4590
4591         switch (mode) {
4592         case BNX2X_RX_MODE_NONE: /* no Rx */
4593                 tstorm_mac_filter.ucast_drop_all = mask;
4594                 tstorm_mac_filter.mcast_drop_all = mask;
4595                 tstorm_mac_filter.bcast_drop_all = mask;
4596                 break;
4597         case BNX2X_RX_MODE_NORMAL:
4598                 tstorm_mac_filter.bcast_accept_all = mask;
4599                 break;
4600         case BNX2X_RX_MODE_ALLMULTI:
4601                 tstorm_mac_filter.mcast_accept_all = mask;
4602                 tstorm_mac_filter.bcast_accept_all = mask;
4603                 break;
4604         case BNX2X_RX_MODE_PROMISC:
4605                 tstorm_mac_filter.ucast_accept_all = mask;
4606                 tstorm_mac_filter.mcast_accept_all = mask;
4607                 tstorm_mac_filter.bcast_accept_all = mask;
4608                 break;
4609         default:
4610                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4611                 break;
4612         }
4613
4614         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4615                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4616                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4617                        ((u32 *)&tstorm_mac_filter)[i]);
4618
4619 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4620                    ((u32 *)&tstorm_mac_filter)[i]); */
4621         }
4622
4623         if (mode != BNX2X_RX_MODE_NONE)
4624                 bnx2x_set_client_config(bp);
4625 }
4626
4627 static void bnx2x_init_internal(struct bnx2x *bp)
4628 {
4629         struct tstorm_eth_function_common_config tstorm_config = {0};
4630         struct stats_indication_flags stats_flags = {0};
4631         int port = BP_PORT(bp);
4632         int func = BP_FUNC(bp);
4633         int i;
4634
4635         if (is_multi(bp)) {
4636                 tstorm_config.config_flags = MULTI_FLAGS;
4637                 tstorm_config.rss_result_mask = MULTI_MASK;
4638         }
4639
4640         tstorm_config.leading_client_id = BP_L_ID(bp);
4641
4642         REG_WR(bp, BAR_TSTRORM_INTMEM +
4643                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4644                (*(u32 *)&tstorm_config));
4645
4646 /*      DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4647            (*(u32 *)&tstorm_config)); */
4648
4649         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4650         bnx2x_set_storm_rx_mode(bp);
4651
4652         stats_flags.collect_eth = 1;
4653
4654         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
4655                ((u32 *)&stats_flags)[0]);
4656         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
4657                ((u32 *)&stats_flags)[1]);
4658
4659         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
4660                ((u32 *)&stats_flags)[0]);
4661         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
4662                ((u32 *)&stats_flags)[1]);
4663
4664         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
4665                ((u32 *)&stats_flags)[0]);
4666         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
4667                ((u32 *)&stats_flags)[1]);
4668
4669 /*      DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
4670            ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
4671
4672         if (CHIP_IS_E1H(bp)) {
4673                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4674                         IS_E1HMF(bp));
4675                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4676                         IS_E1HMF(bp));
4677                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4678                         IS_E1HMF(bp));
4679                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4680                         IS_E1HMF(bp));
4681
4682                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4683                          bp->e1hov);
4684         }
4685
4686         /* Zero this manualy as its initialization is
4687            currently missing in the initTool */
4688         for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++)
4689                 REG_WR(bp, BAR_USTRORM_INTMEM +
4690                        USTORM_AGG_DATA_OFFSET + 4*i, 0);
4691
4692         for_each_queue(bp, i) {
4693                 struct bnx2x_fastpath *fp = &bp->fp[i];
4694                 u16 max_agg_size;
4695
4696                 REG_WR(bp, BAR_USTRORM_INTMEM +
4697                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4698                        U64_LO(fp->rx_comp_mapping));
4699                 REG_WR(bp, BAR_USTRORM_INTMEM +
4700                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4701                        U64_HI(fp->rx_comp_mapping));
4702
4703                 max_agg_size = min((u32)(bp->rx_buf_use_size +
4704                                          8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4705                                    (u32)0xffff);
4706                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4707                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4708                          max_agg_size);
4709         }
4710 }
4711
4712 static void bnx2x_nic_init(struct bnx2x *bp)
4713 {
4714         int i;
4715
4716         for_each_queue(bp, i) {
4717                 struct bnx2x_fastpath *fp = &bp->fp[i];
4718
4719                 fp->bp = bp;
4720                 fp->state = BNX2X_FP_STATE_CLOSED;
4721                 fp->index = i;
4722                 fp->cl_id = BP_L_ID(bp) + i;
4723                 fp->sb_id = fp->cl_id;
4724                 DP(NETIF_MSG_IFUP,
4725                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4726                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4727                 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
4728                               fp->status_blk_mapping);
4729         }
4730
4731         bnx2x_init_def_sb(bp, bp->def_status_blk,
4732                           bp->def_status_blk_mapping, DEF_SB_ID);
4733         bnx2x_update_coalesce(bp);
4734         bnx2x_init_rx_rings(bp);
4735         bnx2x_init_tx_ring(bp);
4736         bnx2x_init_sp_ring(bp);
4737         bnx2x_init_context(bp);
4738         bnx2x_init_internal(bp);
4739         bnx2x_storm_stats_init(bp);
4740         bnx2x_init_ind_table(bp);
4741         bnx2x_int_enable(bp);
4742 }
4743
4744 /* end of nic init */
4745
4746 /*
4747  * gzip service functions
4748  */
4749
4750 static int bnx2x_gunzip_init(struct bnx2x *bp)
4751 {
4752         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4753                                               &bp->gunzip_mapping);
4754         if (bp->gunzip_buf  == NULL)
4755                 goto gunzip_nomem1;
4756
4757         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4758         if (bp->strm  == NULL)
4759                 goto gunzip_nomem2;
4760
4761         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4762                                       GFP_KERNEL);
4763         if (bp->strm->workspace == NULL)
4764                 goto gunzip_nomem3;
4765
4766         return 0;
4767
4768 gunzip_nomem3:
4769         kfree(bp->strm);
4770         bp->strm = NULL;
4771
4772 gunzip_nomem2:
4773         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4774                             bp->gunzip_mapping);
4775         bp->gunzip_buf = NULL;
4776
4777 gunzip_nomem1:
4778         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4779                " un-compression\n", bp->dev->name);
4780         return -ENOMEM;
4781 }
4782
4783 static void bnx2x_gunzip_end(struct bnx2x *bp)
4784 {
4785         kfree(bp->strm->workspace);
4786
4787         kfree(bp->strm);
4788         bp->strm = NULL;
4789
4790         if (bp->gunzip_buf) {
4791                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4792                                     bp->gunzip_mapping);
4793                 bp->gunzip_buf = NULL;
4794         }
4795 }
4796
4797 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4798 {
4799         int n, rc;
4800
4801         /* check gzip header */
4802         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4803                 return -EINVAL;
4804
4805         n = 10;
4806
4807 #define FNAME                           0x8
4808
4809         if (zbuf[3] & FNAME)
4810                 while ((zbuf[n++] != 0) && (n < len));
4811
4812         bp->strm->next_in = zbuf + n;
4813         bp->strm->avail_in = len - n;
4814         bp->strm->next_out = bp->gunzip_buf;
4815         bp->strm->avail_out = FW_BUF_SIZE;
4816
4817         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4818         if (rc != Z_OK)
4819                 return rc;
4820
4821         rc = zlib_inflate(bp->strm, Z_FINISH);
4822         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4823                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4824                        bp->dev->name, bp->strm->msg);
4825
4826         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4827         if (bp->gunzip_outlen & 0x3)
4828                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4829                                     " gunzip_outlen (%d) not aligned\n",
4830                        bp->dev->name, bp->gunzip_outlen);
4831         bp->gunzip_outlen >>= 2;
4832
4833         zlib_inflateEnd(bp->strm);
4834
4835         if (rc == Z_STREAM_END)
4836                 return 0;
4837
4838         return rc;
4839 }
4840
4841 /* nic load/unload */
4842
4843 /*
4844  * General service functions
4845  */
4846
4847 /* send a NIG loopback debug packet */
4848 static void bnx2x_lb_pckt(struct bnx2x *bp)
4849 {
4850         u32 wb_write[3];
4851
4852         /* Ethernet source and destination addresses */
4853         wb_write[0] = 0x55555555;
4854         wb_write[1] = 0x55555555;
4855         wb_write[2] = 0x20;             /* SOP */
4856         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4857
4858         /* NON-IP protocol */
4859         wb_write[0] = 0x09000000;
4860         wb_write[1] = 0x55555555;
4861         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4862         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4863 }
4864
4865 /* some of the internal memories
4866  * are not directly readable from the driver
4867  * to test them we send debug packets
4868  */
4869 static int bnx2x_int_mem_test(struct bnx2x *bp)
4870 {
4871         int factor;
4872         int count, i;
4873         u32 val = 0;
4874
4875         if (CHIP_REV_IS_FPGA(bp))
4876                 factor = 120;
4877         else if (CHIP_REV_IS_EMUL(bp))
4878                 factor = 200;
4879         else
4880                 factor = 1;
4881
4882         DP(NETIF_MSG_HW, "start part1\n");
4883
4884         /* Disable inputs of parser neighbor blocks */
4885         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4886         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4887         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4888         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4889
4890         /*  Write 0 to parser credits for CFC search request */
4891         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4892
4893         /* send Ethernet packet */
4894         bnx2x_lb_pckt(bp);
4895
4896         /* TODO do i reset NIG statistic? */
4897         /* Wait until NIG register shows 1 packet of size 0x10 */
4898         count = 1000 * factor;
4899         while (count) {
4900
4901                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4902                 val = *bnx2x_sp(bp, wb_data[0]);
4903                 if (val == 0x10)
4904                         break;
4905
4906                 msleep(10);
4907                 count--;
4908         }
4909         if (val != 0x10) {
4910                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4911                 return -1;
4912         }
4913
4914         /* Wait until PRS register shows 1 packet */
4915         count = 1000 * factor;
4916         while (count) {
4917                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4918                 if (val == 1)
4919                         break;
4920
4921                 msleep(10);
4922                 count--;
4923         }
4924         if (val != 0x1) {
4925                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4926                 return -2;
4927         }
4928
4929         /* Reset and init BRB, PRS */
4930         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4931         msleep(50);
4932         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4933         msleep(50);
4934         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4935         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4936
4937         DP(NETIF_MSG_HW, "part2\n");
4938
4939         /* Disable inputs of parser neighbor blocks */
4940         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4941         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4942         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4943         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4944
4945         /* Write 0 to parser credits for CFC search request */
4946         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4947
4948         /* send 10 Ethernet packets */
4949         for (i = 0; i < 10; i++)
4950                 bnx2x_lb_pckt(bp);
4951
4952         /* Wait until NIG register shows 10 + 1
4953            packets of size 11*0x10 = 0xb0 */
4954         count = 1000 * factor;
4955         while (count) {
4956
4957                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4958                 val = *bnx2x_sp(bp, wb_data[0]);
4959                 if (val == 0xb0)
4960                         break;
4961
4962                 msleep(10);
4963                 count--;
4964         }
4965         if (val != 0xb0) {
4966                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4967                 return -3;
4968         }
4969
4970         /* Wait until PRS register shows 2 packets */
4971         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4972         if (val != 2)
4973                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4974
4975         /* Write 1 to parser credits for CFC search request */
4976         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4977
4978         /* Wait until PRS register shows 3 packets */
4979         msleep(10 * factor);
4980         /* Wait until NIG register shows 1 packet of size 0x10 */
4981         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4982         if (val != 3)
4983                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
4984
4985         /* clear NIG EOP FIFO */
4986         for (i = 0; i < 11; i++)
4987                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4988         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4989         if (val != 1) {
4990                 BNX2X_ERR("clear of NIG failed\n");
4991                 return -4;
4992         }
4993
4994         /* Reset and init BRB, PRS, NIG */
4995         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4996         msleep(50);
4997         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4998         msleep(50);
4999         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5000         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5001 #ifndef BCM_ISCSI
5002         /* set NIC mode */
5003         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5004 #endif
5005
5006         /* Enable inputs of parser neighbor blocks */
5007         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5008         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5009         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5010         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5011
5012         DP(NETIF_MSG_HW, "done\n");
5013
5014         return 0; /* OK */
5015 }
5016
5017 static void enable_blocks_attention(struct bnx2x *bp)
5018 {
5019         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5020         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5021         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5022         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5023         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5024         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5025         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5026         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5027         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5028 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5029 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5030         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5031         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5032         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5033 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5034 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5035         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5036         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5037         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5038         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5039 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5040 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5041         if (CHIP_REV_IS_FPGA(bp))
5042                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5043         else
5044                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5045         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5046         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5047         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5048 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5049 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5050         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5051         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5052 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5053         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
5054 }
5055
5056
5057 static int bnx2x_init_common(struct bnx2x *bp)
5058 {
5059         u32 val, i;
5060
5061         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5062
5063         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5064         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5065
5066         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5067         if (CHIP_IS_E1H(bp))
5068                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5069
5070         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5071         msleep(30);
5072         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5073
5074         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5075         if (CHIP_IS_E1(bp)) {
5076                 /* enable HW interrupt from PXP on USDM overflow
5077                    bit 16 on INT_MASK_0 */
5078                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5079         }
5080
5081         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5082         bnx2x_init_pxp(bp);
5083
5084 #ifdef __BIG_ENDIAN
5085         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5086         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5087         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5088         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5089         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5090         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5091
5092 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5093         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5094         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5095         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5096         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5097 #endif
5098
5099 #ifndef BCM_ISCSI
5100                 /* set NIC mode */
5101                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5102 #endif
5103
5104         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5105 #ifdef BCM_ISCSI
5106         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5107         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5108         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5109 #endif
5110
5111         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5112                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5113
5114         /* let the HW do it's magic ... */
5115         msleep(100);
5116         /* finish PXP init */
5117         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5118         if (val != 1) {
5119                 BNX2X_ERR("PXP2 CFG failed\n");
5120                 return -EBUSY;
5121         }
5122         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5123         if (val != 1) {
5124                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5125                 return -EBUSY;
5126         }
5127
5128         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5129         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5130
5131         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5132
5133         /* clean the DMAE memory */
5134         bp->dmae_ready = 1;
5135         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5136
5137         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5138         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5139         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5140         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5141
5142         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5143         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5144         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5145         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5146
5147         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5148         /* soft reset pulse */
5149         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5150         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5151
5152 #ifdef BCM_ISCSI
5153         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5154 #endif
5155
5156         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5157         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5158         if (!CHIP_REV_IS_SLOW(bp)) {
5159                 /* enable hw interrupt from doorbell Q */
5160                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5161         }
5162
5163         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5164         if (CHIP_REV_IS_SLOW(bp)) {
5165                 /* fix for emulation and FPGA for no pause */
5166                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5167                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5168                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5169                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5170         }
5171
5172         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5173         if (CHIP_IS_E1H(bp))
5174                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5175
5176         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5177         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5178         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5179         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5180
5181         if (CHIP_IS_E1H(bp)) {
5182                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5183                                 STORM_INTMEM_SIZE_E1H/2);
5184                 bnx2x_init_fill(bp,
5185                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5186                                 0, STORM_INTMEM_SIZE_E1H/2);
5187                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5188                                 STORM_INTMEM_SIZE_E1H/2);
5189                 bnx2x_init_fill(bp,
5190                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5191                                 0, STORM_INTMEM_SIZE_E1H/2);
5192                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5193                                 STORM_INTMEM_SIZE_E1H/2);
5194                 bnx2x_init_fill(bp,
5195                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5196                                 0, STORM_INTMEM_SIZE_E1H/2);
5197                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5198                                 STORM_INTMEM_SIZE_E1H/2);
5199                 bnx2x_init_fill(bp,
5200                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5201                                 0, STORM_INTMEM_SIZE_E1H/2);
5202         } else { /* E1 */
5203                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5204                                 STORM_INTMEM_SIZE_E1);
5205                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5206                                 STORM_INTMEM_SIZE_E1);
5207                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5208                                 STORM_INTMEM_SIZE_E1);
5209                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5210                                 STORM_INTMEM_SIZE_E1);
5211         }
5212
5213         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5214         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5215         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5216         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5217
5218         /* sync semi rtc */
5219         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5220                0x80000000);
5221         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5222                0x80000000);
5223
5224         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5225         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5226         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5227
5228         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5229         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5230                 REG_WR(bp, i, 0xc0cac01a);
5231                 /* TODO: replace with something meaningful */
5232         }
5233         if (CHIP_IS_E1H(bp))
5234                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5235         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5236
5237         if (sizeof(union cdu_context) != 1024)
5238                 /* we currently assume that a context is 1024 bytes */
5239                 printk(KERN_ALERT PFX "please adjust the size of"
5240                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5241
5242         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5243         val = (4 << 24) + (0 << 12) + 1024;
5244         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5245         if (CHIP_IS_E1(bp)) {
5246                 /* !!! fix pxp client crdit until excel update */
5247                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5248                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5249         }
5250
5251         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5252         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5253
5254         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5255         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5256
5257         /* PXPCS COMMON comes here */
5258         /* Reset PCIE errors for debug */
5259         REG_WR(bp, 0x2814, 0xffffffff);
5260         REG_WR(bp, 0x3820, 0xffffffff);
5261
5262         /* EMAC0 COMMON comes here */
5263         /* EMAC1 COMMON comes here */
5264         /* DBU COMMON comes here */
5265         /* DBG COMMON comes here */
5266
5267         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5268         if (CHIP_IS_E1H(bp)) {
5269                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5270                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5271         }
5272
5273         if (CHIP_REV_IS_SLOW(bp))
5274                 msleep(200);
5275
5276         /* finish CFC init */
5277         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5278         if (val != 1) {
5279                 BNX2X_ERR("CFC LL_INIT failed\n");
5280                 return -EBUSY;
5281         }
5282         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5283         if (val != 1) {
5284                 BNX2X_ERR("CFC AC_INIT failed\n");
5285                 return -EBUSY;
5286         }
5287         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5288         if (val != 1) {
5289                 BNX2X_ERR("CFC CAM_INIT failed\n");
5290                 return -EBUSY;
5291         }
5292         REG_WR(bp, CFC_REG_DEBUG0, 0);
5293
5294         /* read NIG statistic
5295            to see if this is our first up since powerup */
5296         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5297         val = *bnx2x_sp(bp, wb_data[0]);
5298
5299         /* do internal memory self test */
5300         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5301                 BNX2X_ERR("internal mem self test failed\n");
5302                 return -EBUSY;
5303         }
5304
5305         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5306         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5307                 /* Fan failure is indicated by SPIO 5 */
5308                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5309                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5310
5311                 /* set to active low mode */
5312                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5313                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5314                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5315                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5316
5317                 /* enable interrupt to signal the IGU */
5318                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5319                 val |= (1 << MISC_REGISTERS_SPIO_5);
5320                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5321                 break;
5322
5323         default:
5324                 break;
5325         }
5326
5327         /* clear PXP2 attentions */
5328         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5329
5330         enable_blocks_attention(bp);
5331
5332         if (bp->flags & TPA_ENABLE_FLAG) {
5333                 struct tstorm_eth_tpa_exist tmp = {0};
5334
5335                 tmp.tpa_exist = 1;
5336
5337                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5338                        ((u32 *)&tmp)[0]);
5339                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5340                        ((u32 *)&tmp)[1]);
5341         }
5342
5343         return 0;
5344 }
5345
5346 static int bnx2x_init_port(struct bnx2x *bp)
5347 {
5348         int port = BP_PORT(bp);
5349         u32 val;
5350
5351         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5352
5353         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5354
5355         /* Port PXP comes here */
5356         /* Port PXP2 comes here */
5357 #ifdef BCM_ISCSI
5358         /* Port0  1
5359          * Port1  385 */
5360         i++;
5361         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5362         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5363         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5364         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5365
5366         /* Port0  2
5367          * Port1  386 */
5368         i++;
5369         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5370         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5371         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5372         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5373
5374         /* Port0  3
5375          * Port1  387 */
5376         i++;
5377         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5378         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5379         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5380         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5381 #endif
5382         /* Port CMs come here */
5383
5384         /* Port QM comes here */
5385 #ifdef BCM_ISCSI
5386         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5387         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5388
5389         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5390                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5391 #endif
5392         /* Port DQ comes here */
5393         /* Port BRB1 comes here */
5394         /* Port PRS comes here */
5395         /* Port TSDM comes here */
5396         /* Port CSDM comes here */
5397         /* Port USDM comes here */
5398         /* Port XSDM comes here */
5399         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5400                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5401         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5402                              port ? USEM_PORT1_END : USEM_PORT0_END);
5403         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5404                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5405         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5406                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5407         /* Port UPB comes here */
5408         /* Port XPB comes here */
5409
5410         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5411                              port ? PBF_PORT1_END : PBF_PORT0_END);
5412
5413         /* configure PBF to work without PAUSE mtu 9000 */
5414         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5415
5416         /* update threshold */
5417         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5418         /* update init credit */
5419         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5420
5421         /* probe changes */
5422         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5423         msleep(5);
5424         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5425
5426 #ifdef BCM_ISCSI
5427         /* tell the searcher where the T2 table is */
5428         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5429
5430         wb_write[0] = U64_LO(bp->t2_mapping);
5431         wb_write[1] = U64_HI(bp->t2_mapping);
5432         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5433         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5434         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5435         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5436
5437         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5438         /* Port SRCH comes here */
5439 #endif
5440         /* Port CDU comes here */
5441         /* Port CFC comes here */
5442
5443         if (CHIP_IS_E1(bp)) {
5444                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5445                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5446         }
5447         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5448                              port ? HC_PORT1_END : HC_PORT0_END);
5449
5450         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5451                                     MISC_AEU_PORT0_START,
5452                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5453         /* init aeu_mask_attn_func_0/1:
5454          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5455          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5456          *             bits 4-7 are used for "per vn group attention" */
5457         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5458                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5459
5460         /* Port PXPCS comes here */
5461         /* Port EMAC0 comes here */
5462         /* Port EMAC1 comes here */
5463         /* Port DBU comes here */
5464         /* Port DBG comes here */
5465         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5466                              port ? NIG_PORT1_END : NIG_PORT0_END);
5467
5468         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5469
5470         if (CHIP_IS_E1H(bp)) {
5471                 u32 wsum;
5472                 struct cmng_struct_per_port m_cmng_port;
5473                 int vn;
5474
5475                 /* 0x2 disable e1hov, 0x1 enable */
5476                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5477                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5478
5479                 /* Init RATE SHAPING and FAIRNESS contexts.
5480                    Initialize as if there is 10G link. */
5481                 wsum = bnx2x_calc_vn_wsum(bp);
5482                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5483                 if (IS_E1HMF(bp))
5484                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5485                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5486                                         wsum, 10000, &m_cmng_port);
5487         }
5488
5489         /* Port MCP comes here */
5490         /* Port DMAE comes here */
5491
5492         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5493         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5494                 /* add SPIO 5 to group 0 */
5495                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5496                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5497                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5498                 break;
5499
5500         default:
5501                 break;
5502         }
5503
5504         bnx2x__link_reset(bp);
5505
5506         return 0;
5507 }
5508
5509 #define ILT_PER_FUNC            (768/2)
5510 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5511 /* the phys address is shifted right 12 bits and has an added
5512    1=valid bit added to the 53rd bit
5513    then since this is a wide register(TM)
5514    we split it into two 32 bit writes
5515  */
5516 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5517 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5518 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5519 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
5520
5521 #define CNIC_ILT_LINES          0
5522
5523 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5524 {
5525         int reg;
5526
5527         if (CHIP_IS_E1H(bp))
5528                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5529         else /* E1 */
5530                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5531
5532         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5533 }
5534
5535 static int bnx2x_init_func(struct bnx2x *bp)
5536 {
5537         int port = BP_PORT(bp);
5538         int func = BP_FUNC(bp);
5539         int i;
5540
5541         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5542
5543         i = FUNC_ILT_BASE(func);
5544
5545         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5546         if (CHIP_IS_E1H(bp)) {
5547                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5548                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5549         } else /* E1 */
5550                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5551                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5552
5553
5554         if (CHIP_IS_E1H(bp)) {
5555                 for (i = 0; i < 9; i++)
5556                         bnx2x_init_block(bp,
5557                                          cm_start[func][i], cm_end[func][i]);
5558
5559                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5560                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5561         }
5562
5563         /* HC init per function */
5564         if (CHIP_IS_E1H(bp)) {
5565                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5566
5567                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5568                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5569         }
5570         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5571
5572         if (CHIP_IS_E1H(bp))
5573                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5574
5575         /* Reset PCIE errors for debug */
5576         REG_WR(bp, 0x2114, 0xffffffff);
5577         REG_WR(bp, 0x2120, 0xffffffff);
5578
5579         return 0;
5580 }
5581
5582 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5583 {
5584         int i, rc = 0;
5585
5586         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5587            BP_FUNC(bp), load_code);
5588
5589         bp->dmae_ready = 0;
5590         mutex_init(&bp->dmae_mutex);
5591         bnx2x_gunzip_init(bp);
5592
5593         switch (load_code) {
5594         case FW_MSG_CODE_DRV_LOAD_COMMON:
5595                 rc = bnx2x_init_common(bp);
5596                 if (rc)
5597                         goto init_hw_err;
5598                 /* no break */
5599
5600         case FW_MSG_CODE_DRV_LOAD_PORT:
5601                 bp->dmae_ready = 1;
5602                 rc = bnx2x_init_port(bp);
5603                 if (rc)
5604                         goto init_hw_err;
5605                 /* no break */
5606
5607         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5608                 bp->dmae_ready = 1;
5609                 rc = bnx2x_init_func(bp);
5610                 if (rc)
5611                         goto init_hw_err;
5612                 break;
5613
5614         default:
5615                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5616                 break;
5617         }
5618
5619         if (!BP_NOMCP(bp)) {
5620                 int func = BP_FUNC(bp);
5621
5622                 bp->fw_drv_pulse_wr_seq =
5623                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5624                                  DRV_PULSE_SEQ_MASK);
5625                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5626                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5627                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5628         } else
5629                 bp->func_stx = 0;
5630
5631         /* this needs to be done before gunzip end */
5632         bnx2x_zero_def_sb(bp);
5633         for_each_queue(bp, i)
5634                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5635
5636 init_hw_err:
5637         bnx2x_gunzip_end(bp);
5638
5639         return rc;
5640 }
5641
5642 /* send the MCP a request, block until there is a reply */
5643 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5644 {
5645         int func = BP_FUNC(bp);
5646         u32 seq = ++bp->fw_seq;
5647         u32 rc = 0;
5648         u32 cnt = 1;
5649         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5650
5651         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5652         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5653
5654         do {
5655                 /* let the FW do it's magic ... */
5656                 msleep(delay);
5657
5658                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5659
5660                 /* Give the FW up to 2 second (200*10ms) */
5661         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5662
5663         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5664            cnt*delay, rc, seq);
5665
5666         /* is this a reply to our command? */
5667         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5668                 rc &= FW_MSG_CODE_MASK;
5669
5670         } else {
5671                 /* FW BUG! */
5672                 BNX2X_ERR("FW failed to respond!\n");
5673                 bnx2x_fw_dump(bp);
5674                 rc = 0;
5675         }
5676
5677         return rc;
5678 }
5679
5680 static void bnx2x_free_mem(struct bnx2x *bp)
5681 {
5682
5683 #define BNX2X_PCI_FREE(x, y, size) \
5684         do { \
5685                 if (x) { \
5686                         pci_free_consistent(bp->pdev, size, x, y); \
5687                         x = NULL; \
5688                         y = 0; \
5689                 } \
5690         } while (0)
5691
5692 #define BNX2X_FREE(x) \
5693         do { \
5694                 if (x) { \
5695                         vfree(x); \
5696                         x = NULL; \
5697                 } \
5698         } while (0)
5699
5700         int i;
5701
5702         /* fastpath */
5703         for_each_queue(bp, i) {
5704
5705                 /* Status blocks */
5706                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5707                                bnx2x_fp(bp, i, status_blk_mapping),
5708                                sizeof(struct host_status_block) +
5709                                sizeof(struct eth_tx_db_data));
5710
5711                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5712                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5713                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5714                                bnx2x_fp(bp, i, tx_desc_mapping),
5715                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5716
5717                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5718                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5719                                bnx2x_fp(bp, i, rx_desc_mapping),
5720                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5721
5722                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5723                                bnx2x_fp(bp, i, rx_comp_mapping),
5724                                sizeof(struct eth_fast_path_rx_cqe) *
5725                                NUM_RCQ_BD);
5726
5727                 /* SGE ring */
5728                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5729                                bnx2x_fp(bp, i, rx_sge_mapping),
5730                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5731         }
5732         /* end of fastpath */
5733
5734         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5735                        sizeof(struct host_def_status_block));
5736
5737         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5738                        sizeof(struct bnx2x_slowpath));
5739
5740 #ifdef BCM_ISCSI
5741         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5742         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5743         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5744         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5745 #endif
5746         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5747
5748 #undef BNX2X_PCI_FREE
5749 #undef BNX2X_KFREE
5750 }
5751
5752 static int bnx2x_alloc_mem(struct bnx2x *bp)
5753 {
5754
5755 #define BNX2X_PCI_ALLOC(x, y, size) \
5756         do { \
5757                 x = pci_alloc_consistent(bp->pdev, size, y); \
5758                 if (x == NULL) \
5759                         goto alloc_mem_err; \
5760                 memset(x, 0, size); \
5761         } while (0)
5762
5763 #define BNX2X_ALLOC(x, size) \
5764         do { \
5765                 x = vmalloc(size); \
5766                 if (x == NULL) \
5767                         goto alloc_mem_err; \
5768                 memset(x, 0, size); \
5769         } while (0)
5770
5771         int i;
5772
5773         /* fastpath */
5774         for_each_queue(bp, i) {
5775                 bnx2x_fp(bp, i, bp) = bp;
5776
5777                 /* Status blocks */
5778                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5779                                 &bnx2x_fp(bp, i, status_blk_mapping),
5780                                 sizeof(struct host_status_block) +
5781                                 sizeof(struct eth_tx_db_data));
5782
5783                 bnx2x_fp(bp, i, hw_tx_prods) =
5784                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5785
5786                 bnx2x_fp(bp, i, tx_prods_mapping) =
5787                                 bnx2x_fp(bp, i, status_blk_mapping) +
5788                                 sizeof(struct host_status_block);
5789
5790                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5791                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5792                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5793                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5794                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5795                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5796
5797                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5798                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5799                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5800                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5801                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5802
5803                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5804                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5805                                 sizeof(struct eth_fast_path_rx_cqe) *
5806                                 NUM_RCQ_BD);
5807
5808                 /* SGE ring */
5809                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5810                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5811                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5812                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5813                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5814         }
5815         /* end of fastpath */
5816
5817         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5818                         sizeof(struct host_def_status_block));
5819
5820         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5821                         sizeof(struct bnx2x_slowpath));
5822
5823 #ifdef BCM_ISCSI
5824         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5825
5826         /* Initialize T1 */
5827         for (i = 0; i < 64*1024; i += 64) {
5828                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5829                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5830         }
5831
5832         /* allocate searcher T2 table
5833            we allocate 1/4 of alloc num for T2
5834           (which is not entered into the ILT) */
5835         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5836
5837         /* Initialize T2 */
5838         for (i = 0; i < 16*1024; i += 64)
5839                 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5840
5841         /* now fixup the last line in the block to point to the next block */
5842         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5843
5844         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5845         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5846
5847         /* QM queues (128*MAX_CONN) */
5848         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5849 #endif
5850
5851         /* Slow path ring */
5852         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5853
5854         return 0;
5855
5856 alloc_mem_err:
5857         bnx2x_free_mem(bp);
5858         return -ENOMEM;
5859
5860 #undef BNX2X_PCI_ALLOC
5861 #undef BNX2X_ALLOC
5862 }
5863
5864 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5865 {
5866         int i;
5867
5868         for_each_queue(bp, i) {
5869                 struct bnx2x_fastpath *fp = &bp->fp[i];
5870
5871                 u16 bd_cons = fp->tx_bd_cons;
5872                 u16 sw_prod = fp->tx_pkt_prod;
5873                 u16 sw_cons = fp->tx_pkt_cons;
5874
5875                 while (sw_cons != sw_prod) {
5876                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5877                         sw_cons++;
5878                 }
5879         }
5880 }
5881
5882 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5883 {
5884         int i, j;
5885
5886         for_each_queue(bp, j) {
5887                 struct bnx2x_fastpath *fp = &bp->fp[j];
5888
5889                 for (i = 0; i < NUM_RX_BD; i++) {
5890                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5891                         struct sk_buff *skb = rx_buf->skb;
5892
5893                         if (skb == NULL)
5894                                 continue;
5895
5896                         pci_unmap_single(bp->pdev,
5897                                          pci_unmap_addr(rx_buf, mapping),
5898                                          bp->rx_buf_use_size,
5899                                          PCI_DMA_FROMDEVICE);
5900
5901                         rx_buf->skb = NULL;
5902                         dev_kfree_skb(skb);
5903                 }
5904                 if (!fp->disable_tpa)
5905                         bnx2x_free_tpa_pool(bp, fp,
5906                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5907         }
5908 }
5909
5910 static void bnx2x_free_skbs(struct bnx2x *bp)
5911 {
5912         bnx2x_free_tx_skbs(bp);
5913         bnx2x_free_rx_skbs(bp);
5914 }
5915
5916 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5917 {
5918         int i, offset = 1;
5919
5920         free_irq(bp->msix_table[0].vector, bp->dev);
5921         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5922            bp->msix_table[0].vector);
5923
5924         for_each_queue(bp, i) {
5925                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5926                    "state %x\n", i, bp->msix_table[i + offset].vector,
5927                    bnx2x_fp(bp, i, state));
5928
5929                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5930                         BNX2X_ERR("IRQ of fp #%d being freed while "
5931                                   "state != closed\n", i);
5932
5933                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5934         }
5935 }
5936
5937 static void bnx2x_free_irq(struct bnx2x *bp)
5938 {
5939         if (bp->flags & USING_MSIX_FLAG) {
5940                 bnx2x_free_msix_irqs(bp);
5941                 pci_disable_msix(bp->pdev);
5942                 bp->flags &= ~USING_MSIX_FLAG;
5943
5944         } else
5945                 free_irq(bp->pdev->irq, bp->dev);
5946 }
5947
5948 static int bnx2x_enable_msix(struct bnx2x *bp)
5949 {
5950         int i, rc, offset;
5951
5952         bp->msix_table[0].entry = 0;
5953         offset = 1;
5954         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
5955
5956         for_each_queue(bp, i) {
5957                 int igu_vec = offset + i + BP_L_ID(bp);
5958
5959                 bp->msix_table[i + offset].entry = igu_vec;
5960                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5961                    "(fastpath #%u)\n", i + offset, igu_vec, i);
5962         }
5963
5964         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5965                              bp->num_queues + offset);
5966         if (rc) {
5967                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
5968                 return -1;
5969         }
5970         bp->flags |= USING_MSIX_FLAG;
5971
5972         return 0;
5973 }
5974
5975 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
5976 {
5977         int i, rc, offset = 1;
5978
5979         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
5980                          bp->dev->name, bp->dev);
5981         if (rc) {
5982                 BNX2X_ERR("request sp irq failed\n");
5983                 return -EBUSY;
5984         }
5985
5986         for_each_queue(bp, i) {
5987                 rc = request_irq(bp->msix_table[i + offset].vector,
5988                                  bnx2x_msix_fp_int, 0,
5989                                  bp->dev->name, &bp->fp[i]);
5990                 if (rc) {
5991                         BNX2X_ERR("request fp #%d irq failed  rc %d\n",
5992                                   i + offset, rc);
5993                         bnx2x_free_msix_irqs(bp);
5994                         return -EBUSY;
5995                 }
5996
5997                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
5998         }
5999
6000         return 0;
6001 }
6002
6003 static int bnx2x_req_irq(struct bnx2x *bp)
6004 {
6005         int rc;
6006
6007         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6008                          bp->dev->name, bp->dev);
6009         if (!rc)
6010                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6011
6012         return rc;
6013 }
6014
6015 /*
6016  * Init service functions
6017  */
6018
6019 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
6020 {
6021         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6022         int port = BP_PORT(bp);
6023
6024         /* CAM allocation
6025          * unicasts 0-31:port0 32-63:port1
6026          * multicast 64-127:port0 128-191:port1
6027          */
6028         config->hdr.length_6b = 2;
6029         config->hdr.offset = port ? 31 : 0;
6030         config->hdr.client_id = BP_CL_ID(bp);
6031         config->hdr.reserved1 = 0;
6032
6033         /* primary MAC */
6034         config->config_table[0].cam_entry.msb_mac_addr =
6035                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6036         config->config_table[0].cam_entry.middle_mac_addr =
6037                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6038         config->config_table[0].cam_entry.lsb_mac_addr =
6039                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6040         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6041         config->config_table[0].target_table_entry.flags = 0;
6042         config->config_table[0].target_table_entry.client_id = 0;
6043         config->config_table[0].target_table_entry.vlan_id = 0;
6044
6045         DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6046            config->config_table[0].cam_entry.msb_mac_addr,
6047            config->config_table[0].cam_entry.middle_mac_addr,
6048            config->config_table[0].cam_entry.lsb_mac_addr);
6049
6050         /* broadcast */
6051         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6052         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6053         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6054         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6055         config->config_table[1].target_table_entry.flags =
6056                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6057         config->config_table[1].target_table_entry.client_id = 0;
6058         config->config_table[1].target_table_entry.vlan_id = 0;
6059
6060         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6061                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6062                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6063 }
6064
6065 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
6066 {
6067         struct mac_configuration_cmd_e1h *config =
6068                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6069
6070         if (bp->state != BNX2X_STATE_OPEN) {
6071                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6072                 return;
6073         }
6074
6075         /* CAM allocation for E1H
6076          * unicasts: by func number
6077          * multicast: 20+FUNC*20, 20 each
6078          */
6079         config->hdr.length_6b = 1;
6080         config->hdr.offset = BP_FUNC(bp);
6081         config->hdr.client_id = BP_CL_ID(bp);
6082         config->hdr.reserved1 = 0;
6083
6084         /* primary MAC */
6085         config->config_table[0].msb_mac_addr =
6086                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6087         config->config_table[0].middle_mac_addr =
6088                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6089         config->config_table[0].lsb_mac_addr =
6090                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6091         config->config_table[0].client_id = BP_L_ID(bp);
6092         config->config_table[0].vlan_id = 0;
6093         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6094         config->config_table[0].flags = BP_PORT(bp);
6095
6096         DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6097            config->config_table[0].msb_mac_addr,
6098            config->config_table[0].middle_mac_addr,
6099            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6100
6101         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6102                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6103                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6104 }
6105
6106 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6107                              int *state_p, int poll)
6108 {
6109         /* can take a while if any port is running */
6110         int cnt = 500;
6111
6112         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6113            poll ? "polling" : "waiting", state, idx);
6114
6115         might_sleep();
6116         while (cnt--) {
6117                 if (poll) {
6118                         bnx2x_rx_int(bp->fp, 10);
6119                         /* if index is different from 0
6120                          * the reply for some commands will
6121                          * be on the none default queue
6122                          */
6123                         if (idx)
6124                                 bnx2x_rx_int(&bp->fp[idx], 10);
6125                 }
6126                 mb(); /* state is changed by bnx2x_sp_event() */
6127
6128                 if (*state_p == state)
6129                         return 0;
6130
6131                 msleep(1);
6132         }
6133
6134         /* timeout! */
6135         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6136                   poll ? "polling" : "waiting", state, idx);
6137 #ifdef BNX2X_STOP_ON_ERROR
6138         bnx2x_panic();
6139 #endif
6140
6141         return -EBUSY;
6142 }
6143
6144 static int bnx2x_setup_leading(struct bnx2x *bp)
6145 {
6146         int rc;
6147
6148         /* reset IGU state */
6149         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6150
6151         /* SETUP ramrod */
6152         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6153
6154         /* Wait for completion */
6155         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6156
6157         return rc;
6158 }
6159
6160 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6161 {
6162         /* reset IGU state */
6163         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6164
6165         /* SETUP ramrod */
6166         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6167         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6168
6169         /* Wait for completion */
6170         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6171                                  &(bp->fp[index].state), 0);
6172 }
6173
6174 static int bnx2x_poll(struct napi_struct *napi, int budget);
6175 static void bnx2x_set_rx_mode(struct net_device *dev);
6176
6177 /* must be called with rtnl_lock */
6178 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6179 {
6180         u32 load_code;
6181         int i, rc;
6182
6183 #ifdef BNX2X_STOP_ON_ERROR
6184         if (unlikely(bp->panic))
6185                 return -EPERM;
6186 #endif
6187
6188         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6189
6190         /* Send LOAD_REQUEST command to MCP
6191            Returns the type of LOAD command:
6192            if it is the first port to be initialized
6193            common blocks should be initialized, otherwise - not
6194         */
6195         if (!BP_NOMCP(bp)) {
6196                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6197                 if (!load_code) {
6198                         BNX2X_ERR("MCP response failure, unloading\n");
6199                         return -EBUSY;
6200                 }
6201                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6202                         return -EBUSY; /* other port in diagnostic mode */
6203
6204         } else {
6205                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6206                    load_count[0], load_count[1], load_count[2]);
6207                 load_count[0]++;
6208                 load_count[1 + BP_PORT(bp)]++;
6209                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6210                    load_count[0], load_count[1], load_count[2]);
6211                 if (load_count[0] == 1)
6212                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6213                 else if (load_count[1 + BP_PORT(bp)] == 1)
6214                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6215                 else
6216                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6217         }
6218
6219         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6220             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6221                 bp->port.pmf = 1;
6222         else
6223                 bp->port.pmf = 0;
6224         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6225
6226         /* if we can't use MSI-X we only need one fp,
6227          * so try to enable MSI-X with the requested number of fp's
6228          * and fallback to inta with one fp
6229          */
6230         if (use_inta) {
6231                 bp->num_queues = 1;
6232
6233         } else {
6234                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6235                         /* user requested number */
6236                         bp->num_queues = use_multi;
6237
6238                 else if (use_multi)
6239                         bp->num_queues = min_t(u32, num_online_cpus(),
6240                                                BP_MAX_QUEUES(bp));
6241                 else
6242                         bp->num_queues = 1;
6243
6244                 if (bnx2x_enable_msix(bp)) {
6245                         /* failed to enable MSI-X */
6246                         bp->num_queues = 1;
6247                         if (use_multi)
6248                                 BNX2X_ERR("Multi requested but failed"
6249                                           " to enable MSI-X\n");
6250                 }
6251         }
6252         DP(NETIF_MSG_IFUP,
6253            "set number of queues to %d\n", bp->num_queues);
6254
6255         if (bnx2x_alloc_mem(bp))
6256                 return -ENOMEM;
6257
6258         for_each_queue(bp, i)
6259                 bnx2x_fp(bp, i, disable_tpa) =
6260                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6261
6262         /* Disable interrupt handling until HW is initialized */
6263         atomic_set(&bp->intr_sem, 1);
6264
6265         if (bp->flags & USING_MSIX_FLAG) {
6266                 rc = bnx2x_req_msix_irqs(bp);
6267                 if (rc) {
6268                         pci_disable_msix(bp->pdev);
6269                         goto load_error;
6270                 }
6271         } else {
6272                 bnx2x_ack_int(bp);
6273                 rc = bnx2x_req_irq(bp);
6274                 if (rc) {
6275                         BNX2X_ERR("IRQ request failed, aborting\n");
6276                         goto load_error;
6277                 }
6278         }
6279
6280         for_each_queue(bp, i)
6281                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6282                                bnx2x_poll, 128);
6283
6284         /* Initialize HW */
6285         rc = bnx2x_init_hw(bp, load_code);
6286         if (rc) {
6287                 BNX2X_ERR("HW init failed, aborting\n");
6288                 goto load_error;
6289         }
6290
6291         /* Enable interrupt handling */
6292         atomic_set(&bp->intr_sem, 0);
6293
6294         /* Setup NIC internals and enable interrupts */
6295         bnx2x_nic_init(bp);
6296
6297         /* Send LOAD_DONE command to MCP */
6298         if (!BP_NOMCP(bp)) {
6299                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6300                 if (!load_code) {
6301                         BNX2X_ERR("MCP response failure, unloading\n");
6302                         rc = -EBUSY;
6303                         goto load_int_disable;
6304                 }
6305         }
6306
6307         bnx2x_stats_init(bp);
6308
6309         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6310
6311         /* Enable Rx interrupt handling before sending the ramrod
6312            as it's completed on Rx FP queue */
6313         for_each_queue(bp, i)
6314                 napi_enable(&bnx2x_fp(bp, i, napi));
6315
6316         rc = bnx2x_setup_leading(bp);
6317         if (rc) {
6318 #ifdef BNX2X_STOP_ON_ERROR
6319                 bp->panic = 1;
6320 #endif
6321                 goto load_stop_netif;
6322         }
6323
6324         if (CHIP_IS_E1H(bp))
6325                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6326                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6327                         bp->state = BNX2X_STATE_DISABLED;
6328                 }
6329
6330         if (bp->state == BNX2X_STATE_OPEN)
6331                 for_each_nondefault_queue(bp, i) {
6332                         rc = bnx2x_setup_multi(bp, i);
6333                         if (rc)
6334                                 goto load_stop_netif;
6335                 }
6336
6337         if (CHIP_IS_E1(bp))
6338                 bnx2x_set_mac_addr_e1(bp);
6339         else
6340                 bnx2x_set_mac_addr_e1h(bp);
6341
6342         if (bp->port.pmf)
6343                 bnx2x_initial_phy_init(bp);
6344
6345         /* Start fast path */
6346         switch (load_mode) {
6347         case LOAD_NORMAL:
6348                 /* Tx queue should be only reenabled */
6349                 netif_wake_queue(bp->dev);
6350                 bnx2x_set_rx_mode(bp->dev);
6351                 break;
6352
6353         case LOAD_OPEN:
6354                 /* IRQ is only requested from bnx2x_open */
6355                 netif_start_queue(bp->dev);
6356                 bnx2x_set_rx_mode(bp->dev);
6357                 if (bp->flags & USING_MSIX_FLAG)
6358                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6359                                bp->dev->name);
6360                 break;
6361
6362         case LOAD_DIAG:
6363                 bnx2x_set_rx_mode(bp->dev);
6364                 bp->state = BNX2X_STATE_DIAG;
6365                 break;
6366
6367         default:
6368                 break;
6369         }
6370
6371         if (!bp->port.pmf)
6372                 bnx2x__link_status_update(bp);
6373
6374         /* start the timer */
6375         mod_timer(&bp->timer, jiffies + bp->current_interval);
6376
6377
6378         return 0;
6379
6380 load_stop_netif:
6381         for_each_queue(bp, i)
6382                 napi_disable(&bnx2x_fp(bp, i, napi));
6383
6384 load_int_disable:
6385         bnx2x_int_disable_sync(bp);
6386
6387         /* Release IRQs */
6388         bnx2x_free_irq(bp);
6389
6390         /* Free SKBs, SGEs, TPA pool and driver internals */
6391         bnx2x_free_skbs(bp);
6392         for_each_queue(bp, i)
6393                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6394                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6395 load_error:
6396         bnx2x_free_mem(bp);
6397
6398         /* TBD we really need to reset the chip
6399            if we want to recover from this */
6400         return rc;
6401 }
6402
6403 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6404 {
6405         int rc;
6406
6407         /* halt the connection */
6408         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6409         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6410
6411         /* Wait for completion */
6412         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6413                                &(bp->fp[index].state), 1);
6414         if (rc) /* timeout */
6415                 return rc;
6416
6417         /* delete cfc entry */
6418         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6419
6420         /* Wait for completion */
6421         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6422                                &(bp->fp[index].state), 1);
6423         return rc;
6424 }
6425
6426 static void bnx2x_stop_leading(struct bnx2x *bp)
6427 {
6428         u16 dsb_sp_prod_idx;
6429         /* if the other port is handling traffic,
6430            this can take a lot of time */
6431         int cnt = 500;
6432         int rc;
6433
6434         might_sleep();
6435
6436         /* Send HALT ramrod */
6437         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6438         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6439
6440         /* Wait for completion */
6441         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6442                                &(bp->fp[0].state), 1);
6443         if (rc) /* timeout */
6444                 return;
6445
6446         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6447
6448         /* Send PORT_DELETE ramrod */
6449         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6450
6451         /* Wait for completion to arrive on default status block
6452            we are going to reset the chip anyway
6453            so there is not much to do if this times out
6454          */
6455         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6456                 msleep(1);
6457                 if (!cnt) {
6458                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6459                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6460                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6461 #ifdef BNX2X_STOP_ON_ERROR
6462                         bnx2x_panic();
6463 #endif
6464                         break;
6465                 }
6466                 cnt--;
6467         }
6468         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6469         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6470 }
6471
6472 static void bnx2x_reset_func(struct bnx2x *bp)
6473 {
6474         int port = BP_PORT(bp);
6475         int func = BP_FUNC(bp);
6476         int base, i;
6477
6478         /* Configure IGU */
6479         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6480         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6481
6482         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6483
6484         /* Clear ILT */
6485         base = FUNC_ILT_BASE(func);
6486         for (i = base; i < base + ILT_PER_FUNC; i++)
6487                 bnx2x_ilt_wr(bp, i, 0);
6488 }
6489
6490 static void bnx2x_reset_port(struct bnx2x *bp)
6491 {
6492         int port = BP_PORT(bp);
6493         u32 val;
6494
6495         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6496
6497         /* Do not rcv packets to BRB */
6498         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6499         /* Do not direct rcv packets that are not for MCP to the BRB */
6500         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6501                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6502
6503         /* Configure AEU */
6504         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6505
6506         msleep(100);
6507         /* Check for BRB port occupancy */
6508         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6509         if (val)
6510                 DP(NETIF_MSG_IFDOWN,
6511                    "BRB1 is not empty  %d blooks are occupied\n", val);
6512
6513         /* TODO: Close Doorbell port? */
6514 }
6515
6516 static void bnx2x_reset_common(struct bnx2x *bp)
6517 {
6518         /* reset_common */
6519         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6520                0xd3ffff7f);
6521         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6522 }
6523
6524 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6525 {
6526         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6527            BP_FUNC(bp), reset_code);
6528
6529         switch (reset_code) {
6530         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6531                 bnx2x_reset_port(bp);
6532                 bnx2x_reset_func(bp);
6533                 bnx2x_reset_common(bp);
6534                 break;
6535
6536         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6537                 bnx2x_reset_port(bp);
6538                 bnx2x_reset_func(bp);
6539                 break;
6540
6541         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6542                 bnx2x_reset_func(bp);
6543                 break;
6544
6545         default:
6546                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6547                 break;
6548         }
6549 }
6550
6551 /* msut be called with rtnl_lock */
6552 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6553 {
6554         u32 reset_code = 0;
6555         int i, cnt;
6556
6557         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6558
6559         bp->rx_mode = BNX2X_RX_MODE_NONE;
6560         bnx2x_set_storm_rx_mode(bp);
6561
6562         if (netif_running(bp->dev)) {
6563                 netif_tx_disable(bp->dev);
6564                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6565         }
6566
6567         del_timer_sync(&bp->timer);
6568         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6569                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6570         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6571
6572         /* Wait until all fast path tasks complete */
6573         for_each_queue(bp, i) {
6574                 struct bnx2x_fastpath *fp = &bp->fp[i];
6575
6576 #ifdef BNX2X_STOP_ON_ERROR
6577 #ifdef __powerpc64__
6578                 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
6579 #else
6580                 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
6581 #endif
6582                    fp->tpa_queue_used);
6583 #endif
6584                 cnt = 1000;
6585                 smp_rmb();
6586                 while (bnx2x_has_work(fp)) {
6587                         msleep(1);
6588                         if (!cnt) {
6589                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6590                                           i);
6591 #ifdef BNX2X_STOP_ON_ERROR
6592                                 bnx2x_panic();
6593                                 return -EBUSY;
6594 #else
6595                                 break;
6596 #endif
6597                         }
6598                         cnt--;
6599                         smp_rmb();
6600                 }
6601         }
6602
6603         /* Wait until all slow path tasks complete */
6604         cnt = 1000;
6605         while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
6606                 msleep(1);
6607
6608         for_each_queue(bp, i)
6609                 napi_disable(&bnx2x_fp(bp, i, napi));
6610         /* Disable interrupts after Tx and Rx are disabled on stack level */
6611         bnx2x_int_disable_sync(bp);
6612
6613         /* Release IRQs */
6614         bnx2x_free_irq(bp);
6615
6616         if (bp->flags & NO_WOL_FLAG)
6617                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6618
6619         else if (bp->wol) {
6620                 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6621                 u8 *mac_addr = bp->dev->dev_addr;
6622                 u32 val;
6623
6624                 /* The mac address is written to entries 1-4 to
6625                    preserve entry 0 which is used by the PMF */
6626                 val = (mac_addr[0] << 8) | mac_addr[1];
6627                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
6628
6629                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6630                       (mac_addr[4] << 8) | mac_addr[5];
6631                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
6632                         val);
6633
6634                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6635
6636         } else
6637                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6638
6639         /* Close multi and leading connections
6640            Completions for ramrods are collected in a synchronous way */
6641         for_each_nondefault_queue(bp, i)
6642                 if (bnx2x_stop_multi(bp, i))
6643                         goto unload_error;
6644
6645         if (CHIP_IS_E1H(bp))
6646                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
6647
6648         bnx2x_stop_leading(bp);
6649 #ifdef BNX2X_STOP_ON_ERROR
6650         /* If ramrod completion timed out - break here! */
6651         if (bp->panic) {
6652                 BNX2X_ERR("Stop leading failed!\n");
6653                 return -EBUSY;
6654         }
6655 #endif
6656
6657         if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6658             (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
6659                 DP(NETIF_MSG_IFDOWN, "failed to close leading properly!  "
6660                    "state 0x%x  fp[0].state 0x%x\n",
6661                    bp->state, bp->fp[0].state);
6662         }
6663
6664 unload_error:
6665         if (!BP_NOMCP(bp))
6666                 reset_code = bnx2x_fw_command(bp, reset_code);
6667         else {
6668                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6669                    load_count[0], load_count[1], load_count[2]);
6670                 load_count[0]--;
6671                 load_count[1 + BP_PORT(bp)]--;
6672                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6673                    load_count[0], load_count[1], load_count[2]);
6674                 if (load_count[0] == 0)
6675                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6676                 else if (load_count[1 + BP_PORT(bp)] == 0)
6677                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6678                 else
6679                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6680         }
6681
6682         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6683             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6684                 bnx2x__link_reset(bp);
6685
6686         /* Reset the chip */
6687         bnx2x_reset_chip(bp, reset_code);
6688
6689         /* Report UNLOAD_DONE to MCP */
6690         if (!BP_NOMCP(bp))
6691                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6692
6693         /* Free SKBs, SGEs, TPA pool and driver internals */
6694         bnx2x_free_skbs(bp);
6695         for_each_queue(bp, i)
6696                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6697                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6698         bnx2x_free_mem(bp);
6699
6700         bp->state = BNX2X_STATE_CLOSED;
6701
6702         netif_carrier_off(bp->dev);
6703
6704         return 0;
6705 }
6706
6707 static void bnx2x_reset_task(struct work_struct *work)
6708 {
6709         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6710
6711 #ifdef BNX2X_STOP_ON_ERROR
6712         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6713                   " so reset not done to allow debug dump,\n"
6714          KERN_ERR " you will need to reboot when done\n");
6715         return;
6716 #endif
6717
6718         rtnl_lock();
6719
6720         if (!netif_running(bp->dev))
6721                 goto reset_task_exit;
6722
6723         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6724         bnx2x_nic_load(bp, LOAD_NORMAL);
6725
6726 reset_task_exit:
6727         rtnl_unlock();
6728 }
6729
6730 /* end of nic load/unload */
6731
6732 /* ethtool_ops */
6733
6734 /*
6735  * Init service functions
6736  */
6737
6738 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6739 {
6740         u32 val;
6741
6742         /* Check if there is any driver already loaded */
6743         val = REG_RD(bp, MISC_REG_UNPREPARED);
6744         if (val == 0x1) {
6745                 /* Check if it is the UNDI driver
6746                  * UNDI driver initializes CID offset for normal bell to 0x7
6747                  */
6748                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6749                 if (val == 0x7) {
6750                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6751                         /* save our func and fw_seq */
6752                         int func = BP_FUNC(bp);
6753                         u16 fw_seq = bp->fw_seq;
6754
6755                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6756
6757                         /* try unload UNDI on port 0 */
6758                         bp->func = 0;
6759                         bp->fw_seq = (SHMEM_RD(bp,
6760                                              func_mb[bp->func].drv_mb_header) &
6761                                       DRV_MSG_SEQ_NUMBER_MASK);
6762
6763                         reset_code = bnx2x_fw_command(bp, reset_code);
6764                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6765
6766                         /* if UNDI is loaded on the other port */
6767                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6768
6769                                 bp->func = 1;
6770                                 bp->fw_seq = (SHMEM_RD(bp,
6771                                              func_mb[bp->func].drv_mb_header) &
6772                                               DRV_MSG_SEQ_NUMBER_MASK);
6773
6774                                 bnx2x_fw_command(bp,
6775                                              DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
6776                                 bnx2x_fw_command(bp,
6777                                                  DRV_MSG_CODE_UNLOAD_DONE);
6778
6779                                 /* restore our func and fw_seq */
6780                                 bp->func = func;
6781                                 bp->fw_seq = fw_seq;
6782                         }
6783
6784                         /* reset device */
6785                         REG_WR(bp,
6786                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6787                                0xd3ffff7f);
6788                         REG_WR(bp,
6789                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6790                                0x1403);
6791                 }
6792         }
6793 }
6794
6795 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6796 {
6797         u32 val, val2, val3, val4, id;
6798
6799         /* Get the chip revision id and number. */
6800         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6801         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6802         id = ((val & 0xffff) << 16);
6803         val = REG_RD(bp, MISC_REG_CHIP_REV);
6804         id |= ((val & 0xf) << 12);
6805         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6806         id |= ((val & 0xff) << 4);
6807         REG_RD(bp, MISC_REG_BOND_ID);
6808         id |= (val & 0xf);
6809         bp->common.chip_id = id;
6810         bp->link_params.chip_id = bp->common.chip_id;
6811         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6812
6813         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6814         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6815                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6816         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6817                        bp->common.flash_size, bp->common.flash_size);
6818
6819         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6820         bp->link_params.shmem_base = bp->common.shmem_base;
6821         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6822
6823         if (!bp->common.shmem_base ||
6824             (bp->common.shmem_base < 0xA0000) ||
6825             (bp->common.shmem_base >= 0xC0000)) {
6826                 BNX2X_DEV_INFO("MCP not active\n");
6827                 bp->flags |= NO_MCP_FLAG;
6828                 return;
6829         }
6830
6831         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6832         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6833                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6834                 BNX2X_ERR("BAD MCP validity signature\n");
6835
6836         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6837         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6838
6839         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6840                        bp->common.hw_config, bp->common.board);
6841
6842         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6843                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6844                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6845
6846         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6847         bp->common.bc_ver = val;
6848         BNX2X_DEV_INFO("bc_ver %X\n", val);
6849         if (val < BNX2X_BC_VER) {
6850                 /* for now only warn
6851                  * later we might need to enforce this */
6852                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6853                           " please upgrade BC\n", BNX2X_BC_VER, val);
6854         }
6855         BNX2X_DEV_INFO("%sWoL Capable\n",
6856                        (bp->flags & NO_WOL_FLAG)? "Not " : "");
6857
6858         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6859         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6860         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6861         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6862
6863         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6864                val, val2, val3, val4);
6865 }
6866
6867 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6868                                                     u32 switch_cfg)
6869 {
6870         int port = BP_PORT(bp);
6871         u32 ext_phy_type;
6872
6873         switch (switch_cfg) {
6874         case SWITCH_CFG_1G:
6875                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6876
6877                 ext_phy_type =
6878                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6879                 switch (ext_phy_type) {
6880                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6881                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6882                                        ext_phy_type);
6883
6884                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6885                                                SUPPORTED_10baseT_Full |
6886                                                SUPPORTED_100baseT_Half |
6887                                                SUPPORTED_100baseT_Full |
6888                                                SUPPORTED_1000baseT_Full |
6889                                                SUPPORTED_2500baseX_Full |
6890                                                SUPPORTED_TP |
6891                                                SUPPORTED_FIBRE |
6892                                                SUPPORTED_Autoneg |
6893                                                SUPPORTED_Pause |
6894                                                SUPPORTED_Asym_Pause);
6895                         break;
6896
6897                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6898                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6899                                        ext_phy_type);
6900
6901                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6902                                                SUPPORTED_10baseT_Full |
6903                                                SUPPORTED_100baseT_Half |
6904                                                SUPPORTED_100baseT_Full |
6905                                                SUPPORTED_1000baseT_Full |
6906                                                SUPPORTED_TP |
6907                                                SUPPORTED_FIBRE |
6908                                                SUPPORTED_Autoneg |
6909                                                SUPPORTED_Pause |
6910                                                SUPPORTED_Asym_Pause);
6911                         break;
6912
6913                 default:
6914                         BNX2X_ERR("NVRAM config error. "
6915                                   "BAD SerDes ext_phy_config 0x%x\n",
6916                                   bp->link_params.ext_phy_config);
6917                         return;
6918                 }
6919
6920                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6921                                            port*0x10);
6922                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6923                 break;
6924
6925         case SWITCH_CFG_10G:
6926                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6927
6928                 ext_phy_type =
6929                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6930                 switch (ext_phy_type) {
6931                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6932                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6933                                        ext_phy_type);
6934
6935                         bp->port.supported |= (SUPPORTED_10baseT_Half |
6936                                                SUPPORTED_10baseT_Full |
6937                                                SUPPORTED_100baseT_Half |
6938                                                SUPPORTED_100baseT_Full |
6939                                                SUPPORTED_1000baseT_Full |
6940                                                SUPPORTED_2500baseX_Full |
6941                                                SUPPORTED_10000baseT_Full |
6942                                                SUPPORTED_TP |
6943                                                SUPPORTED_FIBRE |
6944                                                SUPPORTED_Autoneg |
6945                                                SUPPORTED_Pause |
6946                                                SUPPORTED_Asym_Pause);
6947                         break;
6948
6949                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6950                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6951                                        ext_phy_type);
6952
6953                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6954                                                SUPPORTED_FIBRE |
6955                                                SUPPORTED_Pause |
6956                                                SUPPORTED_Asym_Pause);
6957                         break;
6958
6959                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6960                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6961                                        ext_phy_type);
6962
6963                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6964                                                SUPPORTED_1000baseT_Full |
6965                                                SUPPORTED_FIBRE |
6966                                                SUPPORTED_Pause |
6967                                                SUPPORTED_Asym_Pause);
6968                         break;
6969
6970                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6971                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6972                                        ext_phy_type);
6973
6974                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6975                                                SUPPORTED_1000baseT_Full |
6976                                                SUPPORTED_FIBRE |
6977                                                SUPPORTED_Autoneg |
6978                                                SUPPORTED_Pause |
6979                                                SUPPORTED_Asym_Pause);
6980                         break;
6981
6982                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6983                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6984                                        ext_phy_type);
6985
6986                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
6987                                                SUPPORTED_2500baseX_Full |
6988                                                SUPPORTED_1000baseT_Full |
6989                                                SUPPORTED_FIBRE |
6990                                                SUPPORTED_Autoneg |
6991                                                SUPPORTED_Pause |
6992                                                SUPPORTED_Asym_Pause);
6993                         break;
6994
6995                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6996                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6997                                        ext_phy_type);
6998
6999                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7000                                                SUPPORTED_TP |
7001                                                SUPPORTED_Autoneg |
7002                                                SUPPORTED_Pause |
7003                                                SUPPORTED_Asym_Pause);
7004                         break;
7005
7006                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7007                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7008                                   bp->link_params.ext_phy_config);
7009                         break;
7010
7011                 default:
7012                         BNX2X_ERR("NVRAM config error. "
7013                                   "BAD XGXS ext_phy_config 0x%x\n",
7014                                   bp->link_params.ext_phy_config);
7015                         return;
7016                 }
7017
7018                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7019                                            port*0x18);
7020                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7021
7022                 break;
7023
7024         default:
7025                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7026                           bp->port.link_config);
7027                 return;
7028         }
7029         bp->link_params.phy_addr = bp->port.phy_addr;
7030
7031         /* mask what we support according to speed_cap_mask */
7032         if (!(bp->link_params.speed_cap_mask &
7033                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7034                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7035
7036         if (!(bp->link_params.speed_cap_mask &
7037                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7038                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7039
7040         if (!(bp->link_params.speed_cap_mask &
7041                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7042                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7043
7044         if (!(bp->link_params.speed_cap_mask &
7045                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7046                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7047
7048         if (!(bp->link_params.speed_cap_mask &
7049                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7050                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7051                                         SUPPORTED_1000baseT_Full);
7052
7053         if (!(bp->link_params.speed_cap_mask &
7054                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7055                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7056
7057         if (!(bp->link_params.speed_cap_mask &
7058                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7059                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7060
7061         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7062 }
7063
7064 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7065 {
7066         bp->link_params.req_duplex = DUPLEX_FULL;
7067
7068         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7069         case PORT_FEATURE_LINK_SPEED_AUTO:
7070                 if (bp->port.supported & SUPPORTED_Autoneg) {
7071                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7072                         bp->port.advertising = bp->port.supported;
7073                 } else {
7074                         u32 ext_phy_type =
7075                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7076
7077                         if ((ext_phy_type ==
7078                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7079                             (ext_phy_type ==
7080                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7081                                 /* force 10G, no AN */
7082                                 bp->link_params.req_line_speed = SPEED_10000;
7083                                 bp->port.advertising =
7084                                                 (ADVERTISED_10000baseT_Full |
7085                                                  ADVERTISED_FIBRE);
7086                                 break;
7087                         }
7088                         BNX2X_ERR("NVRAM config error. "
7089                                   "Invalid link_config 0x%x"
7090                                   "  Autoneg not supported\n",
7091                                   bp->port.link_config);
7092                         return;
7093                 }
7094                 break;
7095
7096         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7097                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7098                         bp->link_params.req_line_speed = SPEED_10;
7099                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7100                                                 ADVERTISED_TP);
7101                 } else {
7102                         BNX2X_ERR("NVRAM config error. "
7103                                   "Invalid link_config 0x%x"
7104                                   "  speed_cap_mask 0x%x\n",
7105                                   bp->port.link_config,
7106                                   bp->link_params.speed_cap_mask);
7107                         return;
7108                 }
7109                 break;
7110
7111         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7112                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7113                         bp->link_params.req_line_speed = SPEED_10;
7114                         bp->link_params.req_duplex = DUPLEX_HALF;
7115                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7116                                                 ADVERTISED_TP);
7117                 } else {
7118                         BNX2X_ERR("NVRAM config error. "
7119                                   "Invalid link_config 0x%x"
7120                                   "  speed_cap_mask 0x%x\n",
7121                                   bp->port.link_config,
7122                                   bp->link_params.speed_cap_mask);
7123                         return;
7124                 }
7125                 break;
7126
7127         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7128                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7129                         bp->link_params.req_line_speed = SPEED_100;
7130                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7131                                                 ADVERTISED_TP);
7132                 } else {
7133                         BNX2X_ERR("NVRAM config error. "
7134                                   "Invalid link_config 0x%x"
7135                                   "  speed_cap_mask 0x%x\n",
7136                                   bp->port.link_config,
7137                                   bp->link_params.speed_cap_mask);
7138                         return;
7139                 }
7140                 break;
7141
7142         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7143                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7144                         bp->link_params.req_line_speed = SPEED_100;
7145                         bp->link_params.req_duplex = DUPLEX_HALF;
7146                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7147                                                 ADVERTISED_TP);
7148                 } else {
7149                         BNX2X_ERR("NVRAM config error. "
7150                                   "Invalid link_config 0x%x"
7151                                   "  speed_cap_mask 0x%x\n",
7152                                   bp->port.link_config,
7153                                   bp->link_params.speed_cap_mask);
7154                         return;
7155                 }
7156                 break;
7157
7158         case PORT_FEATURE_LINK_SPEED_1G:
7159                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7160                         bp->link_params.req_line_speed = SPEED_1000;
7161                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7162                                                 ADVERTISED_TP);
7163                 } else {
7164                         BNX2X_ERR("NVRAM config error. "
7165                                   "Invalid link_config 0x%x"
7166                                   "  speed_cap_mask 0x%x\n",
7167                                   bp->port.link_config,
7168                                   bp->link_params.speed_cap_mask);
7169                         return;
7170                 }
7171                 break;
7172
7173         case PORT_FEATURE_LINK_SPEED_2_5G:
7174                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7175                         bp->link_params.req_line_speed = SPEED_2500;
7176                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7177                                                 ADVERTISED_TP);
7178                 } else {
7179                         BNX2X_ERR("NVRAM config error. "
7180                                   "Invalid link_config 0x%x"
7181                                   "  speed_cap_mask 0x%x\n",
7182                                   bp->port.link_config,
7183                                   bp->link_params.speed_cap_mask);
7184                         return;
7185                 }
7186                 break;
7187
7188         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7189         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7190         case PORT_FEATURE_LINK_SPEED_10G_KR:
7191                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7192                         bp->link_params.req_line_speed = SPEED_10000;
7193                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7194                                                 ADVERTISED_FIBRE);
7195                 } else {
7196                         BNX2X_ERR("NVRAM config error. "
7197                                   "Invalid link_config 0x%x"
7198                                   "  speed_cap_mask 0x%x\n",
7199                                   bp->port.link_config,
7200                                   bp->link_params.speed_cap_mask);
7201                         return;
7202                 }
7203                 break;
7204
7205         default:
7206                 BNX2X_ERR("NVRAM config error. "
7207                           "BAD link speed link_config 0x%x\n",
7208                           bp->port.link_config);
7209                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7210                 bp->port.advertising = bp->port.supported;
7211                 break;
7212         }
7213
7214         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7215                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7216         if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7217             !(bp->port.supported & SUPPORTED_Autoneg))
7218                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7219
7220         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7221                        "  advertising 0x%x\n",
7222                        bp->link_params.req_line_speed,
7223                        bp->link_params.req_duplex,
7224                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7225 }
7226
7227 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7228 {
7229         int port = BP_PORT(bp);
7230         u32 val, val2;
7231
7232         bp->link_params.bp = bp;
7233         bp->link_params.port = port;
7234
7235         bp->link_params.serdes_config =
7236                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7237         bp->link_params.lane_config =
7238                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7239         bp->link_params.ext_phy_config =
7240                 SHMEM_RD(bp,
7241                          dev_info.port_hw_config[port].external_phy_config);
7242         bp->link_params.speed_cap_mask =
7243                 SHMEM_RD(bp,
7244                          dev_info.port_hw_config[port].speed_capability_mask);
7245
7246         bp->port.link_config =
7247                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7248
7249         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7250              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7251                        "  link_config 0x%08x\n",
7252                        bp->link_params.serdes_config,
7253                        bp->link_params.lane_config,
7254                        bp->link_params.ext_phy_config,
7255                        bp->link_params.speed_cap_mask, bp->port.link_config);
7256
7257         bp->link_params.switch_cfg = (bp->port.link_config &
7258                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7259         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7260
7261         bnx2x_link_settings_requested(bp);
7262
7263         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7264         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7265         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7266         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7267         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7268         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7269         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7270         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7271         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7272         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7273 }
7274
7275 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7276 {
7277         int func = BP_FUNC(bp);
7278         u32 val, val2;
7279         int rc = 0;
7280
7281         bnx2x_get_common_hwinfo(bp);
7282
7283         bp->e1hov = 0;
7284         bp->e1hmf = 0;
7285         if (CHIP_IS_E1H(bp)) {
7286                 bp->mf_config =
7287                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7288
7289                 val =
7290                    (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7291                     FUNC_MF_CFG_E1HOV_TAG_MASK);
7292                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7293
7294                         bp->e1hov = val;
7295                         bp->e1hmf = 1;
7296                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7297                                        "(0x%04x)\n",
7298                                        func, bp->e1hov, bp->e1hov);
7299                 } else {
7300                         BNX2X_DEV_INFO("Single function mode\n");
7301                         if (BP_E1HVN(bp)) {
7302                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7303                                           "  aborting\n", func);
7304                                 rc = -EPERM;
7305                         }
7306                 }
7307         }
7308
7309         if (!BP_NOMCP(bp)) {
7310                 bnx2x_get_port_hwinfo(bp);
7311
7312                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7313                               DRV_MSG_SEQ_NUMBER_MASK);
7314                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7315         }
7316
7317         if (IS_E1HMF(bp)) {
7318                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7319                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7320                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7321                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7322                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7323                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7324                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7325                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7326                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7327                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7328                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7329                                ETH_ALEN);
7330                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7331                                ETH_ALEN);
7332                 }
7333
7334                 return rc;
7335         }
7336
7337         if (BP_NOMCP(bp)) {
7338                 /* only supposed to happen on emulation/FPGA */
7339                 BNX2X_ERR("warning rendom MAC workaround active\n");
7340                 random_ether_addr(bp->dev->dev_addr);
7341                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7342         }
7343
7344         return rc;
7345 }
7346
7347 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7348 {
7349         int func = BP_FUNC(bp);
7350         int rc;
7351
7352         mutex_init(&bp->port.phy_mutex);
7353
7354         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7355         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7356
7357         rc = bnx2x_get_hwinfo(bp);
7358
7359         /* need to reset chip if undi was active */
7360         if (!BP_NOMCP(bp))
7361                 bnx2x_undi_unload(bp);
7362
7363         if (CHIP_REV_IS_FPGA(bp))
7364                 printk(KERN_ERR PFX "FPGA detected\n");
7365
7366         if (BP_NOMCP(bp) && (func == 0))
7367                 printk(KERN_ERR PFX
7368                        "MCP disabled, must load devices in order!\n");
7369
7370         /* Set TPA flags */
7371         if (disable_tpa) {
7372                 bp->flags &= ~TPA_ENABLE_FLAG;
7373                 bp->dev->features &= ~NETIF_F_LRO;
7374         } else {
7375                 bp->flags |= TPA_ENABLE_FLAG;
7376                 bp->dev->features |= NETIF_F_LRO;
7377         }
7378
7379
7380         bp->tx_ring_size = MAX_TX_AVAIL;
7381         bp->rx_ring_size = MAX_RX_AVAIL;
7382
7383         bp->rx_csum = 1;
7384         bp->rx_offset = 0;
7385
7386         bp->tx_ticks = 50;
7387         bp->rx_ticks = 25;
7388
7389         bp->stats_ticks = 1000000 & 0xffff00;
7390
7391         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7392         bp->current_interval = (poll ? poll : bp->timer_interval);
7393
7394         init_timer(&bp->timer);
7395         bp->timer.expires = jiffies + bp->current_interval;
7396         bp->timer.data = (unsigned long) bp;
7397         bp->timer.function = bnx2x_timer;
7398
7399         return rc;
7400 }
7401
7402 /*
7403  * ethtool service functions
7404  */
7405
7406 /* All ethtool functions called with rtnl_lock */
7407
7408 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7409 {
7410         struct bnx2x *bp = netdev_priv(dev);
7411
7412         cmd->supported = bp->port.supported;
7413         cmd->advertising = bp->port.advertising;
7414
7415         if (netif_carrier_ok(dev)) {
7416                 cmd->speed = bp->link_vars.line_speed;
7417                 cmd->duplex = bp->link_vars.duplex;
7418         } else {
7419                 cmd->speed = bp->link_params.req_line_speed;
7420                 cmd->duplex = bp->link_params.req_duplex;
7421         }
7422         if (IS_E1HMF(bp)) {
7423                 u16 vn_max_rate;
7424
7425                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7426                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7427                 if (vn_max_rate < cmd->speed)
7428                         cmd->speed = vn_max_rate;
7429         }
7430
7431         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7432                 u32 ext_phy_type =
7433                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7434
7435                 switch (ext_phy_type) {
7436                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7437                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7438                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7439                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7440                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7441                         cmd->port = PORT_FIBRE;
7442                         break;
7443
7444                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7445                         cmd->port = PORT_TP;
7446                         break;
7447
7448                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7449                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7450                                   bp->link_params.ext_phy_config);
7451                         break;
7452
7453                 default:
7454                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7455                            bp->link_params.ext_phy_config);
7456                         break;
7457                 }
7458         } else
7459                 cmd->port = PORT_TP;
7460
7461         cmd->phy_address = bp->port.phy_addr;
7462         cmd->transceiver = XCVR_INTERNAL;
7463
7464         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7465                 cmd->autoneg = AUTONEG_ENABLE;
7466         else
7467                 cmd->autoneg = AUTONEG_DISABLE;
7468
7469         cmd->maxtxpkt = 0;
7470         cmd->maxrxpkt = 0;
7471
7472         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7473            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7474            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7475            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7476            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7477            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7478            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7479
7480         return 0;
7481 }
7482
7483 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7484 {
7485         struct bnx2x *bp = netdev_priv(dev);
7486         u32 advertising;
7487
7488         if (IS_E1HMF(bp))
7489                 return 0;
7490
7491         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7492            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7493            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7494            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7495            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7496            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7497            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7498
7499         if (cmd->autoneg == AUTONEG_ENABLE) {
7500                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7501                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7502                         return -EINVAL;
7503                 }
7504
7505                 /* advertise the requested speed and duplex if supported */
7506                 cmd->advertising &= bp->port.supported;
7507
7508                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7509                 bp->link_params.req_duplex = DUPLEX_FULL;
7510                 bp->port.advertising |= (ADVERTISED_Autoneg |
7511                                          cmd->advertising);
7512
7513         } else { /* forced speed */
7514                 /* advertise the requested speed and duplex if supported */
7515                 switch (cmd->speed) {
7516                 case SPEED_10:
7517                         if (cmd->duplex == DUPLEX_FULL) {
7518                                 if (!(bp->port.supported &
7519                                       SUPPORTED_10baseT_Full)) {
7520                                         DP(NETIF_MSG_LINK,
7521                                            "10M full not supported\n");
7522                                         return -EINVAL;
7523                                 }
7524
7525                                 advertising = (ADVERTISED_10baseT_Full |
7526                                                ADVERTISED_TP);
7527                         } else {
7528                                 if (!(bp->port.supported &
7529                                       SUPPORTED_10baseT_Half)) {
7530                                         DP(NETIF_MSG_LINK,
7531                                            "10M half not supported\n");
7532                                         return -EINVAL;
7533                                 }
7534
7535                                 advertising = (ADVERTISED_10baseT_Half |
7536                                                ADVERTISED_TP);
7537                         }
7538                         break;
7539
7540                 case SPEED_100:
7541                         if (cmd->duplex == DUPLEX_FULL) {
7542                                 if (!(bp->port.supported &
7543                                                 SUPPORTED_100baseT_Full)) {
7544                                         DP(NETIF_MSG_LINK,
7545                                            "100M full not supported\n");
7546                                         return -EINVAL;
7547                                 }
7548
7549                                 advertising = (ADVERTISED_100baseT_Full |
7550                                                ADVERTISED_TP);
7551                         } else {
7552                                 if (!(bp->port.supported &
7553                                                 SUPPORTED_100baseT_Half)) {
7554                                         DP(NETIF_MSG_LINK,
7555                                            "100M half not supported\n");
7556                                         return -EINVAL;
7557                                 }
7558
7559                                 advertising = (ADVERTISED_100baseT_Half |
7560                                                ADVERTISED_TP);
7561                         }
7562                         break;
7563
7564                 case SPEED_1000:
7565                         if (cmd->duplex != DUPLEX_FULL) {
7566                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7567                                 return -EINVAL;
7568                         }
7569
7570                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7571                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7572                                 return -EINVAL;
7573                         }
7574
7575                         advertising = (ADVERTISED_1000baseT_Full |
7576                                        ADVERTISED_TP);
7577                         break;
7578
7579                 case SPEED_2500:
7580                         if (cmd->duplex != DUPLEX_FULL) {
7581                                 DP(NETIF_MSG_LINK,
7582                                    "2.5G half not supported\n");
7583                                 return -EINVAL;
7584                         }
7585
7586                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7587                                 DP(NETIF_MSG_LINK,
7588                                    "2.5G full not supported\n");
7589                                 return -EINVAL;
7590                         }
7591
7592                         advertising = (ADVERTISED_2500baseX_Full |
7593                                        ADVERTISED_TP);
7594                         break;
7595
7596                 case SPEED_10000:
7597                         if (cmd->duplex != DUPLEX_FULL) {
7598                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7599                                 return -EINVAL;
7600                         }
7601
7602                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7603                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7604                                 return -EINVAL;
7605                         }
7606
7607                         advertising = (ADVERTISED_10000baseT_Full |
7608                                        ADVERTISED_FIBRE);
7609                         break;
7610
7611                 default:
7612                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7613                         return -EINVAL;
7614                 }
7615
7616                 bp->link_params.req_line_speed = cmd->speed;
7617                 bp->link_params.req_duplex = cmd->duplex;
7618                 bp->port.advertising = advertising;
7619         }
7620
7621         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7622            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7623            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7624            bp->port.advertising);
7625
7626         if (netif_running(dev)) {
7627                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7628                 bnx2x_link_set(bp);
7629         }
7630
7631         return 0;
7632 }
7633
7634 #define PHY_FW_VER_LEN                  10
7635
7636 static void bnx2x_get_drvinfo(struct net_device *dev,
7637                               struct ethtool_drvinfo *info)
7638 {
7639         struct bnx2x *bp = netdev_priv(dev);
7640         char phy_fw_ver[PHY_FW_VER_LEN];
7641
7642         strcpy(info->driver, DRV_MODULE_NAME);
7643         strcpy(info->version, DRV_MODULE_VERSION);
7644
7645         phy_fw_ver[0] = '\0';
7646         if (bp->port.pmf) {
7647                 bnx2x_phy_hw_lock(bp);
7648                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7649                                              (bp->state != BNX2X_STATE_CLOSED),
7650                                              phy_fw_ver, PHY_FW_VER_LEN);
7651                 bnx2x_phy_hw_unlock(bp);
7652         }
7653
7654         snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
7655                  BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7656                  BCM_5710_FW_REVISION_VERSION,
7657                  BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
7658                  ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
7659         strcpy(info->bus_info, pci_name(bp->pdev));
7660         info->n_stats = BNX2X_NUM_STATS;
7661         info->testinfo_len = BNX2X_NUM_TESTS;
7662         info->eedump_len = bp->common.flash_size;
7663         info->regdump_len = 0;
7664 }
7665
7666 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7667 {
7668         struct bnx2x *bp = netdev_priv(dev);
7669
7670         if (bp->flags & NO_WOL_FLAG) {
7671                 wol->supported = 0;
7672                 wol->wolopts = 0;
7673         } else {
7674                 wol->supported = WAKE_MAGIC;
7675                 if (bp->wol)
7676                         wol->wolopts = WAKE_MAGIC;
7677                 else
7678                         wol->wolopts = 0;
7679         }
7680         memset(&wol->sopass, 0, sizeof(wol->sopass));
7681 }
7682
7683 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7684 {
7685         struct bnx2x *bp = netdev_priv(dev);
7686
7687         if (wol->wolopts & ~WAKE_MAGIC)
7688                 return -EINVAL;
7689
7690         if (wol->wolopts & WAKE_MAGIC) {
7691                 if (bp->flags & NO_WOL_FLAG)
7692                         return -EINVAL;
7693
7694                 bp->wol = 1;
7695         } else
7696                 bp->wol = 0;
7697
7698         return 0;
7699 }
7700
7701 static u32 bnx2x_get_msglevel(struct net_device *dev)
7702 {
7703         struct bnx2x *bp = netdev_priv(dev);
7704
7705         return bp->msglevel;
7706 }
7707
7708 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7709 {
7710         struct bnx2x *bp = netdev_priv(dev);
7711
7712         if (capable(CAP_NET_ADMIN))
7713                 bp->msglevel = level;
7714 }
7715
7716 static int bnx2x_nway_reset(struct net_device *dev)
7717 {
7718         struct bnx2x *bp = netdev_priv(dev);
7719
7720         if (!bp->port.pmf)
7721                 return 0;
7722
7723         if (netif_running(dev)) {
7724                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7725                 bnx2x_link_set(bp);
7726         }
7727
7728         return 0;
7729 }
7730
7731 static int bnx2x_get_eeprom_len(struct net_device *dev)
7732 {
7733         struct bnx2x *bp = netdev_priv(dev);
7734
7735         return bp->common.flash_size;
7736 }
7737
7738 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7739 {
7740         int port = BP_PORT(bp);
7741         int count, i;
7742         u32 val = 0;
7743
7744         /* adjust timeout for emulation/FPGA */
7745         count = NVRAM_TIMEOUT_COUNT;
7746         if (CHIP_REV_IS_SLOW(bp))
7747                 count *= 100;
7748
7749         /* request access to nvram interface */
7750         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7751                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7752
7753         for (i = 0; i < count*10; i++) {
7754                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7755                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7756                         break;
7757
7758                 udelay(5);
7759         }
7760
7761         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7762                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7763                 return -EBUSY;
7764         }
7765
7766         return 0;
7767 }
7768
7769 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7770 {
7771         int port = BP_PORT(bp);
7772         int count, i;
7773         u32 val = 0;
7774
7775         /* adjust timeout for emulation/FPGA */
7776         count = NVRAM_TIMEOUT_COUNT;
7777         if (CHIP_REV_IS_SLOW(bp))
7778                 count *= 100;
7779
7780         /* relinquish nvram interface */
7781         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7782                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7783
7784         for (i = 0; i < count*10; i++) {
7785                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7786                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7787                         break;
7788
7789                 udelay(5);
7790         }
7791
7792         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7793                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7794                 return -EBUSY;
7795         }
7796
7797         return 0;
7798 }
7799
7800 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7801 {
7802         u32 val;
7803
7804         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7805
7806         /* enable both bits, even on read */
7807         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7808                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7809                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7810 }
7811
7812 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7813 {
7814         u32 val;
7815
7816         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7817
7818         /* disable both bits, even after read */
7819         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7820                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7821                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7822 }
7823
7824 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7825                                   u32 cmd_flags)
7826 {
7827         int count, i, rc;
7828         u32 val;
7829
7830         /* build the command word */
7831         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7832
7833         /* need to clear DONE bit separately */
7834         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7835
7836         /* address of the NVRAM to read from */
7837         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7838                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7839
7840         /* issue a read command */
7841         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7842
7843         /* adjust timeout for emulation/FPGA */
7844         count = NVRAM_TIMEOUT_COUNT;
7845         if (CHIP_REV_IS_SLOW(bp))
7846                 count *= 100;
7847
7848         /* wait for completion */
7849         *ret_val = 0;
7850         rc = -EBUSY;
7851         for (i = 0; i < count; i++) {
7852                 udelay(5);
7853                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7854
7855                 if (val & MCPR_NVM_COMMAND_DONE) {
7856                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7857                         /* we read nvram data in cpu order
7858                          * but ethtool sees it as an array of bytes
7859                          * converting to big-endian will do the work */
7860                         val = cpu_to_be32(val);
7861                         *ret_val = val;
7862                         rc = 0;
7863                         break;
7864                 }
7865         }
7866
7867         return rc;
7868 }
7869
7870 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7871                             int buf_size)
7872 {
7873         int rc;
7874         u32 cmd_flags;
7875         u32 val;
7876
7877         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7878                 DP(BNX2X_MSG_NVM,
7879                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
7880                    offset, buf_size);
7881                 return -EINVAL;
7882         }
7883
7884         if (offset + buf_size > bp->common.flash_size) {
7885                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7886                                   " buf_size (0x%x) > flash_size (0x%x)\n",
7887                    offset, buf_size, bp->common.flash_size);
7888                 return -EINVAL;
7889         }
7890
7891         /* request access to nvram interface */
7892         rc = bnx2x_acquire_nvram_lock(bp);
7893         if (rc)
7894                 return rc;
7895
7896         /* enable access to nvram interface */
7897         bnx2x_enable_nvram_access(bp);
7898
7899         /* read the first word(s) */
7900         cmd_flags = MCPR_NVM_COMMAND_FIRST;
7901         while ((buf_size > sizeof(u32)) && (rc == 0)) {
7902                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7903                 memcpy(ret_buf, &val, 4);
7904
7905                 /* advance to the next dword */
7906                 offset += sizeof(u32);
7907                 ret_buf += sizeof(u32);
7908                 buf_size -= sizeof(u32);
7909                 cmd_flags = 0;
7910         }
7911
7912         if (rc == 0) {
7913                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7914                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7915                 memcpy(ret_buf, &val, 4);
7916         }
7917
7918         /* disable access to nvram interface */
7919         bnx2x_disable_nvram_access(bp);
7920         bnx2x_release_nvram_lock(bp);
7921
7922         return rc;
7923 }
7924
7925 static int bnx2x_get_eeprom(struct net_device *dev,
7926                             struct ethtool_eeprom *eeprom, u8 *eebuf)
7927 {
7928         struct bnx2x *bp = netdev_priv(dev);
7929         int rc;
7930
7931         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7932            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
7933            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7934            eeprom->len, eeprom->len);
7935
7936         /* parameters already validated in ethtool_get_eeprom */
7937
7938         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7939
7940         return rc;
7941 }
7942
7943 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7944                                    u32 cmd_flags)
7945 {
7946         int count, i, rc;
7947
7948         /* build the command word */
7949         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7950
7951         /* need to clear DONE bit separately */
7952         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7953
7954         /* write the data */
7955         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7956
7957         /* address of the NVRAM to write to */
7958         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7959                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7960
7961         /* issue the write command */
7962         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7963
7964         /* adjust timeout for emulation/FPGA */
7965         count = NVRAM_TIMEOUT_COUNT;
7966         if (CHIP_REV_IS_SLOW(bp))
7967                 count *= 100;
7968
7969         /* wait for completion */
7970         rc = -EBUSY;
7971         for (i = 0; i < count; i++) {
7972                 udelay(5);
7973                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7974                 if (val & MCPR_NVM_COMMAND_DONE) {
7975                         rc = 0;
7976                         break;
7977                 }
7978         }
7979
7980         return rc;
7981 }
7982
7983 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
7984
7985 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7986                               int buf_size)
7987 {
7988         int rc;
7989         u32 cmd_flags;
7990         u32 align_offset;
7991         u32 val;
7992
7993         if (offset + buf_size > bp->common.flash_size) {
7994                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7995                                   " buf_size (0x%x) > flash_size (0x%x)\n",
7996                    offset, buf_size, bp->common.flash_size);
7997                 return -EINVAL;
7998         }
7999
8000         /* request access to nvram interface */
8001         rc = bnx2x_acquire_nvram_lock(bp);
8002         if (rc)
8003                 return rc;
8004
8005         /* enable access to nvram interface */
8006         bnx2x_enable_nvram_access(bp);
8007
8008         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8009         align_offset = (offset & ~0x03);
8010         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8011
8012         if (rc == 0) {
8013                 val &= ~(0xff << BYTE_OFFSET(offset));
8014                 val |= (*data_buf << BYTE_OFFSET(offset));
8015
8016                 /* nvram data is returned as an array of bytes
8017                  * convert it back to cpu order */
8018                 val = be32_to_cpu(val);
8019
8020                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8021                                              cmd_flags);
8022         }
8023
8024         /* disable access to nvram interface */
8025         bnx2x_disable_nvram_access(bp);
8026         bnx2x_release_nvram_lock(bp);
8027
8028         return rc;
8029 }
8030
8031 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8032                              int buf_size)
8033 {
8034         int rc;
8035         u32 cmd_flags;
8036         u32 val;
8037         u32 written_so_far;
8038
8039         if (buf_size == 1)      /* ethtool */
8040                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8041
8042         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8043                 DP(BNX2X_MSG_NVM,
8044                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8045                    offset, buf_size);
8046                 return -EINVAL;
8047         }
8048
8049         if (offset + buf_size > bp->common.flash_size) {
8050                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8051                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8052                    offset, buf_size, bp->common.flash_size);
8053                 return -EINVAL;
8054         }
8055
8056         /* request access to nvram interface */
8057         rc = bnx2x_acquire_nvram_lock(bp);
8058         if (rc)
8059                 return rc;
8060
8061         /* enable access to nvram interface */
8062         bnx2x_enable_nvram_access(bp);
8063
8064         written_so_far = 0;
8065         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8066         while ((written_so_far < buf_size) && (rc == 0)) {
8067                 if (written_so_far == (buf_size - sizeof(u32)))
8068                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8069                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8070                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8071                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8072                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8073
8074                 memcpy(&val, data_buf, 4);
8075
8076                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8077
8078                 /* advance to the next dword */
8079                 offset += sizeof(u32);
8080                 data_buf += sizeof(u32);
8081                 written_so_far += sizeof(u32);
8082                 cmd_flags = 0;
8083         }
8084
8085         /* disable access to nvram interface */
8086         bnx2x_disable_nvram_access(bp);
8087         bnx2x_release_nvram_lock(bp);
8088
8089         return rc;
8090 }
8091
8092 static int bnx2x_set_eeprom(struct net_device *dev,
8093                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8094 {
8095         struct bnx2x *bp = netdev_priv(dev);
8096         int rc;
8097
8098         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8099            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8100            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8101            eeprom->len, eeprom->len);
8102
8103         /* parameters already validated in ethtool_set_eeprom */
8104
8105         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8106         if (eeprom->magic == 0x00504859)
8107                 if (bp->port.pmf) {
8108
8109                         bnx2x_phy_hw_lock(bp);
8110                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8111                                              bp->link_params.ext_phy_config,
8112                                              (bp->state != BNX2X_STATE_CLOSED),
8113                                              eebuf, eeprom->len);
8114                         if ((bp->state == BNX2X_STATE_OPEN) ||
8115                             (bp->state == BNX2X_STATE_DISABLED)) {
8116                                 rc |= bnx2x_link_reset(&bp->link_params,
8117                                                        &bp->link_vars);
8118                                 rc |= bnx2x_phy_init(&bp->link_params,
8119                                                      &bp->link_vars);
8120                         }
8121                         bnx2x_phy_hw_unlock(bp);
8122
8123                 } else /* Only the PMF can access the PHY */
8124                         return -EINVAL;
8125         else
8126                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8127
8128         return rc;
8129 }
8130
8131 static int bnx2x_get_coalesce(struct net_device *dev,
8132                               struct ethtool_coalesce *coal)
8133 {
8134         struct bnx2x *bp = netdev_priv(dev);
8135
8136         memset(coal, 0, sizeof(struct ethtool_coalesce));
8137
8138         coal->rx_coalesce_usecs = bp->rx_ticks;
8139         coal->tx_coalesce_usecs = bp->tx_ticks;
8140         coal->stats_block_coalesce_usecs = bp->stats_ticks;
8141
8142         return 0;
8143 }
8144
8145 static int bnx2x_set_coalesce(struct net_device *dev,
8146                               struct ethtool_coalesce *coal)
8147 {
8148         struct bnx2x *bp = netdev_priv(dev);
8149
8150         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8151         if (bp->rx_ticks > 3000)
8152                 bp->rx_ticks = 3000;
8153
8154         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8155         if (bp->tx_ticks > 0x3000)
8156                 bp->tx_ticks = 0x3000;
8157
8158         bp->stats_ticks = coal->stats_block_coalesce_usecs;
8159         if (bp->stats_ticks > 0xffff00)
8160                 bp->stats_ticks = 0xffff00;
8161         bp->stats_ticks &= 0xffff00;
8162
8163         if (netif_running(dev))
8164                 bnx2x_update_coalesce(bp);
8165
8166         return 0;
8167 }
8168
8169 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8170 {
8171         struct bnx2x *bp = netdev_priv(dev);
8172         int changed = 0;
8173         int rc = 0;
8174
8175         if (data & ETH_FLAG_LRO) {
8176                 if (!(dev->features & NETIF_F_LRO)) {
8177                         dev->features |= NETIF_F_LRO;
8178                         bp->flags |= TPA_ENABLE_FLAG;
8179                         changed = 1;
8180                 }
8181
8182         } else if (dev->features & NETIF_F_LRO) {
8183                 dev->features &= ~NETIF_F_LRO;
8184                 bp->flags &= ~TPA_ENABLE_FLAG;
8185                 changed = 1;
8186         }
8187
8188         if (changed && netif_running(dev)) {
8189                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8190                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8191         }
8192
8193         return rc;
8194 }
8195
8196 static void bnx2x_get_ringparam(struct net_device *dev,
8197                                 struct ethtool_ringparam *ering)
8198 {
8199         struct bnx2x *bp = netdev_priv(dev);
8200
8201         ering->rx_max_pending = MAX_RX_AVAIL;
8202         ering->rx_mini_max_pending = 0;
8203         ering->rx_jumbo_max_pending = 0;
8204
8205         ering->rx_pending = bp->rx_ring_size;
8206         ering->rx_mini_pending = 0;
8207         ering->rx_jumbo_pending = 0;
8208
8209         ering->tx_max_pending = MAX_TX_AVAIL;
8210         ering->tx_pending = bp->tx_ring_size;
8211 }
8212
8213 static int bnx2x_set_ringparam(struct net_device *dev,
8214                                struct ethtool_ringparam *ering)
8215 {
8216         struct bnx2x *bp = netdev_priv(dev);
8217         int rc = 0;
8218
8219         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8220             (ering->tx_pending > MAX_TX_AVAIL) ||
8221             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8222                 return -EINVAL;
8223
8224         bp->rx_ring_size = ering->rx_pending;
8225         bp->tx_ring_size = ering->tx_pending;
8226
8227         if (netif_running(dev)) {
8228                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8229                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8230         }
8231
8232         return rc;
8233 }
8234
8235 static void bnx2x_get_pauseparam(struct net_device *dev,
8236                                  struct ethtool_pauseparam *epause)
8237 {
8238         struct bnx2x *bp = netdev_priv(dev);
8239
8240         epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8241                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8242
8243         epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8244                             FLOW_CTRL_RX);
8245         epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8246                             FLOW_CTRL_TX);
8247
8248         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8249            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8250            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8251 }
8252
8253 static int bnx2x_set_pauseparam(struct net_device *dev,
8254                                 struct ethtool_pauseparam *epause)
8255 {
8256         struct bnx2x *bp = netdev_priv(dev);
8257
8258         if (IS_E1HMF(bp))
8259                 return 0;
8260
8261         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8262            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8263            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8264
8265         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8266
8267         if (epause->rx_pause)
8268                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8269
8270         if (epause->tx_pause)
8271                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8272
8273         if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8274                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8275
8276         if (epause->autoneg) {
8277                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8278                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8279                         return -EINVAL;
8280                 }
8281
8282                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8283                         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8284         }
8285
8286         DP(NETIF_MSG_LINK,
8287            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8288
8289         if (netif_running(dev)) {
8290                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8291                 bnx2x_link_set(bp);
8292         }
8293
8294         return 0;
8295 }
8296
8297 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8298 {
8299         struct bnx2x *bp = netdev_priv(dev);
8300
8301         return bp->rx_csum;
8302 }
8303
8304 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8305 {
8306         struct bnx2x *bp = netdev_priv(dev);
8307
8308         bp->rx_csum = data;
8309         return 0;
8310 }
8311
8312 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8313 {
8314         if (data) {
8315                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8316                 dev->features |= NETIF_F_TSO6;
8317         } else {
8318                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8319                 dev->features &= ~NETIF_F_TSO6;
8320         }
8321
8322         return 0;
8323 }
8324
8325 static const struct {
8326         char string[ETH_GSTRING_LEN];
8327 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8328         { "register_test (offline)" },
8329         { "memory_test (offline)" },
8330         { "loopback_test (offline)" },
8331         { "nvram_test (online)" },
8332         { "interrupt_test (online)" },
8333         { "link_test (online)" },
8334         { "idle check (online)" },
8335         { "MC errors (online)" }
8336 };
8337
8338 static int bnx2x_self_test_count(struct net_device *dev)
8339 {
8340         return BNX2X_NUM_TESTS;
8341 }
8342
8343 static int bnx2x_test_registers(struct bnx2x *bp)
8344 {
8345         int idx, i, rc = -ENODEV;
8346         u32 wr_val = 0;
8347         static const struct {
8348                 u32  offset0;
8349                 u32  offset1;
8350                 u32  mask;
8351         } reg_tbl[] = {
8352 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8353                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8354                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8355                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8356                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8357                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8358                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8359                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8360                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8361                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8362 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8363                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8364                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8365                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8366                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8367                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8368                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8369                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8370                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8371                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8372 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8373                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8374                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8375                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8376                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8377                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8378                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8379                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8380                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8381                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8382 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8383                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8384                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8385                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8386                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8387                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8388                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8389                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8390
8391                 { 0xffffffff, 0, 0x00000000 }
8392         };
8393
8394         if (!netif_running(bp->dev))
8395                 return rc;
8396
8397         /* Repeat the test twice:
8398            First by writing 0x00000000, second by writing 0xffffffff */
8399         for (idx = 0; idx < 2; idx++) {
8400
8401                 switch (idx) {
8402                 case 0:
8403                         wr_val = 0;
8404                         break;
8405                 case 1:
8406                         wr_val = 0xffffffff;
8407                         break;
8408                 }
8409
8410                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8411                         u32 offset, mask, save_val, val;
8412                         int port = BP_PORT(bp);
8413
8414                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8415                         mask = reg_tbl[i].mask;
8416
8417                         save_val = REG_RD(bp, offset);
8418
8419                         REG_WR(bp, offset, wr_val);
8420                         val = REG_RD(bp, offset);
8421
8422                         /* Restore the original register's value */
8423                         REG_WR(bp, offset, save_val);
8424
8425                         /* verify that value is as expected value */
8426                         if ((val & mask) != (wr_val & mask))
8427                                 goto test_reg_exit;
8428                 }
8429         }
8430
8431         rc = 0;
8432
8433 test_reg_exit:
8434         return rc;
8435 }
8436
8437 static int bnx2x_test_memory(struct bnx2x *bp)
8438 {
8439         int i, j, rc = -ENODEV;
8440         u32 val;
8441         static const struct {
8442                 u32 offset;
8443                 int size;
8444         } mem_tbl[] = {
8445                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8446                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8447                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8448                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8449                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8450                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8451                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8452
8453                 { 0xffffffff, 0 }
8454         };
8455         static const struct {
8456                 char *name;
8457                 u32 offset;
8458                 u32 mask;
8459         } prty_tbl[] = {
8460                 { "CCM_REG_CCM_PRTY_STS",     CCM_REG_CCM_PRTY_STS,     0 },
8461                 { "CFC_REG_CFC_PRTY_STS",     CFC_REG_CFC_PRTY_STS,     0 },
8462                 { "DMAE_REG_DMAE_PRTY_STS",   DMAE_REG_DMAE_PRTY_STS,   0 },
8463                 { "TCM_REG_TCM_PRTY_STS",     TCM_REG_TCM_PRTY_STS,     0 },
8464                 { "UCM_REG_UCM_PRTY_STS",     UCM_REG_UCM_PRTY_STS,     0 },
8465                 { "XCM_REG_XCM_PRTY_STS",     XCM_REG_XCM_PRTY_STS,     0x1 },
8466
8467                 { NULL, 0xffffffff, 0 }
8468         };
8469
8470         if (!netif_running(bp->dev))
8471                 return rc;
8472
8473         /* Go through all the memories */
8474         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8475                 for (j = 0; j < mem_tbl[i].size; j++)
8476                         REG_RD(bp, mem_tbl[i].offset + j*4);
8477
8478         /* Check the parity status */
8479         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8480                 val = REG_RD(bp, prty_tbl[i].offset);
8481                 if (val & ~(prty_tbl[i].mask)) {
8482                         DP(NETIF_MSG_HW,
8483                            "%s is 0x%x\n", prty_tbl[i].name, val);
8484                         goto test_mem_exit;
8485                 }
8486         }
8487
8488         rc = 0;
8489
8490 test_mem_exit:
8491         return rc;
8492 }
8493
8494 static void bnx2x_netif_start(struct bnx2x *bp)
8495 {
8496         int i;
8497
8498         if (atomic_dec_and_test(&bp->intr_sem)) {
8499                 if (netif_running(bp->dev)) {
8500                         bnx2x_int_enable(bp);
8501                         for_each_queue(bp, i)
8502                                 napi_enable(&bnx2x_fp(bp, i, napi));
8503                         if (bp->state == BNX2X_STATE_OPEN)
8504                                 netif_wake_queue(bp->dev);
8505                 }
8506         }
8507 }
8508
8509 static void bnx2x_netif_stop(struct bnx2x *bp)
8510 {
8511         int i;
8512
8513         if (netif_running(bp->dev)) {
8514                 netif_tx_disable(bp->dev);
8515                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8516                 for_each_queue(bp, i)
8517                         napi_disable(&bnx2x_fp(bp, i, napi));
8518         }
8519         bnx2x_int_disable_sync(bp);
8520 }
8521
8522 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8523 {
8524         int cnt = 1000;
8525
8526         if (link_up)
8527                 while (bnx2x_link_test(bp) && cnt--)
8528                         msleep(10);
8529 }
8530
8531 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8532 {
8533         unsigned int pkt_size, num_pkts, i;
8534         struct sk_buff *skb;
8535         unsigned char *packet;
8536         struct bnx2x_fastpath *fp = &bp->fp[0];
8537         u16 tx_start_idx, tx_idx;
8538         u16 rx_start_idx, rx_idx;
8539         u16 pkt_prod;
8540         struct sw_tx_bd *tx_buf;
8541         struct eth_tx_bd *tx_bd;
8542         dma_addr_t mapping;
8543         union eth_rx_cqe *cqe;
8544         u8 cqe_fp_flags;
8545         struct sw_rx_bd *rx_buf;
8546         u16 len;
8547         int rc = -ENODEV;
8548
8549         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8550                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8551                 bnx2x_phy_hw_lock(bp);
8552                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8553                 bnx2x_phy_hw_unlock(bp);
8554
8555         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8556                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8557                 bnx2x_phy_hw_lock(bp);
8558                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8559                 bnx2x_phy_hw_unlock(bp);
8560                 /* wait until link state is restored */
8561                 bnx2x_wait_for_link(bp, link_up);
8562
8563         } else
8564                 return -EINVAL;
8565
8566         pkt_size = 1514;
8567         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8568         if (!skb) {
8569                 rc = -ENOMEM;
8570                 goto test_loopback_exit;
8571         }
8572         packet = skb_put(skb, pkt_size);
8573         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8574         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8575         for (i = ETH_HLEN; i < pkt_size; i++)
8576                 packet[i] = (unsigned char) (i & 0xff);
8577
8578         num_pkts = 0;
8579         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8580         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8581
8582         pkt_prod = fp->tx_pkt_prod++;
8583         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8584         tx_buf->first_bd = fp->tx_bd_prod;
8585         tx_buf->skb = skb;
8586
8587         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8588         mapping = pci_map_single(bp->pdev, skb->data,
8589                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8590         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8591         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8592         tx_bd->nbd = cpu_to_le16(1);
8593         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8594         tx_bd->vlan = cpu_to_le16(pkt_prod);
8595         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8596                                        ETH_TX_BD_FLAGS_END_BD);
8597         tx_bd->general_data = ((UNICAST_ADDRESS <<
8598                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8599
8600         fp->hw_tx_prods->bds_prod =
8601                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8602         mb(); /* FW restriction: must not reorder writing nbd and packets */
8603         fp->hw_tx_prods->packets_prod =
8604                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8605         DOORBELL(bp, FP_IDX(fp), 0);
8606
8607         mmiowb();
8608
8609         num_pkts++;
8610         fp->tx_bd_prod++;
8611         bp->dev->trans_start = jiffies;
8612
8613         udelay(100);
8614
8615         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8616         if (tx_idx != tx_start_idx + num_pkts)
8617                 goto test_loopback_exit;
8618
8619         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8620         if (rx_idx != rx_start_idx + num_pkts)
8621                 goto test_loopback_exit;
8622
8623         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8624         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8625         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8626                 goto test_loopback_rx_exit;
8627
8628         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8629         if (len != pkt_size)
8630                 goto test_loopback_rx_exit;
8631
8632         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8633         skb = rx_buf->skb;
8634         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8635         for (i = ETH_HLEN; i < pkt_size; i++)
8636                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8637                         goto test_loopback_rx_exit;
8638
8639         rc = 0;
8640
8641 test_loopback_rx_exit:
8642         bp->dev->last_rx = jiffies;
8643
8644         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8645         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8646         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8647         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8648
8649         /* Update producers */
8650         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8651                              fp->rx_sge_prod);
8652         mmiowb(); /* keep prod updates ordered */
8653
8654 test_loopback_exit:
8655         bp->link_params.loopback_mode = LOOPBACK_NONE;
8656
8657         return rc;
8658 }
8659
8660 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8661 {
8662         int rc = 0;
8663
8664         if (!netif_running(bp->dev))
8665                 return BNX2X_LOOPBACK_FAILED;
8666
8667         bnx2x_netif_stop(bp);
8668
8669         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8670                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8671                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8672         }
8673
8674         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8675                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8676                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8677         }
8678
8679         bnx2x_netif_start(bp);
8680
8681         return rc;
8682 }
8683
8684 #define CRC32_RESIDUAL                  0xdebb20e3
8685
8686 static int bnx2x_test_nvram(struct bnx2x *bp)
8687 {
8688         static const struct {
8689                 int offset;
8690                 int size;
8691         } nvram_tbl[] = {
8692                 {     0,  0x14 }, /* bootstrap */
8693                 {  0x14,  0xec }, /* dir */
8694                 { 0x100, 0x350 }, /* manuf_info */
8695                 { 0x450,  0xf0 }, /* feature_info */
8696                 { 0x640,  0x64 }, /* upgrade_key_info */
8697                 { 0x6a4,  0x64 },
8698                 { 0x708,  0x70 }, /* manuf_key_info */
8699                 { 0x778,  0x70 },
8700                 {     0,     0 }
8701         };
8702         u32 buf[0x350 / 4];
8703         u8 *data = (u8 *)buf;
8704         int i, rc;
8705         u32 magic, csum;
8706
8707         rc = bnx2x_nvram_read(bp, 0, data, 4);
8708         if (rc) {
8709                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8710                 goto test_nvram_exit;
8711         }
8712
8713         magic = be32_to_cpu(buf[0]);
8714         if (magic != 0x669955aa) {
8715                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8716                 rc = -ENODEV;
8717                 goto test_nvram_exit;
8718         }
8719
8720         for (i = 0; nvram_tbl[i].size; i++) {
8721
8722                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8723                                       nvram_tbl[i].size);
8724                 if (rc) {
8725                         DP(NETIF_MSG_PROBE,
8726                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8727                         goto test_nvram_exit;
8728                 }
8729
8730                 csum = ether_crc_le(nvram_tbl[i].size, data);
8731                 if (csum != CRC32_RESIDUAL) {
8732                         DP(NETIF_MSG_PROBE,
8733                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8734                         rc = -ENODEV;
8735                         goto test_nvram_exit;
8736                 }
8737         }
8738
8739 test_nvram_exit:
8740         return rc;
8741 }
8742
8743 static int bnx2x_test_intr(struct bnx2x *bp)
8744 {
8745         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8746         int i, rc;
8747
8748         if (!netif_running(bp->dev))
8749                 return -ENODEV;
8750
8751         config->hdr.length_6b = 0;
8752         config->hdr.offset = 0;
8753         config->hdr.client_id = BP_CL_ID(bp);
8754         config->hdr.reserved1 = 0;
8755
8756         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8757                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8758                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8759         if (rc == 0) {
8760                 bp->set_mac_pending++;
8761                 for (i = 0; i < 10; i++) {
8762                         if (!bp->set_mac_pending)
8763                                 break;
8764                         msleep_interruptible(10);
8765                 }
8766                 if (i == 10)
8767                         rc = -ENODEV;
8768         }
8769
8770         return rc;
8771 }
8772
8773 static void bnx2x_self_test(struct net_device *dev,
8774                             struct ethtool_test *etest, u64 *buf)
8775 {
8776         struct bnx2x *bp = netdev_priv(dev);
8777
8778         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8779
8780         if (!netif_running(dev))
8781                 return;
8782
8783         /* offline tests are not suppoerted in MF mode */
8784         if (IS_E1HMF(bp))
8785                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8786
8787         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8788                 u8 link_up;
8789
8790                 link_up = bp->link_vars.link_up;
8791                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8792                 bnx2x_nic_load(bp, LOAD_DIAG);
8793                 /* wait until link state is restored */
8794                 bnx2x_wait_for_link(bp, link_up);
8795
8796                 if (bnx2x_test_registers(bp) != 0) {
8797                         buf[0] = 1;
8798                         etest->flags |= ETH_TEST_FL_FAILED;
8799                 }
8800                 if (bnx2x_test_memory(bp) != 0) {
8801                         buf[1] = 1;
8802                         etest->flags |= ETH_TEST_FL_FAILED;
8803                 }
8804                 buf[2] = bnx2x_test_loopback(bp, link_up);
8805                 if (buf[2] != 0)
8806                         etest->flags |= ETH_TEST_FL_FAILED;
8807
8808                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8809                 bnx2x_nic_load(bp, LOAD_NORMAL);
8810                 /* wait until link state is restored */
8811                 bnx2x_wait_for_link(bp, link_up);
8812         }
8813         if (bnx2x_test_nvram(bp) != 0) {
8814                 buf[3] = 1;
8815                 etest->flags |= ETH_TEST_FL_FAILED;
8816         }
8817         if (bnx2x_test_intr(bp) != 0) {
8818                 buf[4] = 1;
8819                 etest->flags |= ETH_TEST_FL_FAILED;
8820         }
8821         if (bp->port.pmf)
8822                 if (bnx2x_link_test(bp) != 0) {
8823                         buf[5] = 1;
8824                         etest->flags |= ETH_TEST_FL_FAILED;
8825                 }
8826         buf[7] = bnx2x_mc_assert(bp);
8827         if (buf[7] != 0)
8828                 etest->flags |= ETH_TEST_FL_FAILED;
8829
8830 #ifdef BNX2X_EXTRA_DEBUG
8831         bnx2x_panic_dump(bp);
8832 #endif
8833 }
8834
8835 static const struct {
8836         long offset;
8837         int size;
8838         u32 flags;
8839         char string[ETH_GSTRING_LEN];
8840 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8841 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),     8, 1, "rx_bytes" },
8842         { STATS_OFFSET32(error_bytes_received_hi),     8, 1, "rx_error_bytes" },
8843         { STATS_OFFSET32(total_bytes_transmitted_hi),  8, 1, "tx_bytes" },
8844         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" },
8845         { STATS_OFFSET32(total_unicast_packets_received_hi),
8846                                                 8, 1, "rx_ucast_packets" },
8847         { STATS_OFFSET32(total_multicast_packets_received_hi),
8848                                                 8, 1, "rx_mcast_packets" },
8849         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8850                                                 8, 1, "rx_bcast_packets" },
8851         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8852                                                 8, 1, "tx_packets" },
8853         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8854                                                 8, 0, "tx_mac_errors" },
8855 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8856                                                 8, 0, "tx_carrier_errors" },
8857         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8858                                                 8, 0, "rx_crc_errors" },
8859         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8860                                                 8, 0, "rx_align_errors" },
8861         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8862                                                 8, 0, "tx_single_collisions" },
8863         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8864                                                 8, 0, "tx_multi_collisions" },
8865         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8866                                                 8, 0, "tx_deferred" },
8867         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8868                                                 8, 0, "tx_excess_collisions" },
8869         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8870                                                 8, 0, "tx_late_collisions" },
8871         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8872                                                 8, 0, "tx_total_collisions" },
8873         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8874                                                 8, 0, "rx_fragments" },
8875 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" },
8876         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8877                                                 8, 0, "rx_undersize_packets" },
8878         { STATS_OFFSET32(jabber_packets_received),
8879                                                 4, 1, "rx_oversize_packets" },
8880         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8881                                                 8, 0, "tx_64_byte_packets" },
8882         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8883                                         8, 0, "tx_65_to_127_byte_packets" },
8884         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8885                                         8, 0, "tx_128_to_255_byte_packets" },
8886         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8887                                         8, 0, "tx_256_to_511_byte_packets" },
8888         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8889                                         8, 0, "tx_512_to_1023_byte_packets" },
8890         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8891                                         8, 0, "tx_1024_to_1522_byte_packets" },
8892         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
8893                                         8, 0, "tx_1523_to_9022_byte_packets" },
8894 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
8895                                                 8, 0, "rx_xon_frames" },
8896         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
8897                                                 8, 0, "rx_xoff_frames" },
8898         { STATS_OFFSET32(tx_stat_outxonsent_hi),  8, 0, "tx_xon_frames" },
8899         { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" },
8900         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8901                                                 8, 0, "rx_mac_ctrl_frames" },
8902         { STATS_OFFSET32(mac_filter_discard),   4, 1, "rx_filtered_packets" },
8903         { STATS_OFFSET32(no_buff_discard),      4, 1, "rx_discards" },
8904         { STATS_OFFSET32(xxoverflow_discard),   4, 1, "rx_fw_discards" },
8905         { STATS_OFFSET32(brb_drop_hi),          8, 1, "brb_discard" },
8906 /* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" }
8907 };
8908
8909 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8910 {
8911         struct bnx2x *bp = netdev_priv(dev);
8912         int i, j;
8913
8914         switch (stringset) {
8915         case ETH_SS_STATS:
8916                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8917                         if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
8918                                 continue;
8919                         strcpy(buf + j*ETH_GSTRING_LEN,
8920                                bnx2x_stats_arr[i].string);
8921                         j++;
8922                 }
8923                 break;
8924
8925         case ETH_SS_TEST:
8926                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8927                 break;
8928         }
8929 }
8930
8931 static int bnx2x_get_stats_count(struct net_device *dev)
8932 {
8933         struct bnx2x *bp = netdev_priv(dev);
8934         int i, num_stats = 0;
8935
8936         for (i = 0; i < BNX2X_NUM_STATS; i++) {
8937                 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
8938                         continue;
8939                 num_stats++;
8940         }
8941         return num_stats;
8942 }
8943
8944 static void bnx2x_get_ethtool_stats(struct net_device *dev,
8945                                     struct ethtool_stats *stats, u64 *buf)
8946 {
8947         struct bnx2x *bp = netdev_priv(dev);
8948         u32 *hw_stats = (u32 *)&bp->eth_stats;
8949         int i, j;
8950
8951         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
8952                 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
8953                         continue;
8954
8955                 if (bnx2x_stats_arr[i].size == 0) {
8956                         /* skip this counter */
8957                         buf[j] = 0;
8958                         j++;
8959                         continue;
8960                 }
8961                 if (bnx2x_stats_arr[i].size == 4) {
8962                         /* 4-byte counter */
8963                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
8964                         j++;
8965                         continue;
8966                 }
8967                 /* 8-byte counter */
8968                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
8969                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
8970                 j++;
8971         }
8972 }
8973
8974 static int bnx2x_phys_id(struct net_device *dev, u32 data)
8975 {
8976         struct bnx2x *bp = netdev_priv(dev);
8977         int port = BP_PORT(bp);
8978         int i;
8979
8980         if (!netif_running(dev))
8981                 return 0;
8982
8983         if (!bp->port.pmf)
8984                 return 0;
8985
8986         if (data == 0)
8987                 data = 2;
8988
8989         for (i = 0; i < (data * 2); i++) {
8990                 if ((i % 2) == 0)
8991                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
8992                                       bp->link_params.hw_led_mode,
8993                                       bp->link_params.chip_id);
8994                 else
8995                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
8996                                       bp->link_params.hw_led_mode,
8997                                       bp->link_params.chip_id);
8998
8999                 msleep_interruptible(500);
9000                 if (signal_pending(current))
9001                         break;
9002         }
9003
9004         if (bp->link_vars.link_up)
9005                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9006                               bp->link_vars.line_speed,
9007                               bp->link_params.hw_led_mode,
9008                               bp->link_params.chip_id);
9009
9010         return 0;
9011 }
9012
9013 static struct ethtool_ops bnx2x_ethtool_ops = {
9014         .get_settings           = bnx2x_get_settings,
9015         .set_settings           = bnx2x_set_settings,
9016         .get_drvinfo            = bnx2x_get_drvinfo,
9017         .get_wol                = bnx2x_get_wol,
9018         .set_wol                = bnx2x_set_wol,
9019         .get_msglevel           = bnx2x_get_msglevel,
9020         .set_msglevel           = bnx2x_set_msglevel,
9021         .nway_reset             = bnx2x_nway_reset,
9022         .get_link               = ethtool_op_get_link,
9023         .get_eeprom_len         = bnx2x_get_eeprom_len,
9024         .get_eeprom             = bnx2x_get_eeprom,
9025         .set_eeprom             = bnx2x_set_eeprom,
9026         .get_coalesce           = bnx2x_get_coalesce,
9027         .set_coalesce           = bnx2x_set_coalesce,
9028         .get_ringparam          = bnx2x_get_ringparam,
9029         .set_ringparam          = bnx2x_set_ringparam,
9030         .get_pauseparam         = bnx2x_get_pauseparam,
9031         .set_pauseparam         = bnx2x_set_pauseparam,
9032         .get_rx_csum            = bnx2x_get_rx_csum,
9033         .set_rx_csum            = bnx2x_set_rx_csum,
9034         .get_tx_csum            = ethtool_op_get_tx_csum,
9035         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9036         .set_flags              = bnx2x_set_flags,
9037         .get_flags              = ethtool_op_get_flags,
9038         .get_sg                 = ethtool_op_get_sg,
9039         .set_sg                 = ethtool_op_set_sg,
9040         .get_tso                = ethtool_op_get_tso,
9041         .set_tso                = bnx2x_set_tso,
9042         .self_test_count        = bnx2x_self_test_count,
9043         .self_test              = bnx2x_self_test,
9044         .get_strings            = bnx2x_get_strings,
9045         .phys_id                = bnx2x_phys_id,
9046         .get_stats_count        = bnx2x_get_stats_count,
9047         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9048 };
9049
9050 /* end of ethtool_ops */
9051
9052 /****************************************************************************
9053 * General service functions
9054 ****************************************************************************/
9055
9056 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9057 {
9058         u16 pmcsr;
9059
9060         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9061
9062         switch (state) {
9063         case PCI_D0:
9064                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9065                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9066                                        PCI_PM_CTRL_PME_STATUS));
9067
9068                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9069                 /* delay required during transition out of D3hot */
9070                         msleep(20);
9071                 break;
9072
9073         case PCI_D3hot:
9074                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9075                 pmcsr |= 3;
9076
9077                 if (bp->wol)
9078                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9079
9080                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9081                                       pmcsr);
9082
9083                 /* No more memory access after this point until
9084                 * device is brought back to D0.
9085                 */
9086                 break;
9087
9088         default:
9089                 return -EINVAL;
9090         }
9091         return 0;
9092 }
9093
9094 /*
9095  * net_device service functions
9096  */
9097
9098 static int bnx2x_poll(struct napi_struct *napi, int budget)
9099 {
9100         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9101                                                  napi);
9102         struct bnx2x *bp = fp->bp;
9103         int work_done = 0;
9104
9105 #ifdef BNX2X_STOP_ON_ERROR
9106         if (unlikely(bp->panic))
9107                 goto poll_panic;
9108 #endif
9109
9110         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9111         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9112         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9113
9114         bnx2x_update_fpsb_idx(fp);
9115
9116         if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
9117             (fp->tx_pkt_prod != fp->tx_pkt_cons))
9118                 bnx2x_tx_int(fp, budget);
9119
9120         if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
9121                 work_done = bnx2x_rx_int(fp, budget);
9122
9123         rmb(); /* bnx2x_has_work() reads the status block */
9124
9125         /* must not complete if we consumed full budget */
9126         if ((work_done < budget) && !bnx2x_has_work(fp)) {
9127
9128 #ifdef BNX2X_STOP_ON_ERROR
9129 poll_panic:
9130 #endif
9131                 netif_rx_complete(bp->dev, napi);
9132
9133                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9134                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9135                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9136                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9137         }
9138         return work_done;
9139 }
9140
9141
9142 /* we split the first BD into headers and data BDs
9143  * to ease the pain of our fellow micocode engineers
9144  * we use one mapping for both BDs
9145  * So far this has only been observed to happen
9146  * in Other Operating Systems(TM)
9147  */
9148 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9149                                    struct bnx2x_fastpath *fp,
9150                                    struct eth_tx_bd **tx_bd, u16 hlen,
9151                                    u16 bd_prod, int nbd)
9152 {
9153         struct eth_tx_bd *h_tx_bd = *tx_bd;
9154         struct eth_tx_bd *d_tx_bd;
9155         dma_addr_t mapping;
9156         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9157
9158         /* first fix first BD */
9159         h_tx_bd->nbd = cpu_to_le16(nbd);
9160         h_tx_bd->nbytes = cpu_to_le16(hlen);
9161
9162         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9163            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9164            h_tx_bd->addr_lo, h_tx_bd->nbd);
9165
9166         /* now get a new data BD
9167          * (after the pbd) and fill it */
9168         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9169         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9170
9171         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9172                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9173
9174         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9175         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9176         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9177         d_tx_bd->vlan = 0;
9178         /* this marks the BD as one that has no individual mapping
9179          * the FW ignores this flag in a BD not marked start
9180          */
9181         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9182         DP(NETIF_MSG_TX_QUEUED,
9183            "TSO split data size is %d (%x:%x)\n",
9184            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9185
9186         /* update tx_bd for marking the last BD flag */
9187         *tx_bd = d_tx_bd;
9188
9189         return bd_prod;
9190 }
9191
9192 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9193 {
9194         if (fix > 0)
9195                 csum = (u16) ~csum_fold(csum_sub(csum,
9196                                 csum_partial(t_header - fix, fix, 0)));
9197
9198         else if (fix < 0)
9199                 csum = (u16) ~csum_fold(csum_add(csum,
9200                                 csum_partial(t_header, -fix, 0)));
9201
9202         return swab16(csum);
9203 }
9204
9205 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9206 {
9207         u32 rc;
9208
9209         if (skb->ip_summed != CHECKSUM_PARTIAL)
9210                 rc = XMIT_PLAIN;
9211
9212         else {
9213                 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9214                         rc = XMIT_CSUM_V6;
9215                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9216                                 rc |= XMIT_CSUM_TCP;
9217
9218                 } else {
9219                         rc = XMIT_CSUM_V4;
9220                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9221                                 rc |= XMIT_CSUM_TCP;
9222                 }
9223         }
9224
9225         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9226                 rc |= XMIT_GSO_V4;
9227
9228         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9229                 rc |= XMIT_GSO_V6;
9230
9231         return rc;
9232 }
9233
9234 /* check if packet requires linearization (packet is too fragmented) */
9235 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9236                              u32 xmit_type)
9237 {
9238         int to_copy = 0;
9239         int hlen = 0;
9240         int first_bd_sz = 0;
9241
9242         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9243         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9244
9245                 if (xmit_type & XMIT_GSO) {
9246                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9247                         /* Check if LSO packet needs to be copied:
9248                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9249                         int wnd_size = MAX_FETCH_BD - 3;
9250                         /* Number of widnows to check */
9251                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9252                         int wnd_idx = 0;
9253                         int frag_idx = 0;
9254                         u32 wnd_sum = 0;
9255
9256                         /* Headers length */
9257                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9258                                 tcp_hdrlen(skb);
9259
9260                         /* Amount of data (w/o headers) on linear part of SKB*/
9261                         first_bd_sz = skb_headlen(skb) - hlen;
9262
9263                         wnd_sum  = first_bd_sz;
9264
9265                         /* Calculate the first sum - it's special */
9266                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9267                                 wnd_sum +=
9268                                         skb_shinfo(skb)->frags[frag_idx].size;
9269
9270                         /* If there was data on linear skb data - check it */
9271                         if (first_bd_sz > 0) {
9272                                 if (unlikely(wnd_sum < lso_mss)) {
9273                                         to_copy = 1;
9274                                         goto exit_lbl;
9275                                 }
9276
9277                                 wnd_sum -= first_bd_sz;
9278                         }
9279
9280                         /* Others are easier: run through the frag list and
9281                            check all windows */
9282                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9283                                 wnd_sum +=
9284                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9285
9286                                 if (unlikely(wnd_sum < lso_mss)) {
9287                                         to_copy = 1;
9288                                         break;
9289                                 }
9290                                 wnd_sum -=
9291                                         skb_shinfo(skb)->frags[wnd_idx].size;
9292                         }
9293
9294                 } else {
9295                         /* in non-LSO too fragmented packet should always
9296                            be linearized */
9297                         to_copy = 1;
9298                 }
9299         }
9300
9301 exit_lbl:
9302         if (unlikely(to_copy))
9303                 DP(NETIF_MSG_TX_QUEUED,
9304                    "Linearization IS REQUIRED for %s packet. "
9305                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9306                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9307                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9308
9309         return to_copy;
9310 }
9311
9312 /* called with netif_tx_lock
9313  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9314  * netif_wake_queue()
9315  */
9316 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9317 {
9318         struct bnx2x *bp = netdev_priv(dev);
9319         struct bnx2x_fastpath *fp;
9320         struct sw_tx_bd *tx_buf;
9321         struct eth_tx_bd *tx_bd;
9322         struct eth_tx_parse_bd *pbd = NULL;
9323         u16 pkt_prod, bd_prod;
9324         int nbd, fp_index;
9325         dma_addr_t mapping;
9326         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9327         int vlan_off = (bp->e1hov ? 4 : 0);
9328         int i;
9329         u8 hlen = 0;
9330
9331 #ifdef BNX2X_STOP_ON_ERROR
9332         if (unlikely(bp->panic))
9333                 return NETDEV_TX_BUSY;
9334 #endif
9335
9336         fp_index = (smp_processor_id() % bp->num_queues);
9337         fp = &bp->fp[fp_index];
9338
9339         if (unlikely(bnx2x_tx_avail(bp->fp) <
9340                                         (skb_shinfo(skb)->nr_frags + 3))) {
9341                 bp->eth_stats.driver_xoff++,
9342                 netif_stop_queue(dev);
9343                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9344                 return NETDEV_TX_BUSY;
9345         }
9346
9347         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9348            "  gso type %x  xmit_type %x\n",
9349            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9350            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9351
9352         /* First, check if we need to linearaize the skb
9353            (due to FW restrictions) */
9354         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9355                 /* Statistics of linearization */
9356                 bp->lin_cnt++;
9357                 if (skb_linearize(skb) != 0) {
9358                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9359                            "silently dropping this SKB\n");
9360                         dev_kfree_skb_any(skb);
9361                         return 0;
9362                 }
9363         }
9364
9365         /*
9366         Please read carefully. First we use one BD which we mark as start,
9367         then for TSO or xsum we have a parsing info BD,
9368         and only then we have the rest of the TSO BDs.
9369         (don't forget to mark the last one as last,
9370         and to unmap only AFTER you write to the BD ...)
9371         And above all, all pdb sizes are in words - NOT DWORDS!
9372         */
9373
9374         pkt_prod = fp->tx_pkt_prod++;
9375         bd_prod = TX_BD(fp->tx_bd_prod);
9376
9377         /* get a tx_buf and first BD */
9378         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9379         tx_bd = &fp->tx_desc_ring[bd_prod];
9380
9381         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9382         tx_bd->general_data = (UNICAST_ADDRESS <<
9383                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9384         tx_bd->general_data |= 1; /* header nbd */
9385
9386         /* remember the first BD of the packet */
9387         tx_buf->first_bd = fp->tx_bd_prod;
9388         tx_buf->skb = skb;
9389
9390         DP(NETIF_MSG_TX_QUEUED,
9391            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9392            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9393
9394         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9395                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9396                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9397                 vlan_off += 4;
9398         } else
9399                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9400
9401         if (xmit_type) {
9402
9403                 /* turn on parsing and get a BD */
9404                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9405                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9406
9407                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9408         }
9409
9410         if (xmit_type & XMIT_CSUM) {
9411                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9412
9413                 /* for now NS flag is not used in Linux */
9414                 pbd->global_data = (hlen |
9415                                     ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9416                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9417
9418                 pbd->ip_hlen = (skb_transport_header(skb) -
9419                                 skb_network_header(skb)) / 2;
9420
9421                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9422
9423                 pbd->total_hlen = cpu_to_le16(hlen);
9424                 hlen = hlen*2 - vlan_off;
9425
9426                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9427
9428                 if (xmit_type & XMIT_CSUM_V4)
9429                         tx_bd->bd_flags.as_bitfield |=
9430                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9431                 else
9432                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9433
9434                 if (xmit_type & XMIT_CSUM_TCP) {
9435                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9436
9437                 } else {
9438                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9439
9440                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9441                         pbd->cs_offset = fix / 2;
9442
9443                         DP(NETIF_MSG_TX_QUEUED,
9444                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9445                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9446                            SKB_CS(skb));
9447
9448                         /* HW bug: fixup the CSUM */
9449                         pbd->tcp_pseudo_csum =
9450                                 bnx2x_csum_fix(skb_transport_header(skb),
9451                                                SKB_CS(skb), fix);
9452
9453                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9454                            pbd->tcp_pseudo_csum);
9455                 }
9456         }
9457
9458         mapping = pci_map_single(bp->pdev, skb->data,
9459                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9460
9461         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9462         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9463         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
9464         tx_bd->nbd = cpu_to_le16(nbd);
9465         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9466
9467         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9468            "  nbytes %d  flags %x  vlan %x\n",
9469            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9470            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9471            le16_to_cpu(tx_bd->vlan));
9472
9473         if (xmit_type & XMIT_GSO) {
9474
9475                 DP(NETIF_MSG_TX_QUEUED,
9476                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9477                    skb->len, hlen, skb_headlen(skb),
9478                    skb_shinfo(skb)->gso_size);
9479
9480                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9481
9482                 if (unlikely(skb_headlen(skb) > hlen))
9483                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9484                                                  bd_prod, ++nbd);
9485
9486                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9487                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9488                 pbd->tcp_flags = pbd_tcp_flags(skb);
9489
9490                 if (xmit_type & XMIT_GSO_V4) {
9491                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9492                         pbd->tcp_pseudo_csum =
9493                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9494                                                           ip_hdr(skb)->daddr,
9495                                                           0, IPPROTO_TCP, 0));
9496
9497                 } else
9498                         pbd->tcp_pseudo_csum =
9499                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9500                                                         &ipv6_hdr(skb)->daddr,
9501                                                         0, IPPROTO_TCP, 0));
9502
9503                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9504         }
9505
9506         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9507                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9508
9509                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9510                 tx_bd = &fp->tx_desc_ring[bd_prod];
9511
9512                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9513                                        frag->size, PCI_DMA_TODEVICE);
9514
9515                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9516                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9517                 tx_bd->nbytes = cpu_to_le16(frag->size);
9518                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9519                 tx_bd->bd_flags.as_bitfield = 0;
9520
9521                 DP(NETIF_MSG_TX_QUEUED,
9522                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9523                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9524                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9525         }
9526
9527         /* now at last mark the BD as the last BD */
9528         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9529
9530         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9531            tx_bd, tx_bd->bd_flags.as_bitfield);
9532
9533         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9534
9535         /* now send a tx doorbell, counting the next BD
9536          * if the packet contains or ends with it
9537          */
9538         if (TX_BD_POFF(bd_prod) < nbd)
9539                 nbd++;
9540
9541         if (pbd)
9542                 DP(NETIF_MSG_TX_QUEUED,
9543                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9544                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9545                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9546                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9547                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9548
9549         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9550
9551         fp->hw_tx_prods->bds_prod =
9552                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9553         mb(); /* FW restriction: must not reorder writing nbd and packets */
9554         fp->hw_tx_prods->packets_prod =
9555                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9556         DOORBELL(bp, FP_IDX(fp), 0);
9557
9558         mmiowb();
9559
9560         fp->tx_bd_prod += nbd;
9561         dev->trans_start = jiffies;
9562
9563         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9564                 netif_stop_queue(dev);
9565                 bp->eth_stats.driver_xoff++;
9566                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9567                         netif_wake_queue(dev);
9568         }
9569         fp->tx_pkt++;
9570
9571         return NETDEV_TX_OK;
9572 }
9573
9574 /* called with rtnl_lock */
9575 static int bnx2x_open(struct net_device *dev)
9576 {
9577         struct bnx2x *bp = netdev_priv(dev);
9578
9579         bnx2x_set_power_state(bp, PCI_D0);
9580
9581         return bnx2x_nic_load(bp, LOAD_OPEN);
9582 }
9583
9584 /* called with rtnl_lock */
9585 static int bnx2x_close(struct net_device *dev)
9586 {
9587         struct bnx2x *bp = netdev_priv(dev);
9588
9589         /* Unload the driver, release IRQs */
9590         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9591         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9592                 if (!CHIP_REV_IS_SLOW(bp))
9593                         bnx2x_set_power_state(bp, PCI_D3hot);
9594
9595         return 0;
9596 }
9597
9598 /* called with netif_tx_lock from set_multicast */
9599 static void bnx2x_set_rx_mode(struct net_device *dev)
9600 {
9601         struct bnx2x *bp = netdev_priv(dev);
9602         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9603         int port = BP_PORT(bp);
9604
9605         if (bp->state != BNX2X_STATE_OPEN) {
9606                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9607                 return;
9608         }
9609
9610         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9611
9612         if (dev->flags & IFF_PROMISC)
9613                 rx_mode = BNX2X_RX_MODE_PROMISC;
9614
9615         else if ((dev->flags & IFF_ALLMULTI) ||
9616                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9617                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9618
9619         else { /* some multicasts */
9620                 if (CHIP_IS_E1(bp)) {
9621                         int i, old, offset;
9622                         struct dev_mc_list *mclist;
9623                         struct mac_configuration_cmd *config =
9624                                                 bnx2x_sp(bp, mcast_config);
9625
9626                         for (i = 0, mclist = dev->mc_list;
9627                              mclist && (i < dev->mc_count);
9628                              i++, mclist = mclist->next) {
9629
9630                                 config->config_table[i].
9631                                         cam_entry.msb_mac_addr =
9632                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9633                                 config->config_table[i].
9634                                         cam_entry.middle_mac_addr =
9635                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9636                                 config->config_table[i].
9637                                         cam_entry.lsb_mac_addr =
9638                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9639                                 config->config_table[i].cam_entry.flags =
9640                                                         cpu_to_le16(port);
9641                                 config->config_table[i].
9642                                         target_table_entry.flags = 0;
9643                                 config->config_table[i].
9644                                         target_table_entry.client_id = 0;
9645                                 config->config_table[i].
9646                                         target_table_entry.vlan_id = 0;
9647
9648                                 DP(NETIF_MSG_IFUP,
9649                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9650                                    config->config_table[i].
9651                                                 cam_entry.msb_mac_addr,
9652                                    config->config_table[i].
9653                                                 cam_entry.middle_mac_addr,
9654                                    config->config_table[i].
9655                                                 cam_entry.lsb_mac_addr);
9656                         }
9657                         old = config->hdr.length_6b;
9658                         if (old > i) {
9659                                 for (; i < old; i++) {
9660                                         if (CAM_IS_INVALID(config->
9661                                                            config_table[i])) {
9662                                                 i--; /* already invalidated */
9663                                                 break;
9664                                         }
9665                                         /* invalidate */
9666                                         CAM_INVALIDATE(config->
9667                                                        config_table[i]);
9668                                 }
9669                         }
9670
9671                         if (CHIP_REV_IS_SLOW(bp))
9672                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9673                         else
9674                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9675
9676                         config->hdr.length_6b = i;
9677                         config->hdr.offset = offset;
9678                         config->hdr.client_id = BP_CL_ID(bp);
9679                         config->hdr.reserved1 = 0;
9680
9681                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9682                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9683                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9684                                       0);
9685                 } else { /* E1H */
9686                         /* Accept one or more multicasts */
9687                         struct dev_mc_list *mclist;
9688                         u32 mc_filter[MC_HASH_SIZE];
9689                         u32 crc, bit, regidx;
9690                         int i;
9691
9692                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9693
9694                         for (i = 0, mclist = dev->mc_list;
9695                              mclist && (i < dev->mc_count);
9696                              i++, mclist = mclist->next) {
9697
9698                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9699                                    "%02x:%02x:%02x:%02x:%02x:%02x\n",
9700                                    mclist->dmi_addr[0], mclist->dmi_addr[1],
9701                                    mclist->dmi_addr[2], mclist->dmi_addr[3],
9702                                    mclist->dmi_addr[4], mclist->dmi_addr[5]);
9703
9704                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9705                                 bit = (crc >> 24) & 0xff;
9706                                 regidx = bit >> 5;
9707                                 bit &= 0x1f;
9708                                 mc_filter[regidx] |= (1 << bit);
9709                         }
9710
9711                         for (i = 0; i < MC_HASH_SIZE; i++)
9712                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9713                                        mc_filter[i]);
9714                 }
9715         }
9716
9717         bp->rx_mode = rx_mode;
9718         bnx2x_set_storm_rx_mode(bp);
9719 }
9720
9721 /* called with rtnl_lock */
9722 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9723 {
9724         struct sockaddr *addr = p;
9725         struct bnx2x *bp = netdev_priv(dev);
9726
9727         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9728                 return -EINVAL;
9729
9730         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9731         if (netif_running(dev)) {
9732                 if (CHIP_IS_E1(bp))
9733                         bnx2x_set_mac_addr_e1(bp);
9734                 else
9735                         bnx2x_set_mac_addr_e1h(bp);
9736         }
9737
9738         return 0;
9739 }
9740
9741 /* called with rtnl_lock */
9742 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9743 {
9744         struct mii_ioctl_data *data = if_mii(ifr);
9745         struct bnx2x *bp = netdev_priv(dev);
9746         int err;
9747
9748         switch (cmd) {
9749         case SIOCGMIIPHY:
9750                 data->phy_id = bp->port.phy_addr;
9751
9752                 /* fallthrough */
9753
9754         case SIOCGMIIREG: {
9755                 u16 mii_regval;
9756
9757                 if (!netif_running(dev))
9758                         return -EAGAIN;
9759
9760                 mutex_lock(&bp->port.phy_mutex);
9761                 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9762                                       DEFAULT_PHY_DEV_ADDR,
9763                                       (data->reg_num & 0x1f), &mii_regval);
9764                 data->val_out = mii_regval;
9765                 mutex_unlock(&bp->port.phy_mutex);
9766                 return err;
9767         }
9768
9769         case SIOCSMIIREG:
9770                 if (!capable(CAP_NET_ADMIN))
9771                         return -EPERM;
9772
9773                 if (!netif_running(dev))
9774                         return -EAGAIN;
9775
9776                 mutex_lock(&bp->port.phy_mutex);
9777                 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9778                                        DEFAULT_PHY_DEV_ADDR,
9779                                        (data->reg_num & 0x1f), data->val_in);
9780                 mutex_unlock(&bp->port.phy_mutex);
9781                 return err;
9782
9783         default:
9784                 /* do nothing */
9785                 break;
9786         }
9787
9788         return -EOPNOTSUPP;
9789 }
9790
9791 /* called with rtnl_lock */
9792 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9793 {
9794         struct bnx2x *bp = netdev_priv(dev);
9795         int rc = 0;
9796
9797         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9798             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9799                 return -EINVAL;
9800
9801         /* This does not race with packet allocation
9802          * because the actual alloc size is
9803          * only updated as part of load
9804          */
9805         dev->mtu = new_mtu;
9806
9807         if (netif_running(dev)) {
9808                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9809                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9810         }
9811
9812         return rc;
9813 }
9814
9815 static void bnx2x_tx_timeout(struct net_device *dev)
9816 {
9817         struct bnx2x *bp = netdev_priv(dev);
9818
9819 #ifdef BNX2X_STOP_ON_ERROR
9820         if (!bp->panic)
9821                 bnx2x_panic();
9822 #endif
9823         /* This allows the netif to be shutdown gracefully before resetting */
9824         schedule_work(&bp->reset_task);
9825 }
9826
9827 #ifdef BCM_VLAN
9828 /* called with rtnl_lock */
9829 static void bnx2x_vlan_rx_register(struct net_device *dev,
9830                                    struct vlan_group *vlgrp)
9831 {
9832         struct bnx2x *bp = netdev_priv(dev);
9833
9834         bp->vlgrp = vlgrp;
9835         if (netif_running(dev))
9836                 bnx2x_set_client_config(bp);
9837 }
9838
9839 #endif
9840
9841 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9842 static void poll_bnx2x(struct net_device *dev)
9843 {
9844         struct bnx2x *bp = netdev_priv(dev);
9845
9846         disable_irq(bp->pdev->irq);
9847         bnx2x_interrupt(bp->pdev->irq, dev);
9848         enable_irq(bp->pdev->irq);
9849 }
9850 #endif
9851
9852 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9853                                     struct net_device *dev)
9854 {
9855         struct bnx2x *bp;
9856         int rc;
9857
9858         SET_NETDEV_DEV(dev, &pdev->dev);
9859         bp = netdev_priv(dev);
9860
9861         bp->dev = dev;
9862         bp->pdev = pdev;
9863         bp->flags = 0;
9864         bp->func = PCI_FUNC(pdev->devfn);
9865
9866         rc = pci_enable_device(pdev);
9867         if (rc) {
9868                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
9869                 goto err_out;
9870         }
9871
9872         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9873                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
9874                        " aborting\n");
9875                 rc = -ENODEV;
9876                 goto err_out_disable;
9877         }
9878
9879         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
9880                 printk(KERN_ERR PFX "Cannot find second PCI device"
9881                        " base address, aborting\n");
9882                 rc = -ENODEV;
9883                 goto err_out_disable;
9884         }
9885
9886         if (atomic_read(&pdev->enable_cnt) == 1) {
9887                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9888                 if (rc) {
9889                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
9890                                " aborting\n");
9891                         goto err_out_disable;
9892                 }
9893
9894                 pci_set_master(pdev);
9895                 pci_save_state(pdev);
9896         }
9897
9898         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9899         if (bp->pm_cap == 0) {
9900                 printk(KERN_ERR PFX "Cannot find power management"
9901                        " capability, aborting\n");
9902                 rc = -EIO;
9903                 goto err_out_release;
9904         }
9905
9906         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9907         if (bp->pcie_cap == 0) {
9908                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
9909                        " aborting\n");
9910                 rc = -EIO;
9911                 goto err_out_release;
9912         }
9913
9914         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
9915                 bp->flags |= USING_DAC_FLAG;
9916                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9917                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
9918                                " failed, aborting\n");
9919                         rc = -EIO;
9920                         goto err_out_release;
9921                 }
9922
9923         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9924                 printk(KERN_ERR PFX "System does not support DMA,"
9925                        " aborting\n");
9926                 rc = -EIO;
9927                 goto err_out_release;
9928         }
9929
9930         dev->mem_start = pci_resource_start(pdev, 0);
9931         dev->base_addr = dev->mem_start;
9932         dev->mem_end = pci_resource_end(pdev, 0);
9933
9934         dev->irq = pdev->irq;
9935
9936         bp->regview = ioremap_nocache(dev->base_addr,
9937                                       pci_resource_len(pdev, 0));
9938         if (!bp->regview) {
9939                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
9940                 rc = -ENOMEM;
9941                 goto err_out_release;
9942         }
9943
9944         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
9945                                         min_t(u64, BNX2X_DB_SIZE,
9946                                               pci_resource_len(pdev, 2)));
9947         if (!bp->doorbells) {
9948                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
9949                 rc = -ENOMEM;
9950                 goto err_out_unmap;
9951         }
9952
9953         bnx2x_set_power_state(bp, PCI_D0);
9954
9955         /* clean indirect addresses */
9956         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9957                                PCICFG_VENDOR_ID_OFFSET);
9958         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9959         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9960         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9961         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
9962
9963         dev->hard_start_xmit = bnx2x_start_xmit;
9964         dev->watchdog_timeo = TX_TIMEOUT;
9965
9966         dev->ethtool_ops = &bnx2x_ethtool_ops;
9967         dev->open = bnx2x_open;
9968         dev->stop = bnx2x_close;
9969         dev->set_multicast_list = bnx2x_set_rx_mode;
9970         dev->set_mac_address = bnx2x_change_mac_addr;
9971         dev->do_ioctl = bnx2x_ioctl;
9972         dev->change_mtu = bnx2x_change_mtu;
9973         dev->tx_timeout = bnx2x_tx_timeout;
9974 #ifdef BCM_VLAN
9975         dev->vlan_rx_register = bnx2x_vlan_rx_register;
9976 #endif
9977 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9978         dev->poll_controller = poll_bnx2x;
9979 #endif
9980         dev->features |= NETIF_F_SG;
9981         dev->features |= NETIF_F_HW_CSUM;
9982         if (bp->flags & USING_DAC_FLAG)
9983                 dev->features |= NETIF_F_HIGHDMA;
9984 #ifdef BCM_VLAN
9985         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
9986 #endif
9987         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9988         dev->features |= NETIF_F_TSO6;
9989
9990         return 0;
9991
9992 err_out_unmap:
9993         if (bp->regview) {
9994                 iounmap(bp->regview);
9995                 bp->regview = NULL;
9996         }
9997         if (bp->doorbells) {
9998                 iounmap(bp->doorbells);
9999                 bp->doorbells = NULL;
10000         }
10001
10002 err_out_release:
10003         if (atomic_read(&pdev->enable_cnt) == 1)
10004                 pci_release_regions(pdev);
10005
10006 err_out_disable:
10007         pci_disable_device(pdev);
10008         pci_set_drvdata(pdev, NULL);
10009
10010 err_out:
10011         return rc;
10012 }
10013
10014 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10015 {
10016         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10017
10018         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10019         return val;
10020 }
10021
10022 /* return value of 1=2.5GHz 2=5GHz */
10023 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10024 {
10025         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10026
10027         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10028         return val;
10029 }
10030
10031 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10032                                     const struct pci_device_id *ent)
10033 {
10034         static int version_printed;
10035         struct net_device *dev = NULL;
10036         struct bnx2x *bp;
10037         int rc;
10038         DECLARE_MAC_BUF(mac);
10039
10040         if (version_printed++ == 0)
10041                 printk(KERN_INFO "%s", version);
10042
10043         /* dev zeroed in init_etherdev */
10044         dev = alloc_etherdev(sizeof(*bp));
10045         if (!dev) {
10046                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10047                 return -ENOMEM;
10048         }
10049
10050         netif_carrier_off(dev);
10051
10052         bp = netdev_priv(dev);
10053         bp->msglevel = debug;
10054
10055         rc = bnx2x_init_dev(pdev, dev);
10056         if (rc < 0) {
10057                 free_netdev(dev);
10058                 return rc;
10059         }
10060
10061         rc = register_netdev(dev);
10062         if (rc) {
10063                 dev_err(&pdev->dev, "Cannot register net device\n");
10064                 goto init_one_exit;
10065         }
10066
10067         pci_set_drvdata(pdev, dev);
10068
10069         rc = bnx2x_init_bp(bp);
10070         if (rc) {
10071                 unregister_netdev(dev);
10072                 goto init_one_exit;
10073         }
10074
10075         bp->common.name = board_info[ent->driver_data].name;
10076         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10077                " IRQ %d, ", dev->name, bp->common.name,
10078                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10079                bnx2x_get_pcie_width(bp),
10080                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10081                dev->base_addr, bp->pdev->irq);
10082         printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10083         return 0;
10084
10085 init_one_exit:
10086         if (bp->regview)
10087                 iounmap(bp->regview);
10088
10089         if (bp->doorbells)
10090                 iounmap(bp->doorbells);
10091
10092         free_netdev(dev);
10093
10094         if (atomic_read(&pdev->enable_cnt) == 1)
10095                 pci_release_regions(pdev);
10096
10097         pci_disable_device(pdev);
10098         pci_set_drvdata(pdev, NULL);
10099
10100         return rc;
10101 }
10102
10103 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10104 {
10105         struct net_device *dev = pci_get_drvdata(pdev);
10106         struct bnx2x *bp;
10107
10108         if (!dev) {
10109                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10110                 return;
10111         }
10112         bp = netdev_priv(dev);
10113
10114         unregister_netdev(dev);
10115
10116         if (bp->regview)
10117                 iounmap(bp->regview);
10118
10119         if (bp->doorbells)
10120                 iounmap(bp->doorbells);
10121
10122         free_netdev(dev);
10123
10124         if (atomic_read(&pdev->enable_cnt) == 1)
10125                 pci_release_regions(pdev);
10126
10127         pci_disable_device(pdev);
10128         pci_set_drvdata(pdev, NULL);
10129 }
10130
10131 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10132 {
10133         struct net_device *dev = pci_get_drvdata(pdev);
10134         struct bnx2x *bp;
10135
10136         if (!dev) {
10137                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10138                 return -ENODEV;
10139         }
10140         bp = netdev_priv(dev);
10141
10142         rtnl_lock();
10143
10144         pci_save_state(pdev);
10145
10146         if (!netif_running(dev)) {
10147                 rtnl_unlock();
10148                 return 0;
10149         }
10150
10151         netif_device_detach(dev);
10152
10153         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10154
10155         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10156
10157         rtnl_unlock();
10158
10159         return 0;
10160 }
10161
10162 static int bnx2x_resume(struct pci_dev *pdev)
10163 {
10164         struct net_device *dev = pci_get_drvdata(pdev);
10165         struct bnx2x *bp;
10166         int rc;
10167
10168         if (!dev) {
10169                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10170                 return -ENODEV;
10171         }
10172         bp = netdev_priv(dev);
10173
10174         rtnl_lock();
10175
10176         pci_restore_state(pdev);
10177
10178         if (!netif_running(dev)) {
10179                 rtnl_unlock();
10180                 return 0;
10181         }
10182
10183         bnx2x_set_power_state(bp, PCI_D0);
10184         netif_device_attach(dev);
10185
10186         rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10187
10188         rtnl_unlock();
10189
10190         return rc;
10191 }
10192
10193 /**
10194  * bnx2x_io_error_detected - called when PCI error is detected
10195  * @pdev: Pointer to PCI device
10196  * @state: The current pci connection state
10197  *
10198  * This function is called after a PCI bus error affecting
10199  * this device has been detected.
10200  */
10201 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10202                                                 pci_channel_state_t state)
10203 {
10204         struct net_device *dev = pci_get_drvdata(pdev);
10205         struct bnx2x *bp = netdev_priv(dev);
10206
10207         rtnl_lock();
10208
10209         netif_device_detach(dev);
10210
10211         if (netif_running(dev))
10212                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10213
10214         pci_disable_device(pdev);
10215
10216         rtnl_unlock();
10217
10218         /* Request a slot reset */
10219         return PCI_ERS_RESULT_NEED_RESET;
10220 }
10221
10222 /**
10223  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10224  * @pdev: Pointer to PCI device
10225  *
10226  * Restart the card from scratch, as if from a cold-boot.
10227  */
10228 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10229 {
10230         struct net_device *dev = pci_get_drvdata(pdev);
10231         struct bnx2x *bp = netdev_priv(dev);
10232
10233         rtnl_lock();
10234
10235         if (pci_enable_device(pdev)) {
10236                 dev_err(&pdev->dev,
10237                         "Cannot re-enable PCI device after reset\n");
10238                 rtnl_unlock();
10239                 return PCI_ERS_RESULT_DISCONNECT;
10240         }
10241
10242         pci_set_master(pdev);
10243         pci_restore_state(pdev);
10244
10245         if (netif_running(dev))
10246                 bnx2x_set_power_state(bp, PCI_D0);
10247
10248         rtnl_unlock();
10249
10250         return PCI_ERS_RESULT_RECOVERED;
10251 }
10252
10253 /**
10254  * bnx2x_io_resume - called when traffic can start flowing again
10255  * @pdev: Pointer to PCI device
10256  *
10257  * This callback is called when the error recovery driver tells us that
10258  * its OK to resume normal operation.
10259  */
10260 static void bnx2x_io_resume(struct pci_dev *pdev)
10261 {
10262         struct net_device *dev = pci_get_drvdata(pdev);
10263         struct bnx2x *bp = netdev_priv(dev);
10264
10265         rtnl_lock();
10266
10267         if (netif_running(dev))
10268                 bnx2x_nic_load(bp, LOAD_OPEN);
10269
10270         netif_device_attach(dev);
10271
10272         rtnl_unlock();
10273 }
10274
10275 static struct pci_error_handlers bnx2x_err_handler = {
10276         .error_detected = bnx2x_io_error_detected,
10277         .slot_reset = bnx2x_io_slot_reset,
10278         .resume = bnx2x_io_resume,
10279 };
10280
10281 static struct pci_driver bnx2x_pci_driver = {
10282         .name        = DRV_MODULE_NAME,
10283         .id_table    = bnx2x_pci_tbl,
10284         .probe       = bnx2x_init_one,
10285         .remove      = __devexit_p(bnx2x_remove_one),
10286         .suspend     = bnx2x_suspend,
10287         .resume      = bnx2x_resume,
10288         .err_handler = &bnx2x_err_handler,
10289 };
10290
10291 static int __init bnx2x_init(void)
10292 {
10293         return pci_register_driver(&bnx2x_pci_driver);
10294 }
10295
10296 static void __exit bnx2x_cleanup(void)
10297 {
10298         pci_unregister_driver(&bnx2x_pci_driver);
10299 }
10300
10301 module_init(bnx2x_init);
10302 module_exit(bnx2x_cleanup);
10303