bnx2x: Driver info
drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
        #include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.6"
#define DRV_MODULE_RELDATE      "2008/06/23"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
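
/* Example (illustrative, not part of the driver): loading with parameters
 * from the shell.  The debug value is a netif msglevel bitmask (see the
 * NETIF_MSG_* flags in <linux/netdevice.h>); 0x2000 below is an arbitrary
 * sample value, not a recommendation:
 *
 *   modprobe bnx2x use_inta=1 disable_tpa=1 debug=0x2000
 */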

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
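
/* Illustrative sketch only: the two indirect accessors above compose into
 * a read-modify-write of a GRC register.  The helper below is hypothetical
 * (not called anywhere in the driver) and, like the accessors themselves,
 * is safe only at init time while the MCP serializes GRC window access.
 */
static inline void bnx2x_reg_ind_set_bits(struct bnx2x *bp, u32 addr, u32 bits)
{
        u32 val = bnx2x_reg_rd_ind(bp, addr);

        bnx2x_reg_wr_ind(bp, addr, val | bits);
}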

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
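
/* Rough completion-poll budget for the loop above, assuming the CPU is not
 * preempted: ~200 iterations * 5 usec = ~1 msec on real silicon, and
 * 200 * 100 msec = ~20 sec on slow emulation/FPGA platforms.
 */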

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = 0;
                end = RX_SGE_CNT*NUM_RX_SGE_PAGES;
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
                   val, port, addr, msix);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
           val, port, addr, msix);

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig attention */
                                val |= 0x0100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        /* prevent the HW from sending interrupts */
        bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i].vector);

                /* one more for the Slow Path IRQ */
                synchronize_irq(bp->msix_table[i].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
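
/* Typical acknowledgment flow (sketch only; the constants shown are just
 * examples and depend on the caller): after servicing a fastpath status
 * block, the ISR pushes the new index back to the IGU and re-enables the
 * interrupt line, e.g.
 *
 *      bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
 *                   le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
 */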

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}
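
/* The return value above is a bitmask: bit 0 - the CSTORM index moved,
 * bit 1 - the USTORM index moved.  A hypothetical poll loop could use it
 * as (sketch):
 *
 *      if (bnx2x_update_fpsb_idx(fp))
 *              re-ack the status block and run the Tx/Rx work again;
 */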

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
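
/* BD chain walked by bnx2x_free_tx_pkt() for a checksummed/TSO packet
 * (layout sketch):
 *
 *      [first bd] -> [parse bd] -> ([TSO split header bd]) ->
 *      [frag bd] -> ... -> [frag bd]
 *
 * Only the first bd (unmap_single) and the frag bds (unmap_page) carry DMA
 * mappings; the parse bd and the optional split-header bd are skipped.
 */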

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
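
/* Worked example for the accounting above (hypothetical numbers): with
 * tx_ring_size = 1000, NUM_TX_RINGS = 2, prod = 510 and cons = 500,
 * used = (510 - 500) + 2 = 12, so 1000 - 12 = 988 BDs are reported free.
 * The +NUM_TX_RINGS term reserves the "next-page" BDs that never carry
 * data, and SUB_S16() keeps the subtraction correct across 16-bit wrap.
 */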

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_cons update visible to start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_queue_stopped(bp->dev))) {

                netif_tx_lock(bp->dev);

                if (netif_queue_stopped(bp->dev) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_wake_queue(bp->dev);

                netif_tx_unlock(bp->dev);
        }
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       bp->rx_offset + RX_COPY_THRESH,
                                       PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}
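
/* SUB_S16() makes the comparison above wrap-safe: e.g. idx = 2 just after
 * the producer wrapped and last_max = 0xfff0 gives SUB_S16(2, 0xfff0) = 18,
 * which is > 0, so the maximum still advances across the 16-bit wrap.
 */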

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      BCM_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        struct page *sge;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > 8*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                sge = rx_pg->page;
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        bp->eth_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) &&
                            (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                             PARSING_FLAGS_VLAN))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                bp->dev->last_rx = jiffies;

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                bp->eth_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct tstorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
                REG_WR(bp, BAR_TSTRORM_INTMEM +
                       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
                       ((u32 *)&rx_prods)[i]);

        DP(NETIF_MSG_RX_STATUS,
           "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
           bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
        struct bnx2x *bp = fp->bp;
        u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
        u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
        int rx_pkt = 0;
        u16 queue;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return 0;
#endif

        /* The CQ "next element" is the same size as a regular element,
           that's why it's OK here */
        hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
        if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
                hw_comp_cons++;

        bd_cons = fp->rx_bd_cons;
        bd_prod = fp->rx_bd_prod;
        bd_prod_fw = bd_prod;
        sw_comp_cons = fp->rx_comp_cons;
        sw_comp_prod = fp->rx_comp_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();

        DP(NETIF_MSG_RX_STATUS,
           "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
           FP_IDX(fp), hw_comp_cons, sw_comp_cons);

        while (sw_comp_cons != hw_comp_cons) {
                struct sw_rx_bd *rx_buf = NULL;
                struct sk_buff *skb;
                union eth_rx_cqe *cqe;
                u8 cqe_fp_flags;
                u16 len, pad;

                comp_ring_cons = RCQ_BD(sw_comp_cons);
                bd_prod = RX_BD(bd_prod);
                bd_cons = RX_BD(bd_cons);

                cqe = &fp->rx_comp_ring[comp_ring_cons];
                cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

                DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
                   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
                   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
                   cqe->fast_path_cqe.rss_hash_result,
                   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
                   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

                /* is this a slowpath msg? */
                if (unlikely(CQE_TYPE(cqe_fp_flags))) {
                        bnx2x_sp_event(fp, cqe);
                        goto next_cqe;

                /* this is an rx packet */
                } else {
                        rx_buf = &fp->rx_buf_ring[bd_cons];
                        skb = rx_buf->skb;
                        len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
                        pad = cqe->fast_path_cqe.placement_offset;

                        /* If CQE is marked both TPA_START and TPA_END
                           it is a non-TPA CQE */
                        if ((!fp->disable_tpa) &&
                            (TPA_TYPE(cqe_fp_flags) !=
                                        (TPA_TYPE_START | TPA_TYPE_END))) {
                                queue = cqe->fast_path_cqe.queue_index;

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_start on queue %d\n",
                                           queue);

                                        bnx2x_tpa_start(fp, queue, skb,
                                                        bd_cons, bd_prod);
                                        goto next_rx;
                                }

                                if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
                                        DP(NETIF_MSG_RX_STATUS,
                                           "calling tpa_stop on queue %d\n",
                                           queue);

                                        if (!BNX2X_RX_SUM_FIX(cqe))
1457                                                 BNX2X_ERR("STOP on non-TCP "
1458                                                           "data\n");
1459
1460                                         /* This is the size of the linear data
1461                                            on this skb */
1462                                         len = le16_to_cpu(cqe->fast_path_cqe.
1463                                                                 len_on_bd);
1464                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1465                                                     len, cqe, comp_ring_cons);
1466 #ifdef BNX2X_STOP_ON_ERROR
1467                                         if (bp->panic)
1468                                                 return -EINVAL;
1469 #endif
1470
1471                                         bnx2x_update_sge_prod(fp,
1472                                                         &cqe->fast_path_cqe);
1473                                         goto next_cqe;
1474                                 }
1475                         }
1476
1477                         pci_dma_sync_single_for_device(bp->pdev,
1478                                         pci_unmap_addr(rx_buf, mapping),
1479                                         pad + RX_COPY_THRESH,
1480                                         PCI_DMA_FROMDEVICE);
1481                         prefetch(skb);
1482                         prefetch(((char *)(skb)) + 128);
1483
1484                         /* is this an error packet? */
1485                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1486                                 DP(NETIF_MSG_RX_ERR,
1487                                    "ERROR  flags %x  rx packet %u\n",
1488                                    cqe_fp_flags, sw_comp_cons);
1489                                 bp->eth_stats.rx_err_discard_pkt++;
1490                                 goto reuse_rx;
1491                         }
1492
1493                         /* Since we don't have a jumbo ring,
1494                          * copy small packets if mtu > 1500
1495                          */
1496                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1497                             (len <= RX_COPY_THRESH)) {
1498                                 struct sk_buff *new_skb;
1499
1500                                 new_skb = netdev_alloc_skb(bp->dev,
1501                                                            len + pad);
1502                                 if (new_skb == NULL) {
1503                                         DP(NETIF_MSG_RX_ERR,
1504                                            "ERROR  packet dropped "
1505                                            "because of alloc failure\n");
1506                                         bp->eth_stats.rx_skb_alloc_failed++;
1507                                         goto reuse_rx;
1508                                 }
1509
1510                                 /* aligned copy */
1511                                 skb_copy_from_linear_data_offset(skb, pad,
1512                                                     new_skb->data + pad, len);
1513                                 skb_reserve(new_skb, pad);
1514                                 skb_put(new_skb, len);
1515
1516                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1517
1518                                 skb = new_skb;
1519
1520                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1521                                 pci_unmap_single(bp->pdev,
1522                                         pci_unmap_addr(rx_buf, mapping),
1523                                         bp->rx_buf_use_size,
1524                                         PCI_DMA_FROMDEVICE);
1525                                 skb_reserve(skb, pad);
1526                                 skb_put(skb, len);
1527
1528                         } else {
1529                                 DP(NETIF_MSG_RX_ERR,
1530                                    "ERROR  packet dropped because "
1531                                    "of alloc failure\n");
1532                                 bp->eth_stats.rx_skb_alloc_failed++;
1533 reuse_rx:
1534                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1535                                 goto next_rx;
1536                         }
1537
1538                         skb->protocol = eth_type_trans(skb, bp->dev);
1539
1540                         skb->ip_summed = CHECKSUM_NONE;
1541                         if (bp->rx_csum) {
1542                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1543                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1544                                 else
1545                                         bp->eth_stats.hw_csum_err++;
1546                         }
1547                 }
1548
1549 #ifdef BCM_VLAN
1550                 if ((bp->vlgrp != NULL) &&
1551                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1552                      PARSING_FLAGS_VLAN))
1553                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1554                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1555                 else
1556 #endif
1557                         netif_receive_skb(skb);
1558
1559                 bp->dev->last_rx = jiffies;
1560
1561 next_rx:
1562                 rx_buf->skb = NULL;
1563
1564                 bd_cons = NEXT_RX_IDX(bd_cons);
1565                 bd_prod = NEXT_RX_IDX(bd_prod);
1566                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1567                 rx_pkt++;
1568 next_cqe:
1569                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1570                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1571
1572                 if (rx_pkt == budget)
1573                         break;
1574         } /* while */
1575
1576         fp->rx_bd_cons = bd_cons;
1577         fp->rx_bd_prod = bd_prod_fw;
1578         fp->rx_comp_cons = sw_comp_cons;
1579         fp->rx_comp_prod = sw_comp_prod;
1580
1581         /* Update producers */
1582         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1583                              fp->rx_sge_prod);
1584         mmiowb(); /* keep prod updates ordered */
1585
1586         fp->rx_pkt += rx_pkt;
1587         fp->rx_calls++;
1588
1589         return rx_pkt;
1590 }
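
/*
 * Illustrative sketch (an assumption, not the driver's actual poll
 * routine): a NAPI handler would consume the budget returned by
 * bnx2x_rx_int() roughly like this.
 */
static int bnx2x_example_poll(struct napi_struct *napi, int budget)
{
        struct bnx2x_fastpath *fp =
                container_of(napi, struct bnx2x_fastpath, napi);
        int work_done = bnx2x_rx_int(fp, budget);

        if (work_done < budget) {
                /* ring drained - leave polling mode and re-enable the
                   status block interrupt (the IGU ack is omitted here) */
                netif_rx_complete(fp->bp->dev, napi);
        }
        return work_done;
}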
1591
1592 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1593 {
1594         struct bnx2x_fastpath *fp = fp_cookie;
1595         struct bnx2x *bp = fp->bp;
1596         struct net_device *dev = bp->dev;
1597         int index = FP_IDX(fp);
1598
1599         /* Return here if interrupt is disabled */
1600         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1601                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1602                 return IRQ_HANDLED;
1603         }
1604
1605         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1606            index, FP_SB_ID(fp));
1607         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1608
1609 #ifdef BNX2X_STOP_ON_ERROR
1610         if (unlikely(bp->panic))
1611                 return IRQ_HANDLED;
1612 #endif
1613
1614         prefetch(fp->rx_cons_sb);
1615         prefetch(fp->tx_cons_sb);
1616         prefetch(&fp->status_blk->c_status_block.status_block_index);
1617         prefetch(&fp->status_blk->u_status_block.status_block_index);
1618
1619         netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
1620
1621         return IRQ_HANDLED;
1622 }
1623
1624 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1625 {
1626         struct net_device *dev = dev_instance;
1627         struct bnx2x *bp = netdev_priv(dev);
1628         u16 status = bnx2x_ack_int(bp);
1629         u16 mask;
1630
1631         /* Return here if interrupt is shared and it's not for us */
1632         if (unlikely(status == 0)) {
1633                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1634                 return IRQ_NONE;
1635         }
1636         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1637
1638 #ifdef BNX2X_STOP_ON_ERROR
1639         if (unlikely(bp->panic))
1640                 return IRQ_HANDLED;
1641 #endif
1642
1643         /* Return here if interrupt is disabled */
1644         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1645                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1646                 return IRQ_HANDLED;
1647         }
1648
1649         mask = 0x2 << bp->fp[0].sb_id;
1650         if (status & mask) {
1651                 struct bnx2x_fastpath *fp = &bp->fp[0];
1652
1653                 prefetch(fp->rx_cons_sb);
1654                 prefetch(fp->tx_cons_sb);
1655                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1656                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1657
1658                 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
1659
1660                 status &= ~mask;
1661         }
1662
1664         if (unlikely(status & 0x1)) {
1665                 schedule_work(&bp->sp_task);
1666
1667                 status &= ~0x1;
1668                 if (!status)
1669                         return IRQ_HANDLED;
1670         }
1671
1672         if (status)
1673                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1674                    status);
1675
1676         return IRQ_HANDLED;
1677 }
1678
1679 /* end of fast path */
1680
1681 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1682
1683 /* Link */
1684
1685 /*
1686  * General service functions
1687  */
1688
1689 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1690 {
1691         u32 lock_status;
1692         u32 resource_bit = (1 << resource);
1693         int func = BP_FUNC(bp);
1694         u32 hw_lock_control_reg;
1695         int cnt;
1696
1697         /* Validating that the resource is within range */
1698         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1699                 DP(NETIF_MSG_HW,
1700                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1701                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1702                 return -EINVAL;
1703         }
1704
1705         if (func <= 5) {
1706                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1707         } else {
1708                 hw_lock_control_reg =
1709                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1710         }
1711
1712         /* Validating that the resource is not already taken */
1713         lock_status = REG_RD(bp, hw_lock_control_reg);
1714         if (lock_status & resource_bit) {
1715                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1716                    lock_status, resource_bit);
1717                 return -EEXIST;
1718         }
1719
1720         /* Try for 1 second every 5ms */
1721         for (cnt = 0; cnt < 200; cnt++) {
1722                 /* Try to acquire the lock */
1723                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1724                 lock_status = REG_RD(bp, hw_lock_control_reg);
1725                 if (lock_status & resource_bit)
1726                         return 0;
1727
1728                 msleep(5);
1729         }
1730         DP(NETIF_MSG_HW, "Timeout\n");
1731         return -EAGAIN;
1732 }
1733
1734 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1735 {
1736         u32 lock_status;
1737         u32 resource_bit = (1 << resource);
1738         int func = BP_FUNC(bp);
1739         u32 hw_lock_control_reg;
1740
1741         /* Validating that the resource is within range */
1742         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1743                 DP(NETIF_MSG_HW,
1744                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1745                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1746                 return -EINVAL;
1747         }
1748
1749         if (func <= 5) {
1750                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1751         } else {
1752                 hw_lock_control_reg =
1753                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1754         }
1755
1756         /* Validating that the resource is currently taken */
1757         lock_status = REG_RD(bp, hw_lock_control_reg);
1758         if (!(lock_status & resource_bit)) {
1759                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1760                    lock_status, resource_bit);
1761                 return -EFAULT;
1762         }
1763
1764         REG_WR(bp, hw_lock_control_reg, resource_bit);
1765         return 0;
1766 }
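
/*
 * Typical usage of the acquire/release pair above (sketch, mirroring the
 * GPIO accessors below):
 *
 *      bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *      ... touch the shared MISC_REG_GPIO register ...
 *      bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */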
1767
1768 /* HW Lock for shared dual port PHYs */
1769 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1770 {
1771         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1772
1773         mutex_lock(&bp->port.phy_mutex);
1774
1775         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1776             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1777                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1778 }
1779
1780 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1781 {
1782         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1783
1784         if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1785             (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
1786                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
1787
1788         mutex_unlock(&bp->port.phy_mutex);
1789 }
1790
1791 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1792 {
1793         /* The GPIO should be swapped if swap register is set and active */
1794         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1795                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1796         int gpio_shift = gpio_num +
1797                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1798         u32 gpio_mask = (1 << gpio_shift);
1799         u32 gpio_reg;
1800
1801         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1802                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1803                 return -EINVAL;
1804         }
1805
1806         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1807         /* read GPIO and mask out everything but the float bits */
1808         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1809
1810         switch (mode) {
1811         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1812                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1813                    gpio_num, gpio_shift);
1814                 /* clear FLOAT and set CLR */
1815                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1816                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1817                 break;
1818
1819         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1820                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1821                    gpio_num, gpio_shift);
1822                 /* clear FLOAT and set SET */
1823                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1824                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1825                 break;
1826
1827         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1828                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1829                    gpio_num, gpio_shift);
1830                 /* set FLOAT */
1831                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1832                 break;
1833
1834         default:
1835                 break;
1836         }
1837
1838         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1839         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1840
1841         return 0;
1842 }
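
/*
 * Usage sketch (mirrors the fan-failure handling further down): drive the
 * PHY reset line low via GPIO 1 of the given port.
 *
 *      bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *                     MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */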
1843
1844 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1845 {
1846         u32 spio_mask = (1 << spio_num);
1847         u32 spio_reg;
1848
1849         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1850             (spio_num > MISC_REGISTERS_SPIO_7)) {
1851                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1852                 return -EINVAL;
1853         }
1854
1855         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1856         /* read SPIO and mask out everything but the float bits */
1857         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1858
1859         switch (mode) {
1860         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1861                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1862                 /* clear FLOAT and set CLR */
1863                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1864                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1865                 break;
1866
1867         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1868                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1869                 /* clear FLOAT and set SET */
1870                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1871                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1872                 break;
1873
1874         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1875                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1876                 /* set FLOAT */
1877                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1878                 break;
1879
1880         default:
1881                 break;
1882         }
1883
1884         REG_WR(bp, MISC_REG_SPIO, spio_reg);
1885         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1886
1887         return 0;
1888 }
1889
1890 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
1891 {
1892         switch (bp->link_vars.ieee_fc) {
1893         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
1894                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1895                                           ADVERTISED_Pause);
1896                 break;
1897         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
1898                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1899                                          ADVERTISED_Pause);
1900                 break;
1901         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
1902                 bp->port.advertising |= ADVERTISED_Asym_Pause;
1903                 break;
1904         default:
1905                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1906                                           ADVERTISED_Pause);
1907                 break;
1908         }
1909 }
1910
1911 static void bnx2x_link_report(struct bnx2x *bp)
1912 {
1913         if (bp->link_vars.link_up) {
1914                 if (bp->state == BNX2X_STATE_OPEN)
1915                         netif_carrier_on(bp->dev);
1916                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
1917
1918                 printk("%d Mbps ", bp->link_vars.line_speed);
1919
1920                 if (bp->link_vars.duplex == DUPLEX_FULL)
1921                         printk("full duplex");
1922                 else
1923                         printk("half duplex");
1924
1925                 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
1926                         if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
1927                                 printk(", receive ");
1928                                 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
1929                                         printk("& transmit ");
1930                         } else {
1931                                 printk(", transmit ");
1932                         }
1933                         printk("flow control ON");
1934                 }
1935                 printk("\n");
1936
1937         } else { /* link_down */
1938                 netif_carrier_off(bp->dev);
1939                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
1940         }
1941 }
1942
1943 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1944 {
1945         if (!BP_NOMCP(bp)) {
1946                 u8 rc;
1947
1948                 /* Initialize link parameters structure variables */
1949                 /* It is recommended to turn off RX FC for jumbo frames
1950                    for better performance */
1951                 if (IS_E1HMF(bp))
1952                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1953                 else if (bp->dev->mtu > 5000)
1954                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
1955                 else
1956                         bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
1957
1958                 bnx2x_acquire_phy_lock(bp);
1959                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1960                 bnx2x_release_phy_lock(bp);
1961
1962                 if (bp->link_vars.link_up)
1963                         bnx2x_link_report(bp);
1964
1965                 bnx2x_calc_fc_adv(bp);
1966
1967                 return rc;
1968         }
1969         BNX2X_ERR("Bootcode is missing - not initializing link\n");
1970         return -EINVAL;
1971 }
1972
1973 static void bnx2x_link_set(struct bnx2x *bp)
1974 {
1975         if (!BP_NOMCP(bp)) {
1976                 bnx2x_acquire_phy_lock(bp);
1977                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
1978                 bnx2x_release_phy_lock(bp);
1979
1980                 bnx2x_calc_fc_adv(bp);
1981         } else
1982                 BNX2X_ERR("Bootcode is missing - not setting link\n");
1983 }
1984
1985 static void bnx2x__link_reset(struct bnx2x *bp)
1986 {
1987         if (!BP_NOMCP(bp)) {
1988                 bnx2x_acquire_phy_lock(bp);
1989                 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
1990                 bnx2x_release_phy_lock(bp);
1991         } else
1992                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
1993 }
1994
1995 static u8 bnx2x_link_test(struct bnx2x *bp)
1996 {
1997         u8 rc;
1998
1999         bnx2x_acquire_phy_lock(bp);
2000         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2001         bnx2x_release_phy_lock(bp);
2002
2003         return rc;
2004 }
2005
2006 /* Calculates the sum of vn_min_rates.
2007    It's needed for further normalizing of the min_rates.
2008
2009    Returns:
2010      sum of vn_min_rates
2011        or
2012      0 - if all the min_rates are 0.
2013      In the latter case the fairness algorithm should be deactivated.
2014      If not all min_rates are zero, then those that are zero will
2015      be set to 1.
2016  */
2017 static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2018 {
2019         int i, port = BP_PORT(bp);
2020         u32 wsum = 0;
2021         int all_zero = 1;
2022
2023         for (i = 0; i < E1HVN_MAX; i++) {
2024                 u32 vn_cfg =
2025                         SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2026                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2027                                      FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2028                 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2029                         /* If min rate is zero - set it to 1 */
2030                         if (!vn_min_rate)
2031                                 vn_min_rate = DEF_MIN_RATE;
2032                         else
2033                                 all_zero = 0;
2034
2035                         wsum += vn_min_rate;
2036                 }
2037         }
2038
2039         /* ... only if all min rates are zero - disable FAIRNESS */
2040         if (all_zero)
2041                 return 0;
2042
2043         return wsum;
2044 }
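
/*
 * Worked example (illustrative, assuming DEF_MIN_RATE is 100): if the
 * four VN min BW fields decode to {0, 300, 0, 500}, the zero entries are
 * bumped to 100 and wsum = 100 + 300 + 100 + 500 = 1000.  Only when every
 * entry decodes to zero does the function return 0, which in turn
 * disables fairness.
 */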
2045
2046 static void bnx2x_init_port_minmax(struct bnx2x *bp,
2047                                    int en_fness,
2048                                    u16 port_rate,
2049                                    struct cmng_struct_per_port *m_cmng_port)
2050 {
2051         u32 r_param = port_rate / 8;
2052         int port = BP_PORT(bp);
2053         int i;
2054
2055         memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2056
2057         /* Enable minmax only if we are in e1hmf mode */
2058         if (IS_E1HMF(bp)) {
2059                 u32 fair_periodic_timeout_usec;
2060                 u32 t_fair;
2061
2062                 /* Enable rate shaping and fairness */
2063                 m_cmng_port->flags.cmng_vn_enable = 1;
2064                 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2065                 m_cmng_port->flags.rate_shaping_enable = 1;
2066
2067                 if (!en_fness)
2068                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2069                            "  fairness will be disabled\n");
2070
2071                 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2072                 m_cmng_port->rs_vars.rs_periodic_timeout =
2073                                                 RS_PERIODIC_TIMEOUT_USEC / 4;
2074
2075                 /* this is the threshold below which no timer arming will
2076                    occur; the 1.25 coefficient makes the threshold a little
2077                    bigger than the real time, to compensate for timer inaccuracy */
2078                 m_cmng_port->rs_vars.rs_threshold =
2079                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2080
2081                 /* resolution of fairness timer */
2082                 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2083                 /* for 10G it is 1000 usec, for 1G it is 10000 usec */
2084                 t_fair = T_FAIR_COEF / port_rate;
2085
2086                 /* this is the threshold below which we won't arm
2087                    the timer anymore */
2088                 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2089
2090                 /* we multiply by 1e3/8 to get bytes/msec.
2091                    We don't want the credits to exceed
2092                    T_FAIR*FAIR_MEM (the algorithm resolution) */
2093                 m_cmng_port->fair_vars.upper_bound =
2094                                                 r_param * t_fair * FAIR_MEM;
2095                 /* since each tick is 4 usec */
2096                 m_cmng_port->fair_vars.fairness_timeout =
2097                                                 fair_periodic_timeout_usec / 4;
2098
2099         } else {
2100                 /* Disable rate shaping and fairness */
2101                 m_cmng_port->flags.cmng_vn_enable = 0;
2102                 m_cmng_port->flags.fairness_enable = 0;
2103                 m_cmng_port->flags.rate_shaping_enable = 0;
2104
2105                 DP(NETIF_MSG_IFUP,
2106                    "Single function mode  minmax will be disabled\n");
2107         }
2108
2109         /* Store it to internal memory */
2110         for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2111                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2112                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2113                        ((u32 *)(m_cmng_port))[i]);
2114 }
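
/*
 * Worked example (illustrative, values derived from the comments above):
 * at port_rate = 10000 Mbps, r_param = 10000/8 = 1250 bytes/usec, so with
 * a 100 usec period rs_threshold = (100 * 1250 * 5)/4 = 156250 bytes, and
 * t_fair = T_FAIR_COEF/10000 = 1000 usec, matching the "for 10G it is
 * 1000 usec" note.
 */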
2115
2116 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2117                                  u32 wsum, u16 port_rate,
2118                                  struct cmng_struct_per_port *m_cmng_port)
2119 {
2120         struct rate_shaping_vars_per_vn m_rs_vn;
2121         struct fairness_vars_per_vn m_fair_vn;
2122         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2123         u16 vn_min_rate, vn_max_rate;
2124         int i;
2125
2126         /* If the function is hidden - set min and max to zero */
2127         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2128                 vn_min_rate = 0;
2129                 vn_max_rate = 0;
2130
2131         } else {
2132                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2133                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2134                 /* If FAIRNESS is enabled (not all min rates are zero) and
2135                    the current min rate is zero - set it to 1.
2136                    This is a requirement of the algorithm. */
2137                 if ((vn_min_rate == 0) && wsum)
2138                         vn_min_rate = DEF_MIN_RATE;
2139                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2140                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2141         }
2142
2143         DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
2144            "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2145
2146         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2147         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2148
2149         /* global vn counter - maximal Mbps for this vn */
2150         m_rs_vn.vn_counter.rate = vn_max_rate;
2151
2152         /* quota - number of bytes transmitted in this period */
2153         m_rs_vn.vn_counter.quota =
2154                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2155
2156 #ifdef BNX2X_PER_PROT_QOS
2157         /* per protocol counter */
2158         for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2159                 /* maximal Mbps for this protocol */
2160                 m_rs_vn.protocol_counters[protocol].rate =
2161                                                 protocol_max_rate[protocol];
2162                 /* the quota in each timer period -
2163                    number of bytes transmitted in this period */
2164                 m_rs_vn.protocol_counters[protocol].quota =
2165                         (u32)(rs_periodic_timeout_usec *
2166                           ((double)m_rs_vn.
2167                                    protocol_counters[protocol].rate/8));
2168         }
2169 #endif
2170
2171         if (wsum) {
2172                 /* credit for each period of the fairness algorithm:
2173                    number of bytes in T_FAIR (the VNs share the port rate).
2174                    wsum should not be larger than 10000, thus
2175                    T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2176                 m_fair_vn.vn_credit_delta =
2177                         max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2178                             (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2179                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2180                    m_fair_vn.vn_credit_delta);
2181         }
2182
2183 #ifdef BNX2X_PER_PROT_QOS
2184         do {
2185                 u32 protocolWeightSum = 0;
2186
2187                 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2188                         protocolWeightSum +=
2189                                         drvInit.protocol_min_rate[protocol];
2190                 /* per protocol counter -
2191                    NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2192                 if (protocolWeightSum > 0) {
2193                         for (protocol = 0;
2194                              protocol < NUM_OF_PROTOCOLS; protocol++)
2195                                 /* credit for each period of the
2196                                    fairness algorithm - number of bytes in
2197                                    T_FAIR (the protocol share the vn rate) */
2198                                    T_FAIR (the protocols share the vn rate) */
2199                                         (u32)((vn_min_rate / 8) * t_fair *
2200                                         protocol_min_rate / protocolWeightSum);
2201                 }
2202         } while (0);
2203 #endif
2204
2205         /* Store it to internal memory */
2206         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2207                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2208                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2209                        ((u32 *)(&m_rs_vn))[i]);
2210
2211         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2212                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2213                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2214                        ((u32 *)(&m_fair_vn))[i]);
2215 }
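
/*
 * Worked example (illustrative): continuing the wsum = 1000 case above,
 * and taking T_FAIR_COEF = 10^7 so that t_fair is 1000 usec at 10G, a VN
 * with vn_min_rate = 300 gets
 *      vn_credit_delta = max(300 * 10^7/(8 * 1000), 2 * fair_threshold)
 *                      = max(375000, 2 * QM_ARB_BYTES) bytes.
 */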
2216
2217 /* This function is called upon link interrupt */
2218 static void bnx2x_link_attn(struct bnx2x *bp)
2219 {
2220         int vn;
2221
2222         /* Make sure that we are synced with the current statistics */
2223         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2224
2225         bnx2x_acquire_phy_lock(bp);
2226         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2227         bnx2x_release_phy_lock(bp);
2228
2229         if (bp->link_vars.link_up) {
2230
2231                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2232                         struct host_port_stats *pstats;
2233
2234                         pstats = bnx2x_sp(bp, port_stats);
2235                         /* reset old bmac stats */
2236                         memset(&(pstats->mac_stx[0]), 0,
2237                                sizeof(struct mac_stx));
2238                 }
2239                 if ((bp->state == BNX2X_STATE_OPEN) ||
2240                     (bp->state == BNX2X_STATE_DISABLED))
2241                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2242         }
2243
2244         /* indicate link status */
2245         bnx2x_link_report(bp);
2246
2247         if (IS_E1HMF(bp)) {
2248                 int func;
2249
2250                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2251                         if (vn == BP_E1HVN(bp))
2252                                 continue;
2253
2254                         func = ((vn << 1) | BP_PORT(bp));
2255
2256                         /* Set the attention towards other drivers
2257                            on the same port */
2258                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2259                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2260                 }
2261         }
2262
2263         if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2264                 struct cmng_struct_per_port m_cmng_port;
2265                 u32 wsum;
2266                 int port = BP_PORT(bp);
2267
2268                 /* Init RATE SHAPING and FAIRNESS contexts */
2269                 wsum = bnx2x_calc_vn_wsum(bp);
2270                 bnx2x_init_port_minmax(bp, (int)wsum,
2271                                         bp->link_vars.line_speed,
2272                                         &m_cmng_port);
2273                 if (IS_E1HMF(bp))
2274                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2275                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
2276                                         wsum, bp->link_vars.line_speed,
2277                                         &m_cmng_port);
2278         }
2279 }
2280
2281 static void bnx2x__link_status_update(struct bnx2x *bp)
2282 {
2283         if (bp->state != BNX2X_STATE_OPEN)
2284                 return;
2285
2286         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2287
2288         if (bp->link_vars.link_up)
2289                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2290         else
2291                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2292
2293         /* indicate link status */
2294         bnx2x_link_report(bp);
2295 }
2296
2297 static void bnx2x_pmf_update(struct bnx2x *bp)
2298 {
2299         int port = BP_PORT(bp);
2300         u32 val;
2301
2302         bp->port.pmf = 1;
2303         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2304
2305         /* enable nig attention */
2306         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2307         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2308         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2309
2310         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2311 }
2312
2313 /* end of Link */
2314
2315 /* slow path */
2316
2317 /*
2318  * General service functions
2319  */
2320
2321 /* the slow path queue is odd since completions arrive on the fastpath ring */
2322 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2323                          u32 data_hi, u32 data_lo, int common)
2324 {
2325         int func = BP_FUNC(bp);
2326
2327         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2328            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2329            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2330            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2331            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2332
2333 #ifdef BNX2X_STOP_ON_ERROR
2334         if (unlikely(bp->panic))
2335                 return -EIO;
2336 #endif
2337
2338         spin_lock_bh(&bp->spq_lock);
2339
2340         if (!bp->spq_left) {
2341                 BNX2X_ERR("BUG! SPQ ring full!\n");
2342                 spin_unlock_bh(&bp->spq_lock);
2343                 bnx2x_panic();
2344                 return -EBUSY;
2345         }
2346
2347         /* CID needs the port number to be encoded in it */
2348         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2349                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2350                                      HW_CID(bp, cid)));
2351         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2352         if (common)
2353                 bp->spq_prod_bd->hdr.type |=
2354                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2355
2356         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2357         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2358
2359         bp->spq_left--;
2360
2361         if (bp->spq_prod_bd == bp->spq_last_bd) {
2362                 bp->spq_prod_bd = bp->spq;
2363                 bp->spq_prod_idx = 0;
2364                 DP(NETIF_MSG_TIMER, "end of spq\n");
2365
2366         } else {
2367                 bp->spq_prod_bd++;
2368                 bp->spq_prod_idx++;
2369         }
2370
2371         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2372                bp->spq_prod_idx);
2373
2374         spin_unlock_bh(&bp->spq_lock);
2375         return 0;
2376 }
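
/*
 * Usage sketch (illustrative; the command ID and mapping are
 * hypothetical): posting a slowpath element whose completion will arrive
 * on the fastpath completion ring.
 *
 *      if (bnx2x_sp_post(bp, SOME_RAMROD_CMD_ID, cid,
 *                        U64_HI(mapping), U64_LO(mapping), 0))
 *              ... SPQ was full - handle the error ...
 */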
2377
2378 /* acquire split MCP access lock register */
2379 static int bnx2x_acquire_alr(struct bnx2x *bp)
2380 {
2381         u32 i, j, val;
2382         int rc = 0;
2383
2384         might_sleep();
2385         i = 100;
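        /* poll up to i*10 times, sleeping 5 ms between tries (~5 seconds) */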
2386         for (j = 0; j < i*10; j++) {
2387                 val = (1UL << 31);
2388                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2389                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2390                 if (val & (1L << 31))
2391                         break;
2392
2393                 msleep(5);
2394         }
2395         if (!(val & (1L << 31))) {
2396                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2397                 rc = -EBUSY;
2398         }
2399
2400         return rc;
2401 }
2402
2403 /* release split MCP access lock register */
2404 static void bnx2x_release_alr(struct bnx2x *bp)
2405 {
2406         u32 val = 0;
2407
2408         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2409 }
2410
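/* Returns a bitmask of which default status block indices have changed:
 * bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.
 */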
2411 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2412 {
2413         struct host_def_status_block *def_sb = bp->def_status_blk;
2414         u16 rc = 0;
2415
2416         barrier(); /* status block is written to by the chip */
2417         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2418                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2419                 rc |= 1;
2420         }
2421         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2422                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2423                 rc |= 2;
2424         }
2425         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2426                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2427                 rc |= 4;
2428         }
2429         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2430                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2431                 rc |= 8;
2432         }
2433         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2434                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2435                 rc |= 16;
2436         }
2437         return rc;
2438 }
2439
2440 /*
2441  * slow path service functions
2442  */
2443
2444 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2445 {
2446         int port = BP_PORT(bp);
2447         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2448                        COMMAND_REG_ATTN_BITS_SET);
2449         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2450                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2451         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2452                                        NIG_REG_MASK_INTERRUPT_PORT0;
2453         u32 aeu_mask;
2454
2455         if (bp->attn_state & asserted)
2456                 BNX2X_ERR("IGU ERROR\n");
2457
2458         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2459         aeu_mask = REG_RD(bp, aeu_addr);
2460
2461         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2462            aeu_mask, asserted);
2463         aeu_mask &= ~(asserted & 0xff);
2464         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2465
2466         REG_WR(bp, aeu_addr, aeu_mask);
2467         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2468
2469         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2470         bp->attn_state |= asserted;
2471         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2472
2473         if (asserted & ATTN_HARD_WIRED_MASK) {
2474                 if (asserted & ATTN_NIG_FOR_FUNC) {
2475
2476                         /* save nig interrupt mask */
2477                         bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2478                         REG_WR(bp, nig_int_mask_addr, 0);
2479
2480                         bnx2x_link_attn(bp);
2481
2482                         /* handle unicore attn? */
2483                 }
2484                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2485                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2486
2487                 if (asserted & GPIO_2_FUNC)
2488                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2489
2490                 if (asserted & GPIO_3_FUNC)
2491                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2492
2493                 if (asserted & GPIO_4_FUNC)
2494                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2495
2496                 if (port == 0) {
2497                         if (asserted & ATTN_GENERAL_ATTN_1) {
2498                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2499                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2500                         }
2501                         if (asserted & ATTN_GENERAL_ATTN_2) {
2502                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2503                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2504                         }
2505                         if (asserted & ATTN_GENERAL_ATTN_3) {
2506                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2507                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2508                         }
2509                 } else {
2510                         if (asserted & ATTN_GENERAL_ATTN_4) {
2511                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2512                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2513                         }
2514                         if (asserted & ATTN_GENERAL_ATTN_5) {
2515                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2516                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2517                         }
2518                         if (asserted & ATTN_GENERAL_ATTN_6) {
2519                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2520                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2521                         }
2522                 }
2523
2524         } /* if hardwired */
2525
2526         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2527            asserted, hc_addr);
2528         REG_WR(bp, hc_addr, asserted);
2529
2530         /* now set back the mask */
2531         if (asserted & ATTN_NIG_FOR_FUNC)
2532                 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2533 }
2534
2535 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2536 {
2537         int port = BP_PORT(bp);
2538         int reg_offset;
2539         u32 val;
2540
2541         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2542                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2543
2544         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2545
2546                 val = REG_RD(bp, reg_offset);
2547                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2548                 REG_WR(bp, reg_offset, val);
2549
2550                 BNX2X_ERR("SPIO5 hw attention\n");
2551
2552                 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2553                 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2554                         /* Fan failure attention */
2555
2556                         /* The PHY reset is controlled by GPIO 1 */
2557                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2558                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2559                         /* Low power mode is controlled by GPIO 2 */
2560                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2561                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2562                         /* mark the failure */
2563                         bp->link_params.ext_phy_config &=
2564                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2565                         bp->link_params.ext_phy_config |=
2566                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2567                         SHMEM_WR(bp,
2568                                  dev_info.port_hw_config[port].
2569                                                         external_phy_config,
2570                                  bp->link_params.ext_phy_config);
2571                         /* log the failure */
2572                         printk(KERN_ERR PFX "Fan Failure on Network"
2573                                " Controller %s has caused the driver to"
2574                                " shutdown the card to prevent permanent"
2575                                " damage.  Please contact Dell Support for"
2576                                " assistance\n", bp->dev->name);
2577                         break;
2578
2579                 default:
2580                         break;
2581                 }
2582         }
2583
2584         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2585
2586                 val = REG_RD(bp, reg_offset);
2587                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2588                 REG_WR(bp, reg_offset, val);
2589
2590                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2591                           (attn & HW_INTERRUT_ASSERT_SET_0));
2592                 bnx2x_panic();
2593         }
2594 }
2595
2596 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2597 {
2598         u32 val;
2599
2600         if (attn & BNX2X_DOORQ_ASSERT) {
2601
2602                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2603                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2604                 /* DORQ discard attention */
2605                 if (val & 0x2)
2606                         BNX2X_ERR("FATAL error from DORQ\n");
2607         }
2608
2609         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2610
2611                 int port = BP_PORT(bp);
2612                 int reg_offset;
2613
2614                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2615                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2616
2617                 val = REG_RD(bp, reg_offset);
2618                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2619                 REG_WR(bp, reg_offset, val);
2620
2621                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2622                           (attn & HW_INTERRUT_ASSERT_SET_1));
2623                 bnx2x_panic();
2624         }
2625 }
2626
2627 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2628 {
2629         u32 val;
2630
2631         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2632
2633                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2634                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2635                 /* CFC error attention */
2636                 if (val & 0x2)
2637                         BNX2X_ERR("FATAL error from CFC\n");
2638         }
2639
2640         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2641
2642                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2643                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2644                 /* RQ_USDMDP_FIFO_OVERFLOW */
2645                 if (val & 0x18000)
2646                         BNX2X_ERR("FATAL error from PXP\n");
2647         }
2648
2649         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2650
2651                 int port = BP_PORT(bp);
2652                 int reg_offset;
2653
2654                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2655                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2656
2657                 val = REG_RD(bp, reg_offset);
2658                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2659                 REG_WR(bp, reg_offset, val);
2660
2661                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2662                           (attn & HW_INTERRUT_ASSERT_SET_2));
2663                 bnx2x_panic();
2664         }
2665 }
2666
2667 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2668 {
2669         u32 val;
2670
2671         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2672
2673                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2674                         int func = BP_FUNC(bp);
2675
2676                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2677                         bnx2x__link_status_update(bp);
2678                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2679                                                         DRV_STATUS_PMF)
2680                                 bnx2x_pmf_update(bp);
2681
2682                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2683
2684                         BNX2X_ERR("MC assert!\n");
2685                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2686                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2688                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2689                         bnx2x_panic();
2690
2691                 } else if (attn & BNX2X_MCP_ASSERT) {
2692
2693                         BNX2X_ERR("MCP assert!\n");
2694                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2695                         bnx2x_fw_dump(bp);
2696
2697                 } else
2698                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2699         }
2700
2701         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2702                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2703                 if (attn & BNX2X_GRC_TIMEOUT) {
2704                         val = CHIP_IS_E1H(bp) ?
2705                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2706                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2707                 }
2708                 if (attn & BNX2X_GRC_RSV) {
2709                         val = CHIP_IS_E1H(bp) ?
2710                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2711                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2712                 }
2713                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2714         }
2715 }
2716
2717 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2718 {
2719         struct attn_route attn;
2720         struct attn_route group_mask;
2721         int port = BP_PORT(bp);
2722         int index;
2723         u32 reg_addr;
2724         u32 val;
2725         u32 aeu_mask;
2726
2727         /* need to take HW lock because MCP or other port might also
2728            try to handle this event */
2729         bnx2x_acquire_alr(bp);
2730
2731         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2732         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2733         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2734         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2735         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2736            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2737
2738         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2739                 if (deasserted & (1 << index)) {
2740                         group_mask = bp->attn_group[index];
2741
2742                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2743                            index, group_mask.sig[0], group_mask.sig[1],
2744                            group_mask.sig[2], group_mask.sig[3]);
2745
2746                         bnx2x_attn_int_deasserted3(bp,
2747                                         attn.sig[3] & group_mask.sig[3]);
2748                         bnx2x_attn_int_deasserted1(bp,
2749                                         attn.sig[1] & group_mask.sig[1]);
2750                         bnx2x_attn_int_deasserted2(bp,
2751                                         attn.sig[2] & group_mask.sig[2]);
2752                         bnx2x_attn_int_deasserted0(bp,
2753                                         attn.sig[0] & group_mask.sig[0]);
2754
2755                         if ((attn.sig[0] & group_mask.sig[0] &
2756                                                 HW_PRTY_ASSERT_SET_0) ||
2757                             (attn.sig[1] & group_mask.sig[1] &
2758                                                 HW_PRTY_ASSERT_SET_1) ||
2759                             (attn.sig[2] & group_mask.sig[2] &
2760                                                 HW_PRTY_ASSERT_SET_2))
2761                                BNX2X_ERR("FATAL HW block parity attention\n");
2762                 }
2763         }
2764
2765         bnx2x_release_alr(bp);
2766
2767         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2768
2769         val = ~deasserted;
2770         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2771            val, reg_addr);
2772         REG_WR(bp, reg_addr, val);
2773
2774         if (~bp->attn_state & deasserted)
2775                 BNX2X_ERR("IGU ERROR\n");
2776
2777         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2778                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2779
2780         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2781         aeu_mask = REG_RD(bp, reg_addr);
2782
2783         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2784            aeu_mask, deasserted);
2785         aeu_mask |= (deasserted & 0xff);
2786         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2787
2788         REG_WR(bp, reg_addr, aeu_mask);
2789         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2790
2791         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2792         bp->attn_state &= ~deasserted;
2793         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2794 }
2795
2796 static void bnx2x_attn_int(struct bnx2x *bp)
2797 {
2798         /* read local copy of bits */
2799         u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2800         u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2801         u32 attn_state = bp->attn_state;
2802
2803         /* look for changed bits */
2804         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2805         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
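        /*
         * For illustration: attn_bits is the level currently reported by
         * the IGU, attn_ack is what was last acknowledged and attn_state
         * is what the driver believes.  A bit that is 1 in attn_bits but
         * 0 in both attn_ack and attn_state has just been raised
         * (asserted); a bit that is 0 in attn_bits but still 1 in
         * attn_ack and attn_state has just been dropped (deasserted).
         */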
2806
2807         DP(NETIF_MSG_HW,
2808            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2809            attn_bits, attn_ack, asserted, deasserted);
2810
2811         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2812                 BNX2X_ERR("BAD attention state\n");
2813
2814         /* handle bits that were raised */
2815         if (asserted)
2816                 bnx2x_attn_int_asserted(bp, asserted);
2817
2818         if (deasserted)
2819                 bnx2x_attn_int_deasserted(bp, deasserted);
2820 }
2821
2822 static void bnx2x_sp_task(struct work_struct *work)
2823 {
2824         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2825         u16 status;
2826
2827
2828         /* Return here if interrupt is disabled */
2829         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2830                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2831                 return;
2832         }
2833
2834         status = bnx2x_update_dsb_idx(bp);
2835 /*      if (status == 0)                                     */
2836 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2837
2838         DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2839
2840         /* HW attentions */
2841         if (status & 0x1)
2842                 bnx2x_attn_int(bp);
2843
2844         /* CStorm events: query_stats, port delete ramrod */
2845         if (status & 0x2)
2846                 bp->stats_pending = 0;
2847
2848         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2849                      IGU_INT_NOP, 1);
2850         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2851                      IGU_INT_NOP, 1);
2852         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2853                      IGU_INT_NOP, 1);
2854         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2855                      IGU_INT_NOP, 1);
2856         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2857                      IGU_INT_ENABLE, 1);
2858
2859 }
2860
2861 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2862 {
2863         struct net_device *dev = dev_instance;
2864         struct bnx2x *bp = netdev_priv(dev);
2865
2866         /* Return here if interrupt is disabled */
2867         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2868                 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2869                 return IRQ_HANDLED;
2870         }
2871
2872         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2873
2874 #ifdef BNX2X_STOP_ON_ERROR
2875         if (unlikely(bp->panic))
2876                 return IRQ_HANDLED;
2877 #endif
2878
2879         schedule_work(&bp->sp_task);
2880
2881         return IRQ_HANDLED;
2882 }
2883
2884 /* end of slow path */
2885
2886 /* Statistics */
2887
2888 /****************************************************************************
2889 * Macros
2890 ****************************************************************************/
2891
2892 /* sum[hi:lo] += add[hi:lo] */
2893 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2894         do { \
2895                 s_lo += a_lo; \
2896                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2897         } while (0)
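
/*
 * Worked example: adding a_hi:a_lo = 0x0:0x1 to s_hi:s_lo = 0x0:0xffffffff
 * wraps s_lo around to 0; since the new s_lo is smaller than a_lo, a carry
 * of 1 is folded into s_hi, giving 0x1:0x0 as expected.  Note the ternary
 * must be parenthesized, otherwise '+' binds tighter than '?:' and the
 * whole sum collapses to 1 or 0.
 */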
2898
2899 /* difference = minuend - subtrahend */
2900 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2901         do { \
2902                 if (m_lo < s_lo) { \
2903                         /* underflow */ \
2904                         d_hi = m_hi - s_hi; \
2905                         if (d_hi > 0) { \
2906                         /* we can 'loan' 1 */ \
2907                                 d_hi--; \
2908                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2909                         } else { \
2910                         /* m_hi <= s_hi */ \
2911                                 d_hi = 0; \
2912                                 d_lo = 0; \
2913                         } \
2914                 } else { \
2915                         /* m_lo >= s_lo */ \
2916                         if (m_hi < s_hi) { \
2917                                 d_hi = 0; \
2918                                 d_lo = 0; \
2919                         } else { \
2920                         /* m_hi >= s_hi */ \
2921                                 d_hi = m_hi - s_hi; \
2922                                 d_lo = m_lo - s_lo; \
2923                         } \
2924                 } \
2925         } while (0)
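
/*
 * Worked example of the borrow path: with minuend m_hi:m_lo = 0x1:0x0 and
 * subtrahend s_hi:s_lo = 0x0:0x1, m_lo < s_lo and d_hi starts as 1, so one
 * is "loaned" from the high dword: d_hi becomes 0 and
 * d_lo = 0 + (0xffffffff - 1) + 1 = 0xffffffff, i.e. 0x100000000 - 1.
 * If the subtrahend exceeds the minuend, the difference is clamped to 0
 * rather than being allowed to underflow.
 */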
2926
2927 #define UPDATE_STAT64(s, t) \
2928         do { \
2929                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2930                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2931                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2932                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2933                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2934                        pstats->mac_stx[1].t##_lo, diff.lo); \
2935         } while (0)
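
/*
 * In short: mac_stx[0] holds the raw counter as last read from the MAC and
 * mac_stx[1] holds the running total.  UPDATE_STAT64 computes the
 * wraparound-safe delta between the fresh hardware value new->s and the
 * saved copy, stores the fresh value, and folds the delta into the total.
 */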
2936
2937 #define UPDATE_STAT64_NIG(s, t) \
2938         do { \
2939                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2940                         diff.lo, new->s##_lo, old->s##_lo); \
2941                 ADD_64(estats->t##_hi, diff.hi, \
2942                        estats->t##_lo, diff.lo); \
2943         } while (0)
2944
2945 /* sum[hi:lo] += add */
2946 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2947         do { \
2948                 s_lo += a; \
2949                 s_hi += (s_lo < a) ? 1 : 0; \
2950         } while (0)
2951
2952 #define UPDATE_EXTEND_STAT(s) \
2953         do { \
2954                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2955                               pstats->mac_stx[1].s##_lo, \
2956                               new->s); \
2957         } while (0)
2958
2959 #define UPDATE_EXTEND_TSTAT(s, t) \
2960         do { \
2961                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2962                 old_tclient->s = le32_to_cpu(tclient->s); \
2963                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2964         } while (0)
2965
2966 #define UPDATE_EXTEND_XSTAT(s, t) \
2967         do { \
2968                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2969                 old_xclient->s = le32_to_cpu(xclient->s); \
2970                 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2971         } while (0)
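
/*
 * UPDATE_EXTEND_TSTAT/UPDATE_EXTEND_XSTAT follow the same pattern for the
 * 32-bit storm counters: the subtraction of the old snapshot is done in
 * 32 bits, so a counter that wrapped once still yields the correct delta,
 * which is then accumulated into the 64-bit fstats field.
 */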
2972
2973 /*
2974  * General service functions
2975  */
2976
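/*
 * Folds a {hi, lo} pair of u32s (high word first, as laid out in the
 * stats structures) into a long.  On a 32-bit kernel only the low dword
 * survives: e.g. hi:lo = 0x1:0x2 yields 0x100000002 on 64-bit but just
 * 0x2 on 32-bit.
 */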
2977 static inline long bnx2x_hilo(u32 *hiref)
2978 {
2979         u32 lo = *(hiref + 1);
2980 #if (BITS_PER_LONG == 64)
2981         u32 hi = *hiref;
2982
2983         return HILO_U64(hi, lo);
2984 #else
2985         return lo;
2986 #endif
2987 }
2988
2989 /*
2990  * Init service functions
2991  */
2992
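/*
 * Posts a statistics query ramrod.  The 64-bit ramrod_data is handed to
 * bnx2x_sp_post() as two u32 halves (high word first); since the stats
 * ramrod has its own slot on the SPQ, the slot consumed by bnx2x_sp_post()
 * is handed back by incrementing spq_left.
 */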
2993 static void bnx2x_storm_stats_post(struct bnx2x *bp)
2994 {
2995         if (!bp->stats_pending) {
2996                 struct eth_query_ramrod_data ramrod_data = {0};
2997                 int rc;
2998
2999                 ramrod_data.drv_counter = bp->stats_counter++;
3000                 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3001                 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3002
3003                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3004                                    ((u32 *)&ramrod_data)[1],
3005                                    ((u32 *)&ramrod_data)[0], 0);
3006                 if (rc == 0) {
3007                         /* stats ramrod has its own slot on the spq */
3008                         bp->spq_left++;
3009                         bp->stats_pending = 1;
3010                 }
3011         }
3012 }
3013
3014 static void bnx2x_stats_init(struct bnx2x *bp)
3015 {
3016         int port = BP_PORT(bp);
3017
3018         bp->executer_idx = 0;
3019         bp->stats_counter = 0;
3020
3021         /* port stats */
3022         if (!BP_NOMCP(bp))
3023                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3024         else
3025                 bp->port.port_stx = 0;
3026         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3027
3028         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3029         bp->port.old_nig_stats.brb_discard =
3030                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3031         bp->port.old_nig_stats.brb_truncate =
3032                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3033         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3034                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3035         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3036                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3037
3038         /* function stats */
3039         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3040         memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3041         memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3042         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3043
3044         bp->stats_state = STATS_STATE_DISABLED;
3045         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3046                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3047 }
3048
3049 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3050 {
3051         struct dmae_command *dmae = &bp->stats_dmae;
3052         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3053
3054         *stats_comp = DMAE_COMP_VAL;
3055
3056         /* loader */
3057         if (bp->executer_idx) {
3058                 int loader_idx = PMF_DMAE_C(bp);
3059
3060                 memset(dmae, 0, sizeof(struct dmae_command));
3061
3062                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3063                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3064                                 DMAE_CMD_DST_RESET |
3065 #ifdef __BIG_ENDIAN
3066                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3067 #else
3068                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3069 #endif
3070                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3071                                                DMAE_CMD_PORT_0) |
3072                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3073                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3074                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3075                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3076                                      sizeof(struct dmae_command) *
3077                                      (loader_idx + 1)) >> 2;
3078                 dmae->dst_addr_hi = 0;
3079                 dmae->len = sizeof(struct dmae_command) >> 2;
3080                 if (CHIP_IS_E1(bp))
3081                         dmae->len--;
3082                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3083                 dmae->comp_addr_hi = 0;
3084                 dmae->comp_val = 1;
3085
3086                 *stats_comp = 0;
3087                 bnx2x_post_dmae(bp, dmae, loader_idx);
3088
3089         } else if (bp->func_stx) {
3090                 *stats_comp = 0;
3091                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3092         }
3093 }
3094
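/*
 * Waits for the completion word in host memory to be set: the last DMAE
 * command in a sequence posts DMAE_COMP_VAL into stats_comp, and this
 * helper polls for it for up to roughly 10 ms before giving up with an
 * error message.
 */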
3095 static int bnx2x_stats_comp(struct bnx2x *bp)
3096 {
3097         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3098         int cnt = 10;
3099
3100         might_sleep();
3101         while (*stats_comp != DMAE_COMP_VAL) {
3102                 if (!cnt) {
3103                         BNX2X_ERR("timed out waiting for stats to finish\n");
3104                         break;
3105                 }
3106                 cnt--;
3107                 msleep(1);
3108         }
3109         return 1;
3110 }
3111
3112 /*
3113  * Statistics service functions
3114  */
3115
3116 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3117 {
3118         struct dmae_command *dmae;
3119         u32 opcode;
3120         int loader_idx = PMF_DMAE_C(bp);
3121         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3122
3123         /* sanity */
3124         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3125                 BNX2X_ERR("BUG!\n");
3126                 return;
3127         }
3128
3129         bp->executer_idx = 0;
3130
3131         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3132                   DMAE_CMD_C_ENABLE |
3133                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3134 #ifdef __BIG_ENDIAN
3135                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3136 #else
3137                   DMAE_CMD_ENDIANITY_DW_SWAP |
3138 #endif
3139                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3140                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3141
3142         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3143         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3144         dmae->src_addr_lo = bp->port.port_stx >> 2;
3145         dmae->src_addr_hi = 0;
3146         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3147         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3148         dmae->len = DMAE_LEN32_RD_MAX;
3149         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3150         dmae->comp_addr_hi = 0;
3151         dmae->comp_val = 1;
3152
3153         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3154         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3155         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3156         dmae->src_addr_hi = 0;
3157         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3158                                    DMAE_LEN32_RD_MAX * 4);
3159         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3160                                    DMAE_LEN32_RD_MAX * 4);
3161         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3162         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3163         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3164         dmae->comp_val = DMAE_COMP_VAL;
3165
3166         *stats_comp = 0;
3167         bnx2x_hw_stats_post(bp);
3168         bnx2x_stats_comp(bp);
3169 }
3170
3171 static void bnx2x_port_stats_init(struct bnx2x *bp)
3172 {
3173         struct dmae_command *dmae;
3174         int port = BP_PORT(bp);
3175         int vn = BP_E1HVN(bp);
3176         u32 opcode;
3177         int loader_idx = PMF_DMAE_C(bp);
3178         u32 mac_addr;
3179         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3180
3181         /* sanity */
3182         if (!bp->link_vars.link_up || !bp->port.pmf) {
3183                 BNX2X_ERR("BUG!\n");
3184                 return;
3185         }
3186
3187         bp->executer_idx = 0;
3188
3189         /* MCP */
3190         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3191                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3192                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3193 #ifdef __BIG_ENDIAN
3194                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3195 #else
3196                   DMAE_CMD_ENDIANITY_DW_SWAP |
3197 #endif
3198                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3199                   (vn << DMAE_CMD_E1HVN_SHIFT));
3200
3201         if (bp->port.port_stx) {
3202
3203                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3204                 dmae->opcode = opcode;
3205                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3206                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3207                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3208                 dmae->dst_addr_hi = 0;
3209                 dmae->len = sizeof(struct host_port_stats) >> 2;
3210                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3211                 dmae->comp_addr_hi = 0;
3212                 dmae->comp_val = 1;
3213         }
3214
3215         if (bp->func_stx) {
3216
3217                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3218                 dmae->opcode = opcode;
3219                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3220                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3221                 dmae->dst_addr_lo = bp->func_stx >> 2;
3222                 dmae->dst_addr_hi = 0;
3223                 dmae->len = sizeof(struct host_func_stats) >> 2;
3224                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3225                 dmae->comp_addr_hi = 0;
3226                 dmae->comp_val = 1;
3227         }
3228
3229         /* MAC */
3230         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3231                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3232                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3233 #ifdef __BIG_ENDIAN
3234                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3235 #else
3236                   DMAE_CMD_ENDIANITY_DW_SWAP |
3237 #endif
3238                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3239                   (vn << DMAE_CMD_E1HVN_SHIFT));
3240
3241         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3242
3243                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3244                                    NIG_REG_INGRESS_BMAC0_MEM);
3245
3246                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3247                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3248                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3249                 dmae->opcode = opcode;
3250                 dmae->src_addr_lo = (mac_addr +
3251                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3252                 dmae->src_addr_hi = 0;
3253                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3254                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3255                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3256                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3257                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3258                 dmae->comp_addr_hi = 0;
3259                 dmae->comp_val = 1;
3260
3261                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3262                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3263                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3264                 dmae->opcode = opcode;
3265                 dmae->src_addr_lo = (mac_addr +
3266                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3267                 dmae->src_addr_hi = 0;
3268                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3269                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3270                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3271                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3272                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3273                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3274                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3275                 dmae->comp_addr_hi = 0;
3276                 dmae->comp_val = 1;
3277
3278         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3279
3280                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3281
3282                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3283                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3284                 dmae->opcode = opcode;
3285                 dmae->src_addr_lo = (mac_addr +
3286                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3287                 dmae->src_addr_hi = 0;
3288                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3289                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3290                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3291                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3292                 dmae->comp_addr_hi = 0;
3293                 dmae->comp_val = 1;
3294
3295                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3296                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3297                 dmae->opcode = opcode;
3298                 dmae->src_addr_lo = (mac_addr +
3299                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3300                 dmae->src_addr_hi = 0;
3301                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3302                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3303                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3304                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3305                 dmae->len = 1;
3306                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3307                 dmae->comp_addr_hi = 0;
3308                 dmae->comp_val = 1;
3309
3310                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3311                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3312                 dmae->opcode = opcode;
3313                 dmae->src_addr_lo = (mac_addr +
3314                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3315                 dmae->src_addr_hi = 0;
3316                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3317                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3318                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3319                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3320                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3321                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3322                 dmae->comp_addr_hi = 0;
3323                 dmae->comp_val = 1;
3324         }
3325
3326         /* NIG */
3327         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3328         dmae->opcode = opcode;
3329         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3330                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3331         dmae->src_addr_hi = 0;
3332         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3333         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3334         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3335         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3336         dmae->comp_addr_hi = 0;
3337         dmae->comp_val = 1;
3338
3339         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340         dmae->opcode = opcode;
3341         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3342                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3343         dmae->src_addr_hi = 0;
3344         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3345                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3346         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3347                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3348         dmae->len = (2*sizeof(u32)) >> 2;
3349         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3350         dmae->comp_addr_hi = 0;
3351         dmae->comp_val = 1;
3352
3353         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3354         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3355                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3356                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3357 #ifdef __BIG_ENDIAN
3358                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3359 #else
3360                         DMAE_CMD_ENDIANITY_DW_SWAP |
3361 #endif
3362                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3363                         (vn << DMAE_CMD_E1HVN_SHIFT));
3364         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3365                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3366         dmae->src_addr_hi = 0;
3367         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3368                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3369         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3370                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3371         dmae->len = (2*sizeof(u32)) >> 2;
3372         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3373         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3374         dmae->comp_val = DMAE_COMP_VAL;
3375
3376         *stats_comp = 0;
3377 }
3378
3379 static void bnx2x_func_stats_init(struct bnx2x *bp)
3380 {
3381         struct dmae_command *dmae = &bp->stats_dmae;
3382         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3383
3384         /* sanity */
3385         if (!bp->func_stx) {
3386                 BNX2X_ERR("BUG!\n");
3387                 return;
3388         }
3389
3390         bp->executer_idx = 0;
3391         memset(dmae, 0, sizeof(struct dmae_command));
3392
3393         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3394                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3395                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3396 #ifdef __BIG_ENDIAN
3397                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3398 #else
3399                         DMAE_CMD_ENDIANITY_DW_SWAP |
3400 #endif
3401                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3402                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3403         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3404         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3405         dmae->dst_addr_lo = bp->func_stx >> 2;
3406         dmae->dst_addr_hi = 0;
3407         dmae->len = sizeof(struct host_func_stats) >> 2;
3408         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3409         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3410         dmae->comp_val = DMAE_COMP_VAL;
3411
3412         *stats_comp = 0;
3413 }
3414
3415 static void bnx2x_stats_start(struct bnx2x *bp)
3416 {
3417         if (bp->port.pmf)
3418                 bnx2x_port_stats_init(bp);
3419
3420         else if (bp->func_stx)
3421                 bnx2x_func_stats_init(bp);
3422
3423         bnx2x_hw_stats_post(bp);
3424         bnx2x_storm_stats_post(bp);
3425 }
3426
3427 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3428 {
3429         bnx2x_stats_comp(bp);
3430         bnx2x_stats_pmf_update(bp);
3431         bnx2x_stats_start(bp);
3432 }
3433
3434 static void bnx2x_stats_restart(struct bnx2x *bp)
3435 {
3436         bnx2x_stats_comp(bp);
3437         bnx2x_stats_start(bp);
3438 }
3439
3440 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3441 {
3442         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3443         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3444         struct regpair diff;
3445
3446         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3447         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3448         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3449         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3450         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3451         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3452         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3453         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3454         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3455         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3456         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3457         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3458         UPDATE_STAT64(tx_stat_gt127,
3459                                 tx_stat_etherstatspkts65octetsto127octets);
3460         UPDATE_STAT64(tx_stat_gt255,
3461                                 tx_stat_etherstatspkts128octetsto255octets);
3462         UPDATE_STAT64(tx_stat_gt511,
3463                                 tx_stat_etherstatspkts256octetsto511octets);
3464         UPDATE_STAT64(tx_stat_gt1023,
3465                                 tx_stat_etherstatspkts512octetsto1023octets);
3466         UPDATE_STAT64(tx_stat_gt1518,
3467                                 tx_stat_etherstatspkts1024octetsto1522octets);
3468         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3469         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3470         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3471         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3472         UPDATE_STAT64(tx_stat_gterr,
3473                                 tx_stat_dot3statsinternalmactransmiterrors);
3474         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3475 }
3476
3477 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3478 {
3479         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3480         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3481
3482         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3483         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3484         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3485         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3486         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3487         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3488         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3489         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3490         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3491         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3492         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3493         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3494         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3495         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3496         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3497         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3498         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3499         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3500         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3501         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3502         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3503         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3504         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3505         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3506         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3507         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3508         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3509         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3510         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3511         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3512         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3513 }
3514
3515 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3516 {
3517         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3518         struct nig_stats *old = &(bp->port.old_nig_stats);
3519         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3520         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3521         struct regpair diff;
3522
3523         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3524                 bnx2x_bmac_stats_update(bp);
3525
3526         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3527                 bnx2x_emac_stats_update(bp);
3528
3529         else { /* unreached */
3530                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3531                 return -1;
3532         }
3533
3534         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3535                       new->brb_discard - old->brb_discard);
3536         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3537                       new->brb_truncate - old->brb_truncate);
3538
3539         UPDATE_STAT64_NIG(egress_mac_pkt0,
3540                                         etherstatspkts1024octetsto1522octets);
3541         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3542
3543         memcpy(old, new, sizeof(struct nig_stats));
3544
3545         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3546                sizeof(struct mac_stx));
3547         estats->brb_drop_hi = pstats->brb_drop_hi;
3548         estats->brb_drop_lo = pstats->brb_drop_lo;
3549
3550         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3551
3552         return 0;
3553 }
3554
3555 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3556 {
3557         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3558         int cl_id = BP_CL_ID(bp);
3559         struct tstorm_per_port_stats *tport =
3560                                 &stats->tstorm_common.port_statistics;
3561         struct tstorm_per_client_stats *tclient =
3562                         &stats->tstorm_common.client_statistics[cl_id];
3563         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3564         struct xstorm_per_client_stats *xclient =
3565                         &stats->xstorm_common.client_statistics[cl_id];
3566         struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3567         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3568         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3569         u32 diff;
3570
3571         /* are storm stats valid? */
3572         if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3573                                                         bp->stats_counter) {
3574                 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3575                    "  tstorm counter (%d) != stats_counter (%d)\n",
3576                    tclient->stats_counter, bp->stats_counter);
3577                 return -1;
3578         }
3579         if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3580                                                         bp->stats_counter) {
3581                 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3582                    "  xstorm counter (%d) != stats_counter (%d)\n",
3583                    xclient->stats_counter, bp->stats_counter);
3584                 return -2;
3585         }
3586
3587         fstats->total_bytes_received_hi =
3588         fstats->valid_bytes_received_hi =
3589                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3590         fstats->total_bytes_received_lo =
3591         fstats->valid_bytes_received_lo =
3592                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3593
3594         estats->error_bytes_received_hi =
3595                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3596         estats->error_bytes_received_lo =
3597                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3598         ADD_64(estats->error_bytes_received_hi,
3599                estats->rx_stat_ifhcinbadoctets_hi,
3600                estats->error_bytes_received_lo,
3601                estats->rx_stat_ifhcinbadoctets_lo);
3602
3603         ADD_64(fstats->total_bytes_received_hi,
3604                estats->error_bytes_received_hi,
3605                fstats->total_bytes_received_lo,
3606                estats->error_bytes_received_lo);
3607
3608         UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3609         UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3610                                 total_multicast_packets_received);
3611         UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3612                                 total_broadcast_packets_received);
3613
3614         fstats->total_bytes_transmitted_hi =
3615                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3616         fstats->total_bytes_transmitted_lo =
3617                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3618
3619         UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3620                                 total_unicast_packets_transmitted);
3621         UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3622                                 total_multicast_packets_transmitted);
3623         UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3624                                 total_broadcast_packets_transmitted);
3625
3626         memcpy(estats, &(fstats->total_bytes_received_hi),
3627                sizeof(struct host_func_stats) - 2*sizeof(u32));
3628
3629         estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3630         estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3631         estats->brb_truncate_discard =
3632                                 le32_to_cpu(tport->brb_truncate_discard);
3633         estats->mac_discard = le32_to_cpu(tport->mac_discard);
3634
3635         old_tclient->rcv_unicast_bytes.hi =
3636                                 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3637         old_tclient->rcv_unicast_bytes.lo =
3638                                 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3639         old_tclient->rcv_broadcast_bytes.hi =
3640                                 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3641         old_tclient->rcv_broadcast_bytes.lo =
3642                                 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3643         old_tclient->rcv_multicast_bytes.hi =
3644                                 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3645         old_tclient->rcv_multicast_bytes.lo =
3646                                 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3647         old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3648
3649         old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3650         old_tclient->packets_too_big_discard =
3651                                 le32_to_cpu(tclient->packets_too_big_discard);
3652         estats->no_buff_discard =
3653         old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3654         old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3655
3656         old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3657         old_xclient->unicast_bytes_sent.hi =
3658                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3659         old_xclient->unicast_bytes_sent.lo =
3660                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3661         old_xclient->multicast_bytes_sent.hi =
3662                                 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3663         old_xclient->multicast_bytes_sent.lo =
3664                                 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3665         old_xclient->broadcast_bytes_sent.hi =
3666                                 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3667         old_xclient->broadcast_bytes_sent.lo =
3668                                 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3669
3670         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3671
3672         return 0;
3673 }
3674
3675 static void bnx2x_net_stats_update(struct bnx2x *bp)
3676 {
3677         struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3678         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3679         struct net_device_stats *nstats = &bp->dev->stats;
3680
3681         nstats->rx_packets =
3682                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3683                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3684                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3685
3686         nstats->tx_packets =
3687                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3688                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3689                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3690
3691         nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3692
3693         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3694
3695         nstats->rx_dropped = old_tclient->checksum_discard +
3696                              estats->mac_discard;
3697         nstats->tx_dropped = 0;
3698
3699         nstats->multicast =
3700                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3701
3702         nstats->collisions =
3703                         estats->tx_stat_dot3statssinglecollisionframes_lo +
3704                         estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3705                         estats->tx_stat_dot3statslatecollisions_lo +
3706                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3707
3708         estats->jabber_packets_received =
3709                                 old_tclient->packets_too_big_discard +
3710                                 estats->rx_stat_dot3statsframestoolong_lo;
3711
3712         nstats->rx_length_errors =
3713                                 estats->rx_stat_etherstatsundersizepkts_lo +
3714                                 estats->jabber_packets_received;
3715         nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3716         nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3717         nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3718         nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3719         nstats->rx_missed_errors = estats->xxoverflow_discard;
3720
3721         nstats->rx_errors = nstats->rx_length_errors +
3722                             nstats->rx_over_errors +
3723                             nstats->rx_crc_errors +
3724                             nstats->rx_frame_errors +
3725                             nstats->rx_fifo_errors +
3726                             nstats->rx_missed_errors;
3727
3728         nstats->tx_aborted_errors =
3729                         estats->tx_stat_dot3statslatecollisions_lo +
3730                         estats->tx_stat_dot3statsexcessivecollisions_lo;
3731         nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3732         nstats->tx_fifo_errors = 0;
3733         nstats->tx_heartbeat_errors = 0;
3734         nstats->tx_window_errors = 0;
3735
3736         nstats->tx_errors = nstats->tx_aborted_errors +
3737                             nstats->tx_carrier_errors;
3738 }
3739
3740 static void bnx2x_stats_update(struct bnx2x *bp)
3741 {
3742         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3743         int update = 0;
3744
3745         if (*stats_comp != DMAE_COMP_VAL)
3746                 return;
3747
3748         if (bp->port.pmf)
3749                 update = (bnx2x_hw_stats_update(bp) == 0);
3750
3751         update |= (bnx2x_storm_stats_update(bp) == 0);
3752
3753         if (update)
3754                 bnx2x_net_stats_update(bp);
3755
3756         else {
3757                 if (bp->stats_pending) {
3758                         bp->stats_pending++;
3759                         if (bp->stats_pending == 3) {
3760                                 BNX2X_ERR("stats were not updated 3 times in a row\n");
3761                                 bnx2x_panic();
3762                                 return;
3763                         }
3764                 }
3765         }
3766
3767         if (bp->msglevel & NETIF_MSG_TIMER) {
3768                 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3769                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3770                 struct net_device_stats *nstats = &bp->dev->stats;
3771                 int i;
3772
3773                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3774                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3775                                   "  tx pkt (%lx)\n",
3776                        bnx2x_tx_avail(bp->fp),
3777                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3778                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3779                                   "  rx pkt (%lx)\n",
3780                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3781                              bp->fp->rx_comp_cons),
3782                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3783                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
3784                        netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3785                        estats->driver_xoff, estats->brb_drop_lo);
3786                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3787                         "packets_too_big_discard %u  no_buff_discard %u  "
3788                         "mac_discard %u  mac_filter_discard %u  "
3789                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3790                         "ttl0_discard %u\n",
3791                        old_tclient->checksum_discard,
3792                        old_tclient->packets_too_big_discard,
3793                        old_tclient->no_buff_discard, estats->mac_discard,
3794                        estats->mac_filter_discard, estats->xxoverflow_discard,
3795                        estats->brb_truncate_discard,
3796                        old_tclient->ttl0_discard);
3797
3798                 for_each_queue(bp, i) {
3799                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3800                                bnx2x_fp(bp, i, tx_pkt),
3801                                bnx2x_fp(bp, i, rx_pkt),
3802                                bnx2x_fp(bp, i, rx_calls));
3803                 }
3804         }
3805
3806         bnx2x_hw_stats_post(bp);
3807         bnx2x_storm_stats_post(bp);
3808 }
3809
3810 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3811 {
3812         struct dmae_command *dmae;
3813         u32 opcode;
3814         int loader_idx = PMF_DMAE_C(bp);
3815         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3816
3817         bp->executer_idx = 0;
3818
3819         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3820                   DMAE_CMD_C_ENABLE |
3821                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3822 #ifdef __BIG_ENDIAN
3823                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3824 #else
3825                   DMAE_CMD_ENDIANITY_DW_SWAP |
3826 #endif
3827                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3828                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3829
3830         if (bp->port.port_stx) {
3831
3832                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3833                 if (bp->func_stx)
3834                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3835                 else
3836                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3837                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3838                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3839                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3840                 dmae->dst_addr_hi = 0;
3841                 dmae->len = sizeof(struct host_port_stats) >> 2;
3842                 if (bp->func_stx) {
3843                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3844                         dmae->comp_addr_hi = 0;
3845                         dmae->comp_val = 1;
3846                 } else {
3847                         dmae->comp_addr_lo =
3848                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3849                         dmae->comp_addr_hi =
3850                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3851                         dmae->comp_val = DMAE_COMP_VAL;
3852
3853                         *stats_comp = 0;
3854                 }
3855         }
3856
3857         if (bp->func_stx) {
3858
3859                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3860                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3861                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3862                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3863                 dmae->dst_addr_lo = bp->func_stx >> 2;
3864                 dmae->dst_addr_hi = 0;
3865                 dmae->len = sizeof(struct host_func_stats) >> 2;
3866                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3867                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3868                 dmae->comp_val = DMAE_COMP_VAL;
3869
3870                 *stats_comp = 0;
3871         }
3872 }
3873
3874 static void bnx2x_stats_stop(struct bnx2x *bp)
3875 {
3876         int update = 0;
3877
3878         bnx2x_stats_comp(bp);
3879
3880         if (bp->port.pmf)
3881                 update = (bnx2x_hw_stats_update(bp) == 0);
3882
3883         update |= (bnx2x_storm_stats_update(bp) == 0);
3884
3885         if (update) {
3886                 bnx2x_net_stats_update(bp);
3887
3888                 if (bp->port.pmf)
3889                         bnx2x_port_stats_stop(bp);
3890
3891                 bnx2x_hw_stats_post(bp);
3892                 bnx2x_stats_comp(bp);
3893         }
3894 }
3895
3896 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3897 {
3898 }
3899
3900 static const struct {
3901         void (*action)(struct bnx2x *bp);
3902         enum bnx2x_stats_state next_state;
3903 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3904 /* state        event   */
3905 {
3906 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3907 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
3908 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3909 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3910 },
3911 {
3912 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
3913 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
3914 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
3915 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
3916 }
3917 };
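
/*
 * The table is indexed as bnx2x_stats_stm[state][event]: e.g. a LINK_UP
 * event in STATS_STATE_DISABLED runs bnx2x_stats_start() and moves the
 * machine to STATS_STATE_ENABLED, while an UPDATE event in the disabled
 * state is deliberately a no-op.
 */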
3918
3919 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3920 {
3921         enum bnx2x_stats_state state = bp->stats_state;
3922
3923         bnx2x_stats_stm[state][event].action(bp);
3924         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3925
3926         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3927                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3928                    state, event, bp->stats_state);
3929 }
3930
3931 static void bnx2x_timer(unsigned long data)
3932 {
3933         struct bnx2x *bp = (struct bnx2x *) data;
3934
3935         if (!netif_running(bp->dev))
3936                 return;
3937
3938         if (atomic_read(&bp->intr_sem) != 0)
3939                 goto timer_restart;
3940
3941         if (poll) {
3942                 struct bnx2x_fastpath *fp = &bp->fp[0];
3943                 int rc;
3944
3945                 bnx2x_tx_int(fp, 1000);
3946                 rc = bnx2x_rx_int(fp, 1000);
3947         }
3948
3949         if (!BP_NOMCP(bp)) {
3950                 int func = BP_FUNC(bp);
3951                 u32 drv_pulse;
3952                 u32 mcp_pulse;
3953
3954                 ++bp->fw_drv_pulse_wr_seq;
3955                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3956                 /* TBD - add SYSTEM_TIME */
3957                 drv_pulse = bp->fw_drv_pulse_wr_seq;
3958                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
3959
3960                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
3961                              MCP_PULSE_SEQ_MASK);
3962                 /* The delta between driver pulse and mcp response
3963                  * should be 1 (before mcp response) or 0 (after mcp response)
3964                  */
3965                 if ((drv_pulse != mcp_pulse) &&
3966                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3967                         /* someone lost a heartbeat... */
3968                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3969                                   drv_pulse, mcp_pulse);
3970                 }
3971         }
3972
3973         if ((bp->state == BNX2X_STATE_OPEN) ||
3974             (bp->state == BNX2X_STATE_DISABLED))
3975                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
3976
3977 timer_restart:
3978         mod_timer(&bp->timer, jiffies + bp->current_interval);
3979 }
3980
3981 /* end of Statistics */
3982
3983 /* nic init */
3984
3985 /*
3986  * nic init service functions
3987  */
3988
3989 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
3990 {
3991         int port = BP_PORT(bp);
3992
3993         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3994                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3995                         sizeof(struct ustorm_status_block)/4);
3996         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3997                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3998                         sizeof(struct cstorm_status_block)/4);
3999 }
4000
4001 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4002                           dma_addr_t mapping, int sb_id)
4003 {
4004         int port = BP_PORT(bp);
4005         int func = BP_FUNC(bp);
4006         int index;
4007         u64 section;
4008
4009         /* USTORM */
4010         section = ((u64)mapping) + offsetof(struct host_status_block,
4011                                             u_status_block);
4012         sb->u_status_block.status_block_id = sb_id;
4013
4014         REG_WR(bp, BAR_USTRORM_INTMEM +
4015                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4016         REG_WR(bp, BAR_USTRORM_INTMEM +
4017                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4018                U64_HI(section));
4019         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4020                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4021
4022         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4023                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4024                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4025
4026         /* CSTORM */
4027         section = ((u64)mapping) + offsetof(struct host_status_block,
4028                                             c_status_block);
4029         sb->c_status_block.status_block_id = sb_id;
4030
4031         REG_WR(bp, BAR_CSTRORM_INTMEM +
4032                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4033         REG_WR(bp, BAR_CSTRORM_INTMEM +
4034                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4035                U64_HI(section));
4036         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4037                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4038
4039         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4040                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4041                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4042
4043         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4044 }
4045
4046 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4047 {
4048         int func = BP_FUNC(bp);
4049
4050         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4051                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4052                         sizeof(struct ustorm_def_status_block)/4);
4053         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4054                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4055                         sizeof(struct cstorm_def_status_block)/4);
4056         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4057                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4058                         sizeof(struct xstorm_def_status_block)/4);
4059         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4060                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4061                         sizeof(struct tstorm_def_status_block)/4);
4062 }
4063
4064 static void bnx2x_init_def_sb(struct bnx2x *bp,
4065                               struct host_def_status_block *def_sb,
4066                               dma_addr_t mapping, int sb_id)
4067 {
4068         int port = BP_PORT(bp);
4069         int func = BP_FUNC(bp);
4070         int index, val, reg_offset;
4071         u64 section;
4072
4073         /* ATTN */
4074         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4075                                             atten_status_block);
4076         def_sb->atten_status_block.status_block_id = sb_id;
4077
4078         bp->attn_state = 0;
4079
4080         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4081                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4082
4083         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4084                 bp->attn_group[index].sig[0] = REG_RD(bp,
4085                                                      reg_offset + 0x10*index);
4086                 bp->attn_group[index].sig[1] = REG_RD(bp,
4087                                                reg_offset + 0x4 + 0x10*index);
4088                 bp->attn_group[index].sig[2] = REG_RD(bp,
4089                                                reg_offset + 0x8 + 0x10*index);
4090                 bp->attn_group[index].sig[3] = REG_RD(bp,
4091                                                reg_offset + 0xc + 0x10*index);
4092         }
4093
4094         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4095                              HC_REG_ATTN_MSG0_ADDR_L);
4096
4097         REG_WR(bp, reg_offset, U64_LO(section));
4098         REG_WR(bp, reg_offset + 4, U64_HI(section));
4099
4100         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4101
4102         val = REG_RD(bp, reg_offset);
4103         val |= sb_id;
4104         REG_WR(bp, reg_offset, val);
4105
4106         /* USTORM */
4107         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4108                                             u_def_status_block);
4109         def_sb->u_def_status_block.status_block_id = sb_id;
4110
4111         REG_WR(bp, BAR_USTRORM_INTMEM +
4112                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4113         REG_WR(bp, BAR_USTRORM_INTMEM +
4114                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4115                U64_HI(section));
4116         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4117                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4118
4119         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4120                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4121                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4122
4123         /* CSTORM */
4124         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4125                                             c_def_status_block);
4126         def_sb->c_def_status_block.status_block_id = sb_id;
4127
4128         REG_WR(bp, BAR_CSTRORM_INTMEM +
4129                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4130         REG_WR(bp, BAR_CSTRORM_INTMEM +
4131                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4132                U64_HI(section));
4133         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4134                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4135
4136         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4137                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4138                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4139
4140         /* TSTORM */
4141         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4142                                             t_def_status_block);
4143         def_sb->t_def_status_block.status_block_id = sb_id;
4144
4145         REG_WR(bp, BAR_TSTRORM_INTMEM +
4146                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4147         REG_WR(bp, BAR_TSTRORM_INTMEM +
4148                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4149                U64_HI(section));
4150         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4151                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4152
4153         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4154                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4155                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4156
4157         /* XSTORM */
4158         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4159                                             x_def_status_block);
4160         def_sb->x_def_status_block.status_block_id = sb_id;
4161
4162         REG_WR(bp, BAR_XSTRORM_INTMEM +
4163                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4164         REG_WR(bp, BAR_XSTRORM_INTMEM +
4165                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4166                U64_HI(section));
4167         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4168                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4169
4170         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4171                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4172                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4173
4174         bp->stats_pending = 0;
4175         bp->set_mac_pending = 0;
4176
4177         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4178 }
4179
4180 static void bnx2x_update_coalesce(struct bnx2x *bp)
4181 {
4182         int port = BP_PORT(bp);
4183         int i;
4184
4185         for_each_queue(bp, i) {
4186                 int sb_id = bp->fp[i].sb_id;
4187
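                /* rx_ticks/tx_ticks are in usec; the /12 presumably converts
                 * to the HC timer's coarser hardware tick (assumption
                 * inferred from the divisor).  Writing 1 to an HC_DISABLE
                 * word (ticks == 0) turns coalescing off for that index.
                 */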
4188                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4189                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4190                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4191                                                     U_SB_ETH_RX_CQ_INDEX),
4192                         bp->rx_ticks/12);
4193                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4194                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4195                                                      U_SB_ETH_RX_CQ_INDEX),
4196                          bp->rx_ticks ? 0 : 1);
4197                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4198                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4199                                                      U_SB_ETH_RX_BD_INDEX),
4200                          bp->rx_ticks ? 0 : 1);
4201
4202                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4203                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4204                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4205                                                     C_SB_ETH_TX_CQ_INDEX),
4206                         bp->tx_ticks/12);
4207                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4208                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4209                                                      C_SB_ETH_TX_CQ_INDEX),
4210                          bp->tx_ticks ? 0 : 1);
4211         }
4212 }
4213
4214 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4215                                        struct bnx2x_fastpath *fp, int last)
4216 {
4217         int i;
4218
4219         for (i = 0; i < last; i++) {
4220                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4221                 struct sk_buff *skb = rx_buf->skb;
4222
4223                 if (skb == NULL) {
4224                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4225                         continue;
4226                 }
4227
4228                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4229                         pci_unmap_single(bp->pdev,
4230                                          pci_unmap_addr(rx_buf, mapping),
4231                                          bp->rx_buf_use_size,
4232                                          PCI_DMA_FROMDEVICE);
4233
4234                 dev_kfree_skb(skb);
4235                 rx_buf->skb = NULL;
4236         }
4237 }
4238
4239 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4240 {
4241         int func = BP_FUNC(bp);
4242         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4243                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4244         u16 ring_prod, cqe_ring_prod;
4245         int i, j;
4246
4247         bp->rx_buf_use_size = bp->dev->mtu;
4248         bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4249         bp->rx_buf_size = bp->rx_buf_use_size + 64;
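        /* the extra 64 bytes are slack on top of the useful buffer size
         * (assumption: headroom/alignment for the allocated skb) */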
4250
4251         if (bp->flags & TPA_ENABLE_FLAG) {
4252                 DP(NETIF_MSG_IFUP,
4253                    "rx_buf_use_size %d  rx_buf_size %d  effective_mtu %d\n",
4254                    bp->rx_buf_use_size, bp->rx_buf_size,
4255                    bp->dev->mtu + ETH_OVREHEAD);
4256
4257                 for_each_queue(bp, j) {
4258                         struct bnx2x_fastpath *fp = &bp->fp[j];
4259
4260                         for (i = 0; i < max_agg_queues; i++) {
4261                                 fp->tpa_pool[i].skb =
4262                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4263                                 if (!fp->tpa_pool[i].skb) {
4264                                         BNX2X_ERR("Failed to allocate TPA "
4265                                                   "skb pool for queue[%d] - "
4266                                                   "disabling TPA on this "
4267                                                   "queue!\n", j);
4268                                         bnx2x_free_tpa_pool(bp, fp, i);
4269                                         fp->disable_tpa = 1;
4270                                         break;
4271                                 }
4272                                 /* note: fp, not bp->fp (== queue 0) */
4273                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4274                                                    mapping, 0);
4275                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4276                         }
4277                 }
4278         }
4279
4280         for_each_queue(bp, j) {
4281                 struct bnx2x_fastpath *fp = &bp->fp[j];
4282
4283                 fp->rx_bd_cons = 0;
4284                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4285                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4286
4287                 /* "next page" elements initialization */
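                /* The last elements of each ring page are reserved to hold
                 * the physical address of the next page, chaining the pages
                 * into a ring - hence the writes below at *_CNT * i - 2
                 * (BD/SGE rings) and RCQ_DESC_CNT * i - 1 (CQ ring).
                 */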
4288                 /* SGE ring */
4289                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4290                         struct eth_rx_sge *sge;
4291
4292                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4293                         sge->addr_hi =
4294                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4295                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4296                         sge->addr_lo =
4297                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4298                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4299                 }
4300
4301                 bnx2x_init_sge_ring_bit_mask(fp);
4302
4303                 /* RX BD ring */
4304                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4305                         struct eth_rx_bd *rx_bd;
4306
4307                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4308                         rx_bd->addr_hi =
4309                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4310                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4311                         rx_bd->addr_lo =
4312                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4313                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4314                 }
4315
4316                 /* CQ ring */
4317                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4318                         struct eth_rx_cqe_next_page *nextpg;
4319
4320                         nextpg = (struct eth_rx_cqe_next_page *)
4321                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4322                         nextpg->addr_hi =
4323                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4324                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4325                         nextpg->addr_lo =
4326                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4327                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4328                 }
4329
4330                 /* Allocate SGEs and initialize the ring elements */
4331                 for (i = 0, ring_prod = 0;
4332                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4333
4334                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4335                                 BNX2X_ERR("was only able to allocate "
4336                                           "%d rx sges\n", i);
4337                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4338                                 /* Cleanup already allocated elements */
4339                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4340                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4341                                 fp->disable_tpa = 1;
4342                                 ring_prod = 0;
4343                                 break;
4344                         }
4345                         ring_prod = NEXT_SGE_IDX(ring_prod);
4346                 }
4347                 fp->rx_sge_prod = ring_prod;
4348
4349                 /* Allocate BDs and initialize BD ring */
4350                 fp->rx_comp_cons = 0;
4351                 cqe_ring_prod = ring_prod = 0;
4352                 for (i = 0; i < bp->rx_ring_size; i++) {
4353                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4354                                 BNX2X_ERR("was only able to allocate "
4355                                           "%d rx skbs\n", i);
4356                                 bp->eth_stats.rx_skb_alloc_failed++;
4357                                 break;
4358                         }
4359                         ring_prod = NEXT_RX_IDX(ring_prod);
4360                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4361                         WARN_ON(ring_prod <= i);
4362                 }
4363
4364                 fp->rx_bd_prod = ring_prod;
4365                 /* must not have more available CQEs than BDs */
4366                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4367                                        cqe_ring_prod);
4368                 fp->rx_pkt = fp->rx_calls = 0;
4369
4370                 /* Warning!
4371                  * this will generate an interrupt (to the TSTORM);
4372                  * it must only be done after the chip is initialized
4373                  */
4374                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4375                                      fp->rx_sge_prod);
4376                 if (j != 0)
4377                         continue;
4378
4379                 REG_WR(bp, BAR_USTRORM_INTMEM +
4380                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4381                        U64_LO(fp->rx_comp_mapping));
4382                 REG_WR(bp, BAR_USTRORM_INTMEM +
4383                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4384                        U64_HI(fp->rx_comp_mapping));
4385         }
4386 }
4387
4388 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4389 {
4390         int i, j;
4391
4392         for_each_queue(bp, j) {
4393                 struct bnx2x_fastpath *fp = &bp->fp[j];
4394
4395                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4396                         struct eth_tx_bd *tx_bd =
4397                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4398
4399                         tx_bd->addr_hi =
4400                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4401                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4402                         tx_bd->addr_lo =
4403                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4404                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4405                 }
4406
4407                 fp->tx_pkt_prod = 0;
4408                 fp->tx_pkt_cons = 0;
4409                 fp->tx_bd_prod = 0;
4410                 fp->tx_bd_cons = 0;
4411                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4412                 fp->tx_pkt = 0;
4413         }
4414 }
4415
4416 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4417 {
4418         int func = BP_FUNC(bp);
4419
4420         spin_lock_init(&bp->spq_lock);
4421
4422         bp->spq_left = MAX_SPQ_PENDING;
4423         bp->spq_prod_idx = 0;
4424         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4425         bp->spq_prod_bd = bp->spq;
4426         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4427
4428         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4429                U64_LO(bp->spq_mapping));
4430         REG_WR(bp,
4431                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4432                U64_HI(bp->spq_mapping));
4433
4434         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4435                bp->spq_prod_idx);
4436 }
4437
4438 static void bnx2x_init_context(struct bnx2x *bp)
4439 {
4440         int i;
4441
4442         for_each_queue(bp, i) {
4443                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4444                 struct bnx2x_fastpath *fp = &bp->fp[i];
4445                 u8 sb_id = FP_SB_ID(fp);
4446
4447                 context->xstorm_st_context.tx_bd_page_base_hi =
4448                                                 U64_HI(fp->tx_desc_mapping);
4449                 context->xstorm_st_context.tx_bd_page_base_lo =
4450                                                 U64_LO(fp->tx_desc_mapping);
4451                 context->xstorm_st_context.db_data_addr_hi =
4452                                                 U64_HI(fp->tx_prods_mapping);
4453                 context->xstorm_st_context.db_data_addr_lo =
4454                                                 U64_LO(fp->tx_prods_mapping);
4455                 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4456                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4457
4458                 context->ustorm_st_context.common.sb_index_numbers =
4459                                                 BNX2X_RX_SB_INDEX_NUM;
4460                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4461                 context->ustorm_st_context.common.status_block_id = sb_id;
4462                 context->ustorm_st_context.common.flags =
4463                         USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4464                 context->ustorm_st_context.common.mc_alignment_size = 64;
4465                 context->ustorm_st_context.common.bd_buff_size =
4466                                                 bp->rx_buf_use_size;
4467                 context->ustorm_st_context.common.bd_page_base_hi =
4468                                                 U64_HI(fp->rx_desc_mapping);
4469                 context->ustorm_st_context.common.bd_page_base_lo =
4470                                                 U64_LO(fp->rx_desc_mapping);
4471                 if (!fp->disable_tpa) {
4472                         context->ustorm_st_context.common.flags |=
4473                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4474                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4475                         context->ustorm_st_context.common.sge_buff_size =
4476                                         (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4477                         context->ustorm_st_context.common.sge_page_base_hi =
4478                                                 U64_HI(fp->rx_sge_mapping);
4479                         context->ustorm_st_context.common.sge_page_base_lo =
4480                                                 U64_LO(fp->rx_sge_mapping);
4481                 }
4482
4483                 context->cstorm_st_context.sb_index_number =
4484                                                 C_SB_ETH_TX_CQ_INDEX;
4485                 context->cstorm_st_context.status_block_id = sb_id;
4486
4487                 context->xstorm_ag_context.cdu_reserved =
4488                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4489                                                CDU_REGION_NUMBER_XCM_AG,
4490                                                ETH_CONNECTION_TYPE);
4491                 context->ustorm_ag_context.cdu_usage =
4492                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4493                                                CDU_REGION_NUMBER_UCM_AG,
4494                                                ETH_CONNECTION_TYPE);
4495         }
4496 }
4497
4498 static void bnx2x_init_ind_table(struct bnx2x *bp)
4499 {
4500         int port = BP_PORT(bp);
4501         int i;
4502
4503         if (!is_multi(bp))
4504                 return;
4505
4506         DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
4507         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4508                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4509                         TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4510                         i % bp->num_queues);
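        /* each slot maps an RSS hash result to a queue; i % num_queues
           spreads the flows round-robin over all active queues */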
4511
4512         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4513 }
4514
4515 static void bnx2x_set_client_config(struct bnx2x *bp)
4516 {
4517         struct tstorm_eth_client_config tstorm_client = {0};
4518         int port = BP_PORT(bp);
4519         int i;
4520
4521         tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
4522         tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4523         tstorm_client.config_flags =
4524                                 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4525 #ifdef BCM_VLAN
4526         if (bp->rx_mode && bp->vlgrp) {
4527                 tstorm_client.config_flags |=
4528                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4529                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4530         }
4531 #endif
4532
4533         if (bp->flags & TPA_ENABLE_FLAG) {
4534                 tstorm_client.max_sges_for_packet =
4535                         BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4536                 tstorm_client.max_sges_for_packet =
4537                         ((tstorm_client.max_sges_for_packet +
4538                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4539                         PAGES_PER_SGE_SHIFT;
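                /* i.e. the number of pages needed for an MTU-sized packet,
                 * rounded up to whole SGEs: a ceiling division of the page
                 * count by PAGES_PER_SGE */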
4540
4541                 tstorm_client.config_flags |=
4542                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4543         }
4544
4545         for_each_queue(bp, i) {
4546                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4547                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4548                        ((u32 *)&tstorm_client)[0]);
4549                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4550                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4551                        ((u32 *)&tstorm_client)[1]);
4552         }
4553
4554         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4555            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4556 }
4557
4558 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4559 {
4560         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4561         int mode = bp->rx_mode;
4562         int mask = (1 << BP_L_ID(bp));
4563         int func = BP_FUNC(bp);
4564         int i;
4565
4566         DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4567
4568         switch (mode) {
4569         case BNX2X_RX_MODE_NONE: /* no Rx */
4570                 tstorm_mac_filter.ucast_drop_all = mask;
4571                 tstorm_mac_filter.mcast_drop_all = mask;
4572                 tstorm_mac_filter.bcast_drop_all = mask;
4573                 break;
4574         case BNX2X_RX_MODE_NORMAL:
4575                 tstorm_mac_filter.bcast_accept_all = mask;
4576                 break;
4577         case BNX2X_RX_MODE_ALLMULTI:
4578                 tstorm_mac_filter.mcast_accept_all = mask;
4579                 tstorm_mac_filter.bcast_accept_all = mask;
4580                 break;
4581         case BNX2X_RX_MODE_PROMISC:
4582                 tstorm_mac_filter.ucast_accept_all = mask;
4583                 tstorm_mac_filter.mcast_accept_all = mask;
4584                 tstorm_mac_filter.bcast_accept_all = mask;
4585                 break;
4586         default:
4587                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4588                 break;
4589         }
4590
4591         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4592                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4593                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4594                        ((u32 *)&tstorm_mac_filter)[i]);
4595
4596 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4597                    ((u32 *)&tstorm_mac_filter)[i]); */
4598         }
4599
4600         if (mode != BNX2X_RX_MODE_NONE)
4601                 bnx2x_set_client_config(bp);
4602 }
4603
4604 static void bnx2x_init_internal_common(struct bnx2x *bp)
4605 {
4606         int i;
4607
4608         /* Zero this manually as its initialization is
4609            currently missing in the initTool */
4610         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4611                 REG_WR(bp, BAR_USTRORM_INTMEM +
4612                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4613 }
4614
4615 static void bnx2x_init_internal_port(struct bnx2x *bp)
4616 {
4617         int port = BP_PORT(bp);
4618
4619         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4620         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4621         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4622         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4623 }
4624
4625 static void bnx2x_init_internal_func(struct bnx2x *bp)
4626 {
4627         struct tstorm_eth_function_common_config tstorm_config = {0};
4628         struct stats_indication_flags stats_flags = {0};
4629         int port = BP_PORT(bp);
4630         int func = BP_FUNC(bp);
4631         int i;
4632         u16 max_agg_size;
4633
4634         if (is_multi(bp)) {
4635                 tstorm_config.config_flags = MULTI_FLAGS;
4636                 tstorm_config.rss_result_mask = MULTI_MASK;
4637         }
4638
4639         tstorm_config.leading_client_id = BP_L_ID(bp);
4640
4641         REG_WR(bp, BAR_TSTRORM_INTMEM +
4642                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4643                (*(u32 *)&tstorm_config));
4644
4645         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4646         bnx2x_set_storm_rx_mode(bp);
4647
4648         /* reset xstorm per client statistics */
4649         for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4650                 REG_WR(bp, BAR_XSTRORM_INTMEM +
4651                        XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4652                        i*4, 0);
4653         }
4654         /* reset tstorm per client statistics */
4655         for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4656                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4657                        TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4658                        i*4, 0);
4659         }
4660
4661         /* Init statistics related context */
4662         stats_flags.collect_eth = 1;
4663
4664         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4665                ((u32 *)&stats_flags)[0]);
4666         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4667                ((u32 *)&stats_flags)[1]);
4668
4669         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4670                ((u32 *)&stats_flags)[0]);
4671         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4672                ((u32 *)&stats_flags)[1]);
4673
4674         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4675                ((u32 *)&stats_flags)[0]);
4676         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4677                ((u32 *)&stats_flags)[1]);
4678
4679         REG_WR(bp, BAR_XSTRORM_INTMEM +
4680                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4681                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4682         REG_WR(bp, BAR_XSTRORM_INTMEM +
4683                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4684                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4685
4686         REG_WR(bp, BAR_TSTRORM_INTMEM +
4687                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4688                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4689         REG_WR(bp, BAR_TSTRORM_INTMEM +
4690                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4691                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4692
4693         if (CHIP_IS_E1H(bp)) {
4694                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4695                         IS_E1HMF(bp));
4696                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4697                         IS_E1HMF(bp));
4698                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4699                         IS_E1HMF(bp));
4700                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4701                         IS_E1HMF(bp));
4702
4703                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4704                          bp->e1hov);
4705         }
4706
4707         /* Init CQ ring mapping and aggregation size */
4708         max_agg_size = min((u32)(bp->rx_buf_use_size +
4709                                  8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4710                            (u32)0xffff);
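        /* capped at 0xffff - the aggregation size is programmed into a
         * 16-bit field (see the REG_WR16 below) */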
4711         for_each_queue(bp, i) {
4712                 struct bnx2x_fastpath *fp = &bp->fp[i];
4713
4714                 REG_WR(bp, BAR_USTRORM_INTMEM +
4715                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4716                        U64_LO(fp->rx_comp_mapping));
4717                 REG_WR(bp, BAR_USTRORM_INTMEM +
4718                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4719                        U64_HI(fp->rx_comp_mapping));
4720
4721                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4722                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4723                          max_agg_size);
4724         }
4725 }
4726
4727 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4728 {
4729         switch (load_code) {
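        /* the load codes nest: a COMMON load also performs PORT and
         * FUNCTION init, and a PORT load also performs FUNCTION init,
         * hence the deliberate fall-throughs */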
4730         case FW_MSG_CODE_DRV_LOAD_COMMON:
4731                 bnx2x_init_internal_common(bp);
4732                 /* no break */
4733
4734         case FW_MSG_CODE_DRV_LOAD_PORT:
4735                 bnx2x_init_internal_port(bp);
4736                 /* no break */
4737
4738         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4739                 bnx2x_init_internal_func(bp);
4740                 break;
4741
4742         default:
4743                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4744                 break;
4745         }
4746 }
4747
4748 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4749 {
4750         int i;
4751
4752         for_each_queue(bp, i) {
4753                 struct bnx2x_fastpath *fp = &bp->fp[i];
4754
4755                 fp->bp = bp;
4756                 fp->state = BNX2X_FP_STATE_CLOSED;
4757                 fp->index = i;
4758                 fp->cl_id = BP_L_ID(bp) + i;
4759                 fp->sb_id = fp->cl_id;
4760                 DP(NETIF_MSG_IFUP,
4761                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
4762                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4763                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4764                               FP_SB_ID(fp));
4765                 bnx2x_update_fpsb_idx(fp);
4766         }
4767
4768         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4769                           DEF_SB_ID);
4770         bnx2x_update_dsb_idx(bp);
4771         bnx2x_update_coalesce(bp);
4772         bnx2x_init_rx_rings(bp);
4773         bnx2x_init_tx_ring(bp);
4774         bnx2x_init_sp_ring(bp);
4775         bnx2x_init_context(bp);
4776         bnx2x_init_internal(bp, load_code);
4777         bnx2x_init_ind_table(bp);
4778         bnx2x_int_enable(bp);
4779 }
4780
4781 /* end of nic init */
4782
4783 /*
4784  * gzip service functions
4785  */
4786
4787 static int bnx2x_gunzip_init(struct bnx2x *bp)
4788 {
4789         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4790                                               &bp->gunzip_mapping);
4791         if (bp->gunzip_buf  == NULL)
4792                 goto gunzip_nomem1;
4793
4794         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4795         if (bp->strm  == NULL)
4796                 goto gunzip_nomem2;
4797
4798         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4799                                       GFP_KERNEL);
4800         if (bp->strm->workspace == NULL)
4801                 goto gunzip_nomem3;
4802
4803         return 0;
4804
4805 gunzip_nomem3:
4806         kfree(bp->strm);
4807         bp->strm = NULL;
4808
4809 gunzip_nomem2:
4810         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4811                             bp->gunzip_mapping);
4812         bp->gunzip_buf = NULL;
4813
4814 gunzip_nomem1:
4815         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4816                " decompression\n", bp->dev->name);
4817         return -ENOMEM;
4818 }
4819
4820 static void bnx2x_gunzip_end(struct bnx2x *bp)
4821 {
4822         kfree(bp->strm->workspace);
4823
4824         kfree(bp->strm);
4825         bp->strm = NULL;
4826
4827         if (bp->gunzip_buf) {
4828                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4829                                     bp->gunzip_mapping);
4830                 bp->gunzip_buf = NULL;
4831         }
4832 }
4833
4834 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4835 {
4836         int n, rc;
4837
4838         /* check gzip header */
4839         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4840                 return -EINVAL;
4841
4842         n = 10;
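        /* RFC 1952: a gzip member starts with a 10-byte fixed header; if
         * the FNAME flag (bit 3 of the FLG byte) is set, a null-terminated
         * original file name follows and must be skipped before the
         * deflate stream starts */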
4843
4844 #define FNAME                           0x8
4845
4846         if (zbuf[3] & FNAME)
4847                 while ((zbuf[n++] != 0) && (n < len));
4848
4849         bp->strm->next_in = zbuf + n;
4850         bp->strm->avail_in = len - n;
4851         bp->strm->next_out = bp->gunzip_buf;
4852         bp->strm->avail_out = FW_BUF_SIZE;
4853
4854         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4855         if (rc != Z_OK)
4856                 return rc;
4857
4858         rc = zlib_inflate(bp->strm, Z_FINISH);
4859         if ((rc != Z_OK) && (rc != Z_STREAM_END))
4860                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4861                        bp->dev->name, bp->strm->msg);
4862
4863         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4864         if (bp->gunzip_outlen & 0x3)
4865                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4866                                     " gunzip_outlen (%d) not aligned\n",
4867                        bp->dev->name, bp->gunzip_outlen);
4868         bp->gunzip_outlen >>= 2;
4869
4870         zlib_inflateEnd(bp->strm);
4871
4872         if (rc == Z_STREAM_END)
4873                 return 0;
4874
4875         return rc;
4876 }
4877
4878 /* nic load/unload */
4879
4880 /*
4881  * General service functions
4882  */
4883
4884 /* send a NIG loopback debug packet */
4885 static void bnx2x_lb_pckt(struct bnx2x *bp)
4886 {
4887         u32 wb_write[3];
4888
4889         /* Ethernet source and destination addresses */
4890         wb_write[0] = 0x55555555;
4891         wb_write[1] = 0x55555555;
4892         wb_write[2] = 0x20;             /* SOP */
4893         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4894
4895         /* NON-IP protocol */
4896         wb_write[0] = 0x09000000;
4897         wb_write[1] = 0x55555555;
4898         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
4899         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4900 }
4901
4902 /* some of the internal memories
4903  * are not directly readable from the driver;
4904  * to test them we send debug packets
4905  */
4906 static int bnx2x_int_mem_test(struct bnx2x *bp)
4907 {
4908         int factor;
4909         int count, i;
4910         u32 val = 0;
4911
4912         if (CHIP_REV_IS_FPGA(bp))
4913                 factor = 120;
4914         else if (CHIP_REV_IS_EMUL(bp))
4915                 factor = 200;
4916         else
4917                 factor = 1;
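        /* FPGA and emulation platforms run much slower than real silicon,
         * so the poll timeouts below are scaled up by this factor */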
4918
4919         DP(NETIF_MSG_HW, "start part1\n");
4920
4921         /* Disable inputs of parser neighbor blocks */
4922         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4923         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4924         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4925         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4926
4927         /*  Write 0 to parser credits for CFC search request */
4928         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4929
4930         /* send Ethernet packet */
4931         bnx2x_lb_pckt(bp);
4932
4933         /* TODO: do we need to reset the NIG statistics first? */
4934         /* Wait until NIG register shows 1 packet of size 0x10 */
4935         count = 1000 * factor;
4936         while (count) {
4937
4938                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4939                 val = *bnx2x_sp(bp, wb_data[0]);
4940                 if (val == 0x10)
4941                         break;
4942
4943                 msleep(10);
4944                 count--;
4945         }
4946         if (val != 0x10) {
4947                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
4948                 return -1;
4949         }
4950
4951         /* Wait until PRS register shows 1 packet */
4952         count = 1000 * factor;
4953         while (count) {
4954                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4955                 if (val == 1)
4956                         break;
4957
4958                 msleep(10);
4959                 count--;
4960         }
4961         if (val != 0x1) {
4962                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4963                 return -2;
4964         }
4965
4966         /* Reset and init BRB, PRS */
4967         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4968         msleep(50);
4969         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4970         msleep(50);
4971         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4972         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4973
4974         DP(NETIF_MSG_HW, "part2\n");
4975
4976         /* Disable inputs of parser neighbor blocks */
4977         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4978         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4979         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4980         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4981
4982         /* Write 0 to parser credits for CFC search request */
4983         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4984
4985         /* send 10 Ethernet packets */
4986         for (i = 0; i < 10; i++)
4987                 bnx2x_lb_pckt(bp);
4988
4989         /* Wait until NIG register shows 10 + 1
4990            packets of size 11*0x10 = 0xb0 */
4991         count = 1000 * factor;
4992         while (count) {
4993
4994                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4995                 val = *bnx2x_sp(bp, wb_data[0]);
4996                 if (val == 0xb0)
4997                         break;
4998
4999                 msleep(10);
5000                 count--;
5001         }
5002         if (val != 0xb0) {
5003                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5004                 return -3;
5005         }
5006
5007         /* Wait until PRS register shows 2 packets */
5008         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5007         /* the PRS packet counter should now show 2 packets */
5010                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5011
5012         /* Write 1 to parser credits for CFC search request */
5013         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5014
5015         /* Wait until PRS register shows 3 packets */
5016         msleep(10 * factor);
5017         /* the PRS packet counter should now show the third packet */
5018         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5019         if (val != 3)
5020                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5021
5022         /* clear NIG EOP FIFO */
5023         for (i = 0; i < 11; i++)
5024                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5025         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5026         if (val != 1) {
5027                 BNX2X_ERR("clear of NIG failed\n");
5028                 return -4;
5029         }
5030
5031         /* Reset and init BRB, PRS, NIG */
5032         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5033         msleep(50);
5034         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5035         msleep(50);
5036         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5037         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5038 #ifndef BCM_ISCSI
5039         /* set NIC mode */
5040         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5041 #endif
5042
5043         /* Enable inputs of parser neighbor blocks */
5044         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5045         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5046         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5047         NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5048
5049         DP(NETIF_MSG_HW, "done\n");
5050
5051         return 0; /* OK */
5052 }
5053
5054 static void enable_blocks_attention(struct bnx2x *bp)
5055 {
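        /* writing 0 leaves no attention bits masked, i.e. it enables all
         * attentions from that block; only the PBF write at the end keeps
         * bits 3 and 4 masked (0x18) */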
5056         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5057         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5058         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5059         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5060         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5061         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5062         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5063         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5064         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5065 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5066 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5067         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5068         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5069         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5070 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5071 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5072         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5073         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5074         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5075         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5076 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5077 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5078         if (CHIP_REV_IS_FPGA(bp))
5079                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5080         else
5081                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5082         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5083         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5084         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5085 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5086 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5087         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5088         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5089 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5090         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5091 }
5092
5093
5094 static int bnx2x_init_common(struct bnx2x *bp)
5095 {
5096         u32 val, i;
5097
5098         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5099
5100         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5101         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
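        /* the _SET registers release blocks from reset (cf. the BRB/PRS
         * reset sequence in bnx2x_int_mem_test), so this takes the whole
         * chip out of reset before the common block init */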
5102
5103         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5104         if (CHIP_IS_E1H(bp))
5105                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5106
5107         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5108         msleep(30);
5109         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5110
5111         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5112         if (CHIP_IS_E1(bp)) {
5113                 /* enable HW interrupt from PXP on USDM overflow,
5114                    bit 16 on INT_MASK_0 */
5115                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5116         }
5117
5118         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5119         bnx2x_init_pxp(bp);
5120
5121 #ifdef __BIG_ENDIAN
5122         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5123         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5124         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5125         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5126         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5127         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5128
5129 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5130         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5131         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5132         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5133         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5134 #endif
5135
5136 #ifndef BCM_ISCSI
5137                 /* set NIC mode */
5138                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5139 #endif
5140
5141         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5142 #ifdef BCM_ISCSI
5143         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5144         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5145         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5146 #endif
5147
5148         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5149                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5150
5151         /* let the HW do its magic ... */
5152         msleep(100);
5153         /* finish PXP init */
5154         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5155         if (val != 1) {
5156                 BNX2X_ERR("PXP2 CFG failed\n");
5157                 return -EBUSY;
5158         }
5159         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5160         if (val != 1) {
5161                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5162                 return -EBUSY;
5163         }
5164
5165         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5166         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5167
5168         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5169
5170         /* clean the DMAE memory */
5171         bp->dmae_ready = 1;
5172         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5173
5174         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5175         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5176         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5177         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5178
5179         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5180         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5181         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5182         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5183
5184         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5185         /* soft reset pulse */
5186         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5187         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5188
5189 #ifdef BCM_ISCSI
5190         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5191 #endif
5192
5193         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5194         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5195         if (!CHIP_REV_IS_SLOW(bp)) {
5196                 /* enable hw interrupt from doorbell Q */
5197                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5198         }
5199
5200         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5201         if (CHIP_REV_IS_SLOW(bp)) {
5202                 /* fix for emulation and FPGA for no pause */
5203                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5204                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5205                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5206                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5207         }
5208
5209         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5210         if (CHIP_IS_E1H(bp))
5211                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5212
5213         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5214         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5215         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5216         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5217
5218         if (CHIP_IS_E1H(bp)) {
5219                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5220                                 STORM_INTMEM_SIZE_E1H/2);
5221                 bnx2x_init_fill(bp,
5222                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5223                                 0, STORM_INTMEM_SIZE_E1H/2);
5224                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5225                                 STORM_INTMEM_SIZE_E1H/2);
5226                 bnx2x_init_fill(bp,
5227                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5228                                 0, STORM_INTMEM_SIZE_E1H/2);
5229                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5230                                 STORM_INTMEM_SIZE_E1H/2);
5231                 bnx2x_init_fill(bp,
5232                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5233                                 0, STORM_INTMEM_SIZE_E1H/2);
5234                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5235                                 STORM_INTMEM_SIZE_E1H/2);
5236                 bnx2x_init_fill(bp,
5237                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5238                                 0, STORM_INTMEM_SIZE_E1H/2);
5239         } else { /* E1 */
5240                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5241                                 STORM_INTMEM_SIZE_E1);
5242                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5243                                 STORM_INTMEM_SIZE_E1);
5244                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5245                                 STORM_INTMEM_SIZE_E1);
5246                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5247                                 STORM_INTMEM_SIZE_E1);
5248         }
5249
5250         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5251         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5252         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5253         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5254
5255         /* sync semi rtc */
5256         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5257                0x80000000);
5258         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5259                0x80000000);
5260
5261         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5262         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5263         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5264
5265         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5266         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5267                 REG_WR(bp, i, 0xc0cac01a);
5268                 /* TODO: replace with something meaningful */
5269         }
5270         if (CHIP_IS_E1H(bp))
5271                 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5272         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5273
5274         if (sizeof(union cdu_context) != 1024)
5275                 /* we currently assume that a context is 1024 bytes */
5276                 printk(KERN_ALERT PFX "please adjust the size of"
5277                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5278
5279         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5280         val = (4 << 24) + (0 << 12) + 1024;
5281         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5282         if (CHIP_IS_E1(bp)) {
5283                 /* !!! fix pxp client credit until excel update */
5284                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5285                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5286         }
5287
5288         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5289         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5290
5291         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5292         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5293
5294         /* PXPCS COMMON comes here */
5295         /* Reset PCIE errors for debug */
5296         REG_WR(bp, 0x2814, 0xffffffff);
5297         REG_WR(bp, 0x3820, 0xffffffff);
5298
5299         /* EMAC0 COMMON comes here */
5300         /* EMAC1 COMMON comes here */
5301         /* DBU COMMON comes here */
5302         /* DBG COMMON comes here */
5303
5304         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5305         if (CHIP_IS_E1H(bp)) {
5306                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5307                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5308         }
5309
5310         if (CHIP_REV_IS_SLOW(bp))
5311                 msleep(200);
5312
5313         /* finish CFC init */
5314         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5315         if (val != 1) {
5316                 BNX2X_ERR("CFC LL_INIT failed\n");
5317                 return -EBUSY;
5318         }
5319         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5320         if (val != 1) {
5321                 BNX2X_ERR("CFC AC_INIT failed\n");
5322                 return -EBUSY;
5323         }
5324         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5325         if (val != 1) {
5326                 BNX2X_ERR("CFC CAM_INIT failed\n");
5327                 return -EBUSY;
5328         }
5329         REG_WR(bp, CFC_REG_DEBUG0, 0);
5330
5331         /* read NIG statistic
5332            to see if this is our first up since power-up */
5333         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5334         val = *bnx2x_sp(bp, wb_data[0]);
5335
5336         /* do internal memory self test */
5337         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5338                 BNX2X_ERR("internal mem self test failed\n");
5339                 return -EBUSY;
5340         }
5341
5342         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5343         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5344                 /* Fan failure is indicated by SPIO 5 */
5345                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5346                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5347
5348                 /* set to active low mode */
5349                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5350                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5351                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5352                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5353
5354                 /* enable interrupt to signal the IGU */
5355                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5356                 val |= (1 << MISC_REGISTERS_SPIO_5);
5357                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5358                 break;
5359
5360         default:
5361                 break;
5362         }
5363
5364         /* clear PXP2 attentions */
5365         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5366
5367         enable_blocks_attention(bp);
5368
5369         if (bp->flags & TPA_ENABLE_FLAG) {
5370                 struct tstorm_eth_tpa_exist tmp = {0};
5371
5372                 tmp.tpa_exist = 1;
5373
5374                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
5375                        ((u32 *)&tmp)[0]);
5376                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
5377                        ((u32 *)&tmp)[1]);
5378         }
5379
5380         if (!BP_NOMCP(bp)) {
5381                 bnx2x_acquire_phy_lock(bp);
5382                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5383                 bnx2x_release_phy_lock(bp);
5384         } else
5385                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5386
5387         return 0;
5388 }
5389
5390 static int bnx2x_init_port(struct bnx2x *bp)
5391 {
5392         int port = BP_PORT(bp);
5393         u32 val;
5394
5395         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5396
5397         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5398
5399         /* Port PXP comes here */
5400         /* Port PXP2 comes here */
5401 #ifdef BCM_ISCSI
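             /* NB: this BCM_ISCSI block references i, wb_write[] and func,
              * none of which are declared in this function, so it does not
              * build as-is; it is kept as a reference for the iSCSI ILT
              * line layout */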
5402         /* Port0  1
5403          * Port1  385 */
5404         i++;
5405         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5406         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5407         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5408         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5409
5410         /* Port0  2
5411          * Port1  386 */
5412         i++;
5413         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5414         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5415         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5416         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5417
5418         /* Port0  3
5419          * Port1  387 */
5420         i++;
5421         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5422         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5423         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5424         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5425 #endif
5426         /* Port CMs come here */
5427
5428         /* Port QM comes here */
5429 #ifdef BCM_ISCSI
5430         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5431         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5432
5433         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5434                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5435 #endif
5436         /* Port DQ comes here */
5437         /* Port BRB1 comes here */
5438         /* Port PRS comes here */
5439         /* Port TSDM comes here */
5440         /* Port CSDM comes here */
5441         /* Port USDM comes here */
5442         /* Port XSDM comes here */
5443         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5444                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5445         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5446                              port ? USEM_PORT1_END : USEM_PORT0_END);
5447         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5448                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5449         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5450                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5451         /* Port UPB comes here */
5452         /* Port XPB comes here */
5453
5454         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5455                              port ? PBF_PORT1_END : PBF_PORT0_END);
5456
5457         /* configure PBF to work without PAUSE (MTU 9000) */
5458         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5459
5460         /* update threshold */
5461         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5462         /* update init credit */
5463         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5464
5465         /* probe changes */
5466         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5467         msleep(5);
5468         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5469
5470 #ifdef BCM_ISCSI
5471         /* tell the searcher where the T2 table is */
5472         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5473
5474         wb_write[0] = U64_LO(bp->t2_mapping);
5475         wb_write[1] = U64_HI(bp->t2_mapping);
5476         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5477         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5478         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5479         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5480
5481         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5482         /* Port SRCH comes here */
5483 #endif
5484         /* Port CDU comes here */
5485         /* Port CFC comes here */
5486
5487         if (CHIP_IS_E1(bp)) {
5488                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5489                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5490         }
5491         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5492                              port ? HC_PORT1_END : HC_PORT0_END);
5493
5494         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5495                                     MISC_AEU_PORT0_START,
5496                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5497         /* init aeu_mask_attn_func_0/1:
5498          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5499          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5500          *             bits 4-7 are used for "per vn group attention" */
5501         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5502                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5503
5504         /* Port PXPCS comes here */
5505         /* Port EMAC0 comes here */
5506         /* Port EMAC1 comes here */
5507         /* Port DBU comes here */
5508         /* Port DBG comes here */
5509         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5510                              port ? NIG_PORT1_END : NIG_PORT0_END);
5511
5512         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5513
5514         if (CHIP_IS_E1H(bp)) {
5515                 u32 wsum;
5516                 struct cmng_struct_per_port m_cmng_port;
5517                 int vn;
5518
5519                 /* 0x2 disable e1hov, 0x1 enable */
5520                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5521                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5522
5523                 /* Init RATE SHAPING and FAIRNESS contexts.
5524                    Initialize as if there is a 10G link. */
5525                 wsum = bnx2x_calc_vn_wsum(bp);
5526                 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5527                 if (IS_E1HMF(bp))
5528                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
5529                                 bnx2x_init_vn_minmax(bp, 2*vn + port,
5530                                         wsum, 10000, &m_cmng_port);
5531         }
5532
5533         /* Port MCP comes here */
5534         /* Port DMAE comes here */
5535
5536         switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5537         case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5538                 /* add SPIO 5 to group 0 */
5539                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5540                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5541                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5542                 break;
5543
5544         default:
5545                 break;
5546         }
5547
5548         bnx2x__link_reset(bp);
5549
5550         return 0;
5551 }
5552
5553 #define ILT_PER_FUNC            (768/2)
5554 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5555 /* the phys address is shifted right 12 bits and has a valid bit (1)
5556    added at the 53rd bit;
5557    then, since this is a wide register(TM),
5558    we split it into two 32-bit writes
5559  */
5560 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5561 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5562 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
5563 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
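     /* illustrative example (not from the original source): for a DMA
      * address addr = 0x0000001234567000,
      *	ONCHIP_ADDR1(addr) = 0x01234567  (address bits 43:12)
      *	ONCHIP_ADDR2(addr) = 0x00100000  (valid bit set; bits 63:44 are 0)
      * and PXP_ONE_ILT(5) = (5 << 10) | 5 = 0x1405, i.e. first == last == 5
      */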
5564
5565 #define CNIC_ILT_LINES          0
5566
5567 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5568 {
5569         int reg;
5570
5571         if (CHIP_IS_E1H(bp))
5572                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5573         else /* E1 */
5574                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5575
5576         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5577 }
5578
5579 static int bnx2x_init_func(struct bnx2x *bp)
5580 {
5581         int port = BP_PORT(bp);
5582         int func = BP_FUNC(bp);
5583         int i;
5584
5585         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5586
5587         i = FUNC_ILT_BASE(func);
5588
5589         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5590         if (CHIP_IS_E1H(bp)) {
5591                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5592                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5593         } else /* E1 */
5594                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5595                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5596
5597
5598         if (CHIP_IS_E1H(bp)) {
5599                 for (i = 0; i < 9; i++)
5600                         bnx2x_init_block(bp,
5601                                          cm_start[func][i], cm_end[func][i]);
5602
5603                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5604                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5605         }
5606
5607         /* HC init per function */
5608         if (CHIP_IS_E1H(bp)) {
5609                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5610
5611                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5612                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5613         }
5614         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5615
5616         if (CHIP_IS_E1H(bp))
5617                 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5618
5619         /* Reset PCIE errors for debug */
5620         REG_WR(bp, 0x2114, 0xffffffff);
5621         REG_WR(bp, 0x2120, 0xffffffff);
5622
5623         return 0;
5624 }
5625
5626 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5627 {
5628         int i, rc = 0;
5629
5630         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
5631            BP_FUNC(bp), load_code);
5632
5633         bp->dmae_ready = 0;
5634         mutex_init(&bp->dmae_mutex);
5635         bnx2x_gunzip_init(bp);
5636
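             /* note the deliberate fall-through below: a COMMON load also
              * runs the PORT and FUNCTION init stages */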
5637         switch (load_code) {
5638         case FW_MSG_CODE_DRV_LOAD_COMMON:
5639                 rc = bnx2x_init_common(bp);
5640                 if (rc)
5641                         goto init_hw_err;
5642                 /* no break */
5643
5644         case FW_MSG_CODE_DRV_LOAD_PORT:
5645                 bp->dmae_ready = 1;
5646                 rc = bnx2x_init_port(bp);
5647                 if (rc)
5648                         goto init_hw_err;
5649                 /* no break */
5650
5651         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5652                 bp->dmae_ready = 1;
5653                 rc = bnx2x_init_func(bp);
5654                 if (rc)
5655                         goto init_hw_err;
5656                 break;
5657
5658         default:
5659                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5660                 break;
5661         }
5662
5663         if (!BP_NOMCP(bp)) {
5664                 int func = BP_FUNC(bp);
5665
5666                 bp->fw_drv_pulse_wr_seq =
5667                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5668                                  DRV_PULSE_SEQ_MASK);
5669                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5670                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
5671                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
5672         } else
5673                 bp->func_stx = 0;
5674
5675         /* this needs to be done before gunzip end */
5676         bnx2x_zero_def_sb(bp);
5677         for_each_queue(bp, i)
5678                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5679
5680 init_hw_err:
5681         bnx2x_gunzip_end(bp);
5682
5683         return rc;
5684 }
5685
5686 /* send the MCP a request, block until there is a reply */
5687 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5688 {
5689         int func = BP_FUNC(bp);
5690         u32 seq = ++bp->fw_seq;
5691         u32 rc = 0;
5692         u32 cnt = 1;
5693         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5694
5695         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5696         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5697
5698         do {
5699                 /* let the FW do its magic ... */
5700                 msleep(delay);
5701
5702                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5703
5704                 /* Give the FW up to 2 seconds (200 * 10ms; 100ms steps on slow chip revs) */
5705         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5706
5707         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5708            cnt*delay, rc, seq);
5709
5710         /* is this a reply to our command? */
5711         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5712                 rc &= FW_MSG_CODE_MASK;
5713
5714         } else {
5715                 /* FW BUG! */
5716                 BNX2X_ERR("FW failed to respond!\n");
5717                 bnx2x_fw_dump(bp);
5718                 rc = 0;
5719         }
5720
5721         return rc;
5722 }
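     /* illustrative usage (sketch): the command and sequence number share
      * the mailbox word (command | seq) and the reply is matched on the
      * FW_MSG_SEQ_NUMBER_MASK bits, e.g.
      *
      *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
      *	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON)
      *		... first function up - also init the common blocks ...
      */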
5723
5724 static void bnx2x_free_mem(struct bnx2x *bp)
5725 {
5726
5727 #define BNX2X_PCI_FREE(x, y, size) \
5728         do { \
5729                 if (x) { \
5730                         pci_free_consistent(bp->pdev, size, x, y); \
5731                         x = NULL; \
5732                         y = 0; \
5733                 } \
5734         } while (0)
5735
5736 #define BNX2X_FREE(x) \
5737         do { \
5738                 if (x) { \
5739                         vfree(x); \
5740                         x = NULL; \
5741                 } \
5742         } while (0)
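     /* both macros NULL the pointer after freeing, so bnx2x_free_mem() is
      * safe to call on a partially allocated bp (this is what the
      * alloc_mem_err path in bnx2x_alloc_mem() relies on) */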
5743
5744         int i;
5745
5746         /* fastpath */
5747         for_each_queue(bp, i) {
5748
5749                 /* Status blocks */
5750                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5751                                bnx2x_fp(bp, i, status_blk_mapping),
5752                                sizeof(struct host_status_block) +
5753                                sizeof(struct eth_tx_db_data));
5754
5755                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5756                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5757                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5758                                bnx2x_fp(bp, i, tx_desc_mapping),
5759                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
5760
5761                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5762                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5763                                bnx2x_fp(bp, i, rx_desc_mapping),
5764                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
5765
5766                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5767                                bnx2x_fp(bp, i, rx_comp_mapping),
5768                                sizeof(struct eth_fast_path_rx_cqe) *
5769                                NUM_RCQ_BD);
5770
5771                 /* SGE ring */
5772                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5773                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5774                                bnx2x_fp(bp, i, rx_sge_mapping),
5775                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5776         }
5777         /* end of fastpath */
5778
5779         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5780                        sizeof(struct host_def_status_block));
5781
5782         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5783                        sizeof(struct bnx2x_slowpath));
5784
5785 #ifdef BCM_ISCSI
5786         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5787         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5788         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5789         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5790 #endif
5791         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5792
5793 #undef BNX2X_PCI_FREE
5794 #undef BNX2X_FREE
5795 }
5796
5797 static int bnx2x_alloc_mem(struct bnx2x *bp)
5798 {
5799
5800 #define BNX2X_PCI_ALLOC(x, y, size) \
5801         do { \
5802                 x = pci_alloc_consistent(bp->pdev, size, y); \
5803                 if (x == NULL) \
5804                         goto alloc_mem_err; \
5805                 memset(x, 0, size); \
5806         } while (0)
5807
5808 #define BNX2X_ALLOC(x, size) \
5809         do { \
5810                 x = vmalloc(size); \
5811                 if (x == NULL) \
5812                         goto alloc_mem_err; \
5813                 memset(x, 0, size); \
5814         } while (0)
5815
5816         int i;
5817
5818         /* fastpath */
5819         for_each_queue(bp, i) {
5820                 bnx2x_fp(bp, i, bp) = bp;
5821
5822                 /* Status blocks */
5823                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5824                                 &bnx2x_fp(bp, i, status_blk_mapping),
5825                                 sizeof(struct host_status_block) +
5826                                 sizeof(struct eth_tx_db_data));
5827
5828                 bnx2x_fp(bp, i, hw_tx_prods) =
5829                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5830
5831                 bnx2x_fp(bp, i, tx_prods_mapping) =
5832                                 bnx2x_fp(bp, i, status_blk_mapping) +
5833                                 sizeof(struct host_status_block);
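                     /* hw_tx_prods and tx_prods_mapping point into the same
                      * DMA-coherent chunk, right after the status block -
                      * hence the sizeof(struct eth_tx_db_data) added to the
                      * status block allocation above */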
5834
5835                 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5836                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5837                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5838                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5839                                 &bnx2x_fp(bp, i, tx_desc_mapping),
5840                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5841
5842                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5843                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5844                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5845                                 &bnx2x_fp(bp, i, rx_desc_mapping),
5846                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5847
5848                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5849                                 &bnx2x_fp(bp, i, rx_comp_mapping),
5850                                 sizeof(struct eth_fast_path_rx_cqe) *
5851                                 NUM_RCQ_BD);
5852
5853                 /* SGE ring */
5854                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5855                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5856                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5857                                 &bnx2x_fp(bp, i, rx_sge_mapping),
5858                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5859         }
5860         /* end of fastpath */
5861
5862         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5863                         sizeof(struct host_def_status_block));
5864
5865         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5866                         sizeof(struct bnx2x_slowpath));
5867
5868 #ifdef BCM_ISCSI
5869         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5870
5871         /* Initialize T1 */
5872         for (i = 0; i < 64*1024; i += 64) {
5873                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5874                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5875         }
5876
5877         /* allocate searcher T2 table
5878            we allocate 1/4 of the T1 size (16K) for T2
5879            (which is not entered into the ILT) */
5880         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5881
5882         /* Initialize T2 */
5883         for (i = 0; i < 16*1024; i += 64)
5884                 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5885
5886         /* now fixup the last line in the block to point to the next block */
5887         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5888
5889         /* Timer block array (MAX_CONN*8 bytes), phys uncached; for now 1024 conns */
5890         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5891
5892         /* QM queues (128*MAX_CONN) */
5893         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5894 #endif
5895
5896         /* Slow path ring */
5897         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5898
5899         return 0;
5900
5901 alloc_mem_err:
5902         bnx2x_free_mem(bp);
5903         return -ENOMEM;
5904
5905 #undef BNX2X_PCI_ALLOC
5906 #undef BNX2X_ALLOC
5907 }
5908
5909 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5910 {
5911         int i;
5912
5913         for_each_queue(bp, i) {
5914                 struct bnx2x_fastpath *fp = &bp->fp[i];
5915
5916                 u16 bd_cons = fp->tx_bd_cons;
5917                 u16 sw_prod = fp->tx_pkt_prod;
5918                 u16 sw_cons = fp->tx_pkt_cons;
5919
5920                 while (sw_cons != sw_prod) {
5921                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5922                         sw_cons++;
5923                 }
5924         }
5925 }
5926
5927 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5928 {
5929         int i, j;
5930
5931         for_each_queue(bp, j) {
5932                 struct bnx2x_fastpath *fp = &bp->fp[j];
5933
5934                 for (i = 0; i < NUM_RX_BD; i++) {
5935                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5936                         struct sk_buff *skb = rx_buf->skb;
5937
5938                         if (skb == NULL)
5939                                 continue;
5940
5941                         pci_unmap_single(bp->pdev,
5942                                          pci_unmap_addr(rx_buf, mapping),
5943                                          bp->rx_buf_use_size,
5944                                          PCI_DMA_FROMDEVICE);
5945
5946                         rx_buf->skb = NULL;
5947                         dev_kfree_skb(skb);
5948                 }
5949                 if (!fp->disable_tpa)
5950                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5951                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
5952                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
5953         }
5954 }
5955
5956 static void bnx2x_free_skbs(struct bnx2x *bp)
5957 {
5958         bnx2x_free_tx_skbs(bp);
5959         bnx2x_free_rx_skbs(bp);
5960 }
5961
5962 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5963 {
5964         int i, offset = 1;
5965
5966         free_irq(bp->msix_table[0].vector, bp->dev);
5967         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5968            bp->msix_table[0].vector);
5969
5970         for_each_queue(bp, i) {
5971                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
5972                    "state %x\n", i, bp->msix_table[i + offset].vector,
5973                    bnx2x_fp(bp, i, state));
5974
5975                 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5976                         BNX2X_ERR("IRQ of fp #%d being freed while "
5977                                   "state != closed\n", i);
5978
5979                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
5980         }
5981 }
5982
5983 static void bnx2x_free_irq(struct bnx2x *bp)
5984 {
5985         if (bp->flags & USING_MSIX_FLAG) {
5986                 bnx2x_free_msix_irqs(bp);
5987                 pci_disable_msix(bp->pdev);
5988                 bp->flags &= ~USING_MSIX_FLAG;
5989
5990         } else
5991                 free_irq(bp->pdev->irq, bp->dev);
5992 }
5993
5994 static int bnx2x_enable_msix(struct bnx2x *bp)
5995 {
5996         int i, rc, offset;
5997
5998         bp->msix_table[0].entry = 0;
5999         offset = 1;
6000         DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
6001
6002         for_each_queue(bp, i) {
6003                 int igu_vec = offset + i + BP_L_ID(bp);
6004
6005                 bp->msix_table[i + offset].entry = igu_vec;
6006                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6007                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6008         }
6009
6010         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6011                              bp->num_queues + offset);
6012         if (rc) {
6013                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6014                 return -1;
6015         }
6016         bp->flags |= USING_MSIX_FLAG;
6017
6018         return 0;
6019 }
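     /* resulting MSI-X layout: entry 0 - slowpath (default status block);
      * entries 1..num_queues - one per fastpath queue, where fastpath #i
      * gets IGU vector 1 + i + BP_L_ID(bp) */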
6020
6021 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6022 {
6023         int i, rc, offset = 1;
6024
6025         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6026                          bp->dev->name, bp->dev);
6027         if (rc) {
6028                 BNX2X_ERR("request sp irq failed\n");
6029                 return -EBUSY;
6030         }
6031
6032         for_each_queue(bp, i) {
6033                 rc = request_irq(bp->msix_table[i + offset].vector,
6034                                  bnx2x_msix_fp_int, 0,
6035                                  bp->dev->name, &bp->fp[i]);
6036                 if (rc) {
6037                         BNX2X_ERR("request fp #%d irq failed  rc %d\n",
6038                                   i + offset, rc);
6039                         bnx2x_free_msix_irqs(bp);
6040                         return -EBUSY;
6041                 }
6042
6043                 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6044         }
6045
6046         return 0;
6047 }
6048
6049 static int bnx2x_req_irq(struct bnx2x *bp)
6050 {
6051         int rc;
6052
6053         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6054                          bp->dev->name, bp->dev);
6055         if (!rc)
6056                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6057
6058         return rc;
6059 }
6060
6061 /*
6062  * Init service functions
6063  */
6064
6065 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6066 {
6067         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6068         int port = BP_PORT(bp);
6069
6070         /* CAM allocation
6071          * unicasts 0-31:port0 32-63:port1
6072          * multicast 64-127:port0 128-191:port1
6073          */
6074         config->hdr.length_6b = 2;
6075         config->hdr.offset = port ? 31 : 0;
6076         config->hdr.client_id = BP_CL_ID(bp);
6077         config->hdr.reserved1 = 0;
6078
6079         /* primary MAC */
6080         config->config_table[0].cam_entry.msb_mac_addr =
6081                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6082         config->config_table[0].cam_entry.middle_mac_addr =
6083                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6084         config->config_table[0].cam_entry.lsb_mac_addr =
6085                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
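             /* e.g. (on a little-endian host) 00:11:22:33:44:55 is loaded
              * as msb 0x0011, middle 0x2233, lsb 0x4455 */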
6086         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6087         if (set)
6088                 config->config_table[0].target_table_entry.flags = 0;
6089         else
6090                 CAM_INVALIDATE(config->config_table[0]);
6091         config->config_table[0].target_table_entry.client_id = 0;
6092         config->config_table[0].target_table_entry.vlan_id = 0;
6093
6094         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6095            (set ? "setting" : "clearing"),
6096            config->config_table[0].cam_entry.msb_mac_addr,
6097            config->config_table[0].cam_entry.middle_mac_addr,
6098            config->config_table[0].cam_entry.lsb_mac_addr);
6099
6100         /* broadcast */
6101         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6102         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6103         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6104         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6105         if (set)
6106                 config->config_table[1].target_table_entry.flags =
6107                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6108         else
6109                 CAM_INVALIDATE(config->config_table[1]);
6110         config->config_table[1].target_table_entry.client_id = 0;
6111         config->config_table[1].target_table_entry.vlan_id = 0;
6112
6113         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6114                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6115                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6116 }
6117
6118 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6119 {
6120         struct mac_configuration_cmd_e1h *config =
6121                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6122
6123         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6124                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6125                 return;
6126         }
6127
6128         /* CAM allocation for E1H
6129          * unicasts: by func number
6130          * multicast: 20+FUNC*20, 20 each
6131          */
6132         config->hdr.length_6b = 1;
6133         config->hdr.offset = BP_FUNC(bp);
6134         config->hdr.client_id = BP_CL_ID(bp);
6135         config->hdr.reserved1 = 0;
6136
6137         /* primary MAC */
6138         config->config_table[0].msb_mac_addr =
6139                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6140         config->config_table[0].middle_mac_addr =
6141                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6142         config->config_table[0].lsb_mac_addr =
6143                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6144         config->config_table[0].client_id = BP_L_ID(bp);
6145         config->config_table[0].vlan_id = 0;
6146         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6147         if (set)
6148                 config->config_table[0].flags = BP_PORT(bp);
6149         else
6150                 config->config_table[0].flags =
6151                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6152
6153         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6154            (set ? "setting" : "clearing"),
6155            config->config_table[0].msb_mac_addr,
6156            config->config_table[0].middle_mac_addr,
6157            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6158
6159         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6160                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6161                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6162 }
6163
6164 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6165                              int *state_p, int poll)
6166 {
6167         /* can take a while if any port is running */
6168         int cnt = 500;
6169
6170         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6171            poll ? "polling" : "waiting", state, idx);
6172
6173         might_sleep();
6174         while (cnt--) {
6175                 if (poll) {
6176                         bnx2x_rx_int(bp->fp, 10);
6177                         /* if index is different from 0
6178                          * the reply for some commands will
6179                          * be on the non default queue
6180                          */
6181                         if (idx)
6182                                 bnx2x_rx_int(&bp->fp[idx], 10);
6183                 }
6184
6185                 mb(); /* state is changed by bnx2x_sp_event() */
6186                 if (*state_p == state)
6187                         return 0;
6188
6189                 msleep(1);
6190         }
6191
6192         /* timeout! */
6193         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6194                   poll ? "polling" : "waiting", state, idx);
6195 #ifdef BNX2X_STOP_ON_ERROR
6196         bnx2x_panic();
6197 #endif
6198
6199         return -EBUSY;
6200 }
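     /* typical usage (sketch, mirroring bnx2x_stop_multi() below): post a
      * ramrod and wait for bnx2x_sp_event() to move the state, e.g.
      *
      *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
      *	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
      *			       &bp->fp[index].state, 1);
      *
      * poll is set when the completion must be reaped via bnx2x_rx_int()
      * because interrupts may already be disabled */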
6201
6202 static int bnx2x_setup_leading(struct bnx2x *bp)
6203 {
6204         int rc;
6205
6206         /* reset IGU state */
6207         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6208
6209         /* SETUP ramrod */
6210         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6211
6212         /* Wait for completion */
6213         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6214
6215         return rc;
6216 }
6217
6218 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6219 {
6220         /* reset IGU state */
6221         bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6222
6223         /* SETUP ramrod */
6224         bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6225         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6226
6227         /* Wait for completion */
6228         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6229                                  &(bp->fp[index].state), 0);
6230 }
6231
6232 static int bnx2x_poll(struct napi_struct *napi, int budget);
6233 static void bnx2x_set_rx_mode(struct net_device *dev);
6234
6235 /* must be called with rtnl_lock */
6236 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6237 {
6238         u32 load_code;
6239         int i, rc;
6240
6241 #ifdef BNX2X_STOP_ON_ERROR
6242         if (unlikely(bp->panic))
6243                 return -EPERM;
6244 #endif
6245
6246         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6247
6248         /* Send LOAD_REQUEST command to MCP.
6249            Returns the type of LOAD command:
6250            if this is the first port to be initialized,
6251            common blocks should be initialized; otherwise - not
6252         */
6253         if (!BP_NOMCP(bp)) {
6254                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6255                 if (!load_code) {
6256                         BNX2X_ERR("MCP response failure, aborting\n");
6257                         return -EBUSY;
6258                 }
6259                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
6260                         return -EBUSY; /* other port in diagnostic mode */
6261
6262         } else {
6263                 int port = BP_PORT(bp);
6264
6265                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6266                    load_count[0], load_count[1], load_count[2]);
6267                 load_count[0]++;
6268                 load_count[1 + port]++;
6269                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6270                    load_count[0], load_count[1], load_count[2]);
6271                 if (load_count[0] == 1)
6272                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6273                 else if (load_count[1 + port] == 1)
6274                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6275                 else
6276                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6277         }
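             /* e.g. first load on port 0: counts become 1,1,0 -> LOAD_COMMON;
              * a later load on port 1: 2,1,1 -> LOAD_PORT; any further
              * function on an already initialized port -> LOAD_FUNCTION */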
6278
6279         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6280             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6281                 bp->port.pmf = 1;
6282         else
6283                 bp->port.pmf = 0;
6284         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6285
6286         /* if we can't use MSI-X we only need one fp,
6287          * so try to enable MSI-X with the requested number of fp's
6288          * and fall back to INT#A with one fp
6289          */
6290         if (use_inta) {
6291                 bp->num_queues = 1;
6292
6293         } else {
6294                 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6295                         /* user requested number */
6296                         bp->num_queues = use_multi;
6297
6298                 else if (use_multi)
6299                         bp->num_queues = min_t(u32, num_online_cpus(),
6300                                                BP_MAX_QUEUES(bp));
6301                 else
6302                         bp->num_queues = 1;
6303
6304                 if (bnx2x_enable_msix(bp)) {
6305                         /* failed to enable MSI-X */
6306                         bp->num_queues = 1;
6307                         if (use_multi)
6308                                 BNX2X_ERR("Multi requested but failed"
6309                                           " to enable MSI-X\n");
6310                 }
6311         }
6312         DP(NETIF_MSG_IFUP,
6313            "set number of queues to %d\n", bp->num_queues);
6314
6315         if (bnx2x_alloc_mem(bp))
6316                 return -ENOMEM;
6317
6318         for_each_queue(bp, i)
6319                 bnx2x_fp(bp, i, disable_tpa) =
6320                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6321
6322         if (bp->flags & USING_MSIX_FLAG) {
6323                 rc = bnx2x_req_msix_irqs(bp);
6324                 if (rc) {
6325                         pci_disable_msix(bp->pdev);
6326                         goto load_error;
6327                 }
6328         } else {
6329                 bnx2x_ack_int(bp);
6330                 rc = bnx2x_req_irq(bp);
6331                 if (rc) {
6332                         BNX2X_ERR("IRQ request failed, aborting\n");
6333                         goto load_error;
6334                 }
6335         }
6336
6337         for_each_queue(bp, i)
6338                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6339                                bnx2x_poll, 128);
6340
6341         /* Initialize HW */
6342         rc = bnx2x_init_hw(bp, load_code);
6343         if (rc) {
6344                 BNX2X_ERR("HW init failed, aborting\n");
6345                 goto load_error;
6346         }
6347
6348         /* Setup NIC internals and enable interrupts */
6349         bnx2x_nic_init(bp, load_code);
6350
6351         /* Send LOAD_DONE command to MCP */
6352         if (!BP_NOMCP(bp)) {
6353                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6354                 if (!load_code) {
6355                         BNX2X_ERR("MCP response failure, aborting\n");
6356                         rc = -EBUSY;
6357                         goto load_int_disable;
6358                 }
6359         }
6360
6361         bnx2x_stats_init(bp);
6362
6363         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6364
6365         /* Enable Rx interrupt handling before sending the ramrod
6366            as it's completed on Rx FP queue */
6367         for_each_queue(bp, i)
6368                 napi_enable(&bnx2x_fp(bp, i, napi));
6369
6370         /* Enable interrupt handling */
6371         atomic_set(&bp->intr_sem, 0);
6372
6373         rc = bnx2x_setup_leading(bp);
6374         if (rc) {
6375                 BNX2X_ERR("Setup leading failed!\n");
6376                 goto load_stop_netif;
6377         }
6378
6379         if (CHIP_IS_E1H(bp))
6380                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6381                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6382                         bp->state = BNX2X_STATE_DISABLED;
6383                 }
6384
6385         if (bp->state == BNX2X_STATE_OPEN)
6386                 for_each_nondefault_queue(bp, i) {
6387                         rc = bnx2x_setup_multi(bp, i);
6388                         if (rc)
6389                                 goto load_stop_netif;
6390                 }
6391
6392         if (CHIP_IS_E1(bp))
6393                 bnx2x_set_mac_addr_e1(bp, 1);
6394         else
6395                 bnx2x_set_mac_addr_e1h(bp, 1);
6396
6397         if (bp->port.pmf)
6398                 bnx2x_initial_phy_init(bp);
6399
6400         /* Start fast path */
6401         switch (load_mode) {
6402         case LOAD_NORMAL:
6403                 /* Tx queue should only be re-enabled */
6404                 netif_wake_queue(bp->dev);
6405                 bnx2x_set_rx_mode(bp->dev);
6406                 break;
6407
6408         case LOAD_OPEN:
6409                 netif_start_queue(bp->dev);
6410                 bnx2x_set_rx_mode(bp->dev);
6411                 if (bp->flags & USING_MSIX_FLAG)
6412                         printk(KERN_INFO PFX "%s: using MSI-X\n",
6413                                bp->dev->name);
6414                 break;
6415
6416         case LOAD_DIAG:
6417                 bnx2x_set_rx_mode(bp->dev);
6418                 bp->state = BNX2X_STATE_DIAG;
6419                 break;
6420
6421         default:
6422                 break;
6423         }
6424
6425         if (!bp->port.pmf)
6426                 bnx2x__link_status_update(bp);
6427
6428         /* start the timer */
6429         mod_timer(&bp->timer, jiffies + bp->current_interval);
6430
6431
6432         return 0;
6433
6434 load_stop_netif:
6435         for_each_queue(bp, i)
6436                 napi_disable(&bnx2x_fp(bp, i, napi));
6437
6438 load_int_disable:
6439         bnx2x_int_disable_sync(bp);
6440
6441         /* Release IRQs */
6442         bnx2x_free_irq(bp);
6443
6444         /* Free SKBs, SGEs, TPA pool and driver internals */
6445         bnx2x_free_skbs(bp);
6446         for_each_queue(bp, i)
6447                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6448                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6449 load_error:
6450         bnx2x_free_mem(bp);
6451
6452         /* TBD we really need to reset the chip
6453            if we want to recover from this */
6454         return rc;
6455 }
6456
6457 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6458 {
6459         int rc;
6460
6461         /* halt the connection */
6462         bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6463         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6464
6465         /* Wait for completion */
6466         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6467                                &(bp->fp[index].state), 1);
6468         if (rc) /* timeout */
6469                 return rc;
6470
6471         /* delete cfc entry */
6472         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6473
6474         /* Wait for completion */
6475         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6476                                &(bp->fp[index].state), 1);
6477         return rc;
6478 }
6479
6480 static int bnx2x_stop_leading(struct bnx2x *bp)
6481 {
6482         u16 dsb_sp_prod_idx;
6483         /* if the other port is handling traffic,
6484            this can take a lot of time */
6485         int cnt = 500;
6486         int rc;
6487
6488         might_sleep();
6489
6490         /* Send HALT ramrod */
6491         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6492         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6493
6494         /* Wait for completion */
6495         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6496                                &(bp->fp[0].state), 1);
6497         if (rc) /* timeout */
6498                 return rc;
6499
6500         dsb_sp_prod_idx = *bp->dsb_sp_prod;
6501
6502         /* Send PORT_DELETE ramrod */
6503         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6504
6505         /* Wait for completion to arrive on default status block;
6506            we are going to reset the chip anyway,
6507            so there is not much to do if this times out
6508          */
6509         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6510                 if (!cnt) {
6511                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6512                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6513                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
6514 #ifdef BNX2X_STOP_ON_ERROR
6515                         bnx2x_panic();
6516 #else
6517                         rc = -EBUSY;
6518 #endif
6519                         break;
6520                 }
6521                 cnt--;
6522                 msleep(1);
6523         }
6524         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6525         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6526
6527         return rc;
6528 }
6529
6530 static void bnx2x_reset_func(struct bnx2x *bp)
6531 {
6532         int port = BP_PORT(bp);
6533         int func = BP_FUNC(bp);
6534         int base, i;
6535
6536         /* Configure IGU */
6537         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6538         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6539
6540         REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6541
6542         /* Clear ILT */
6543         base = FUNC_ILT_BASE(func);
6544         for (i = base; i < base + ILT_PER_FUNC; i++)
6545                 bnx2x_ilt_wr(bp, i, 0);
6546 }
6547
6548 static void bnx2x_reset_port(struct bnx2x *bp)
6549 {
6550         int port = BP_PORT(bp);
6551         u32 val;
6552
6553         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6554
6555         /* Do not rcv packets to BRB */
6556         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6557         /* Do not direct rcv packets that are not for MCP to the BRB */
6558         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6559                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6560
6561         /* Configure AEU */
6562         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6563
6564         msleep(100);
6565         /* Check for BRB port occupancy */
6566         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6567         if (val)
6568                 DP(NETIF_MSG_IFDOWN,
6569                    "BRB1 is not empty  %d blocks are occupied\n", val);
6570
6571         /* TODO: Close Doorbell port? */
6572 }
6573
6574 static void bnx2x_reset_common(struct bnx2x *bp)
6575 {
6576         /* reset_common */
6577         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6578                0xd3ffff7f);
6579         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6580 }
6581
6582 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6583 {
6584         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
6585            BP_FUNC(bp), reset_code);
6586
6587         switch (reset_code) {
6588         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6589                 bnx2x_reset_port(bp);
6590                 bnx2x_reset_func(bp);
6591                 bnx2x_reset_common(bp);
6592                 break;
6593
6594         case FW_MSG_CODE_DRV_UNLOAD_PORT:
6595                 bnx2x_reset_port(bp);
6596                 bnx2x_reset_func(bp);
6597                 break;
6598
6599         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6600                 bnx2x_reset_func(bp);
6601                 break;
6602
6603         default:
6604                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6605                 break;
6606         }
6607 }
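     /* note the nesting: UNLOAD_COMMON resets port + function + common,
      * UNLOAD_PORT resets port + function, UNLOAD_FUNCTION only the
      * function */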
6608
6609 /* must be called with rtnl_lock */
6610 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6611 {
6612         int port = BP_PORT(bp);
6613         u32 reset_code = 0;
6614         int i, cnt, rc;
6615
6616         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6617
6618         bp->rx_mode = BNX2X_RX_MODE_NONE;
6619         bnx2x_set_storm_rx_mode(bp);
6620
6621         if (netif_running(bp->dev)) {
6622                 netif_tx_disable(bp->dev);
6623                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6624         }
6625
6626         del_timer_sync(&bp->timer);
6627         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6628                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
6629         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
6630
6631         /* Wait until tx fast path tasks complete */
6632         for_each_queue(bp, i) {
6633                 struct bnx2x_fastpath *fp = &bp->fp[i];
6634
6635                 cnt = 1000;
6636                 smp_rmb();
6637                 while (BNX2X_HAS_TX_WORK(fp)) {
6638
6639                         if (!netif_running(bp->dev))
6640                                 bnx2x_tx_int(fp, 1000);
6641
6642                         if (!cnt) {
6643                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
6644                                           i);
6645 #ifdef BNX2X_STOP_ON_ERROR
6646                                 bnx2x_panic();
6647                                 return -EBUSY;
6648 #else
6649                                 break;
6650 #endif
6651                         }
6652                         cnt--;
6653                         msleep(1);
6654                         smp_rmb();
6655                 }
6656         }
6657
6658         /* Give HW time to discard old tx messages */
6659         msleep(1);
6660
6661         for_each_queue(bp, i)
6662                 napi_disable(&bnx2x_fp(bp, i, napi));
6663         /* Disable interrupts after Tx and Rx are disabled on stack level */
6664         bnx2x_int_disable_sync(bp);
6665
6666         /* Release IRQs */
6667         bnx2x_free_irq(bp);
6668
6669         if (unload_mode == UNLOAD_NORMAL)
6670                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6671
6672         else if (bp->flags & NO_WOL_FLAG) {
6673                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6674                 if (CHIP_IS_E1H(bp))
6675                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6676
6677         } else if (bp->wol) {
6678                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6679                 u8 *mac_addr = bp->dev->dev_addr;
6680                 u32 val;
6681                 /* The mac address is written to entries 1-4 to
6682                    preserve entry 0 which is used by the PMF */
6683                 u8 entry = (BP_E1HVN(bp) + 1)*8;
6684
6685                 val = (mac_addr[0] << 8) | mac_addr[1];
6686                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry, val);
6687
6688                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6689                       (mac_addr[4] << 8) | mac_addr[5];
6690                 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6691
6692                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6693
6694         } else
6695                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6696
6697         if (CHIP_IS_E1(bp)) {
6698                 struct mac_configuration_cmd *config =
6699                                                 bnx2x_sp(bp, mcast_config);
6700
6701                 bnx2x_set_mac_addr_e1(bp, 0);
6702
6703                 for (i = 0; i < config->hdr.length_6b; i++)
6704                         CAM_INVALIDATE(config->config_table[i]);
6705
6706                 config->hdr.length_6b = i;
6707                 if (CHIP_REV_IS_SLOW(bp))
6708                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6709                 else
6710                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6711                 config->hdr.client_id = BP_CL_ID(bp);
6712                 config->hdr.reserved1 = 0;
6713
6714                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6715                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6716                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6717
6718         } else { /* E1H */
6719                 bnx2x_set_mac_addr_e1h(bp, 0);
6720
6721                 for (i = 0; i < MC_HASH_SIZE; i++)
6722                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6723         }
6724
6725         if (CHIP_IS_E1H(bp))
6726                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6727
6728         /* Close multi and leading connections;
6729            completions for the ramrods are collected synchronously */
6730         for_each_nondefault_queue(bp, i)
6731                 if (bnx2x_stop_multi(bp, i))
6732                         goto unload_error;
6733
6734         rc = bnx2x_stop_leading(bp);
6735         if (rc) {
6736                 BNX2X_ERR("Stop leading failed!\n");
6737 #ifdef BNX2X_STOP_ON_ERROR
6738                 return -EBUSY;
6739 #else
6740                 goto unload_error;
6741 #endif
6742         }
6743
6744 unload_error:
6745         if (!BP_NOMCP(bp))
6746                 reset_code = bnx2x_fw_command(bp, reset_code);
6747         else {
6748                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
6749                    load_count[0], load_count[1], load_count[2]);
6750                 load_count[0]--;
6751                 load_count[1 + port]--;
6752                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
6753                    load_count[0], load_count[1], load_count[2]);
6754                 if (load_count[0] == 0)
6755                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6756                 else if (load_count[1 + port] == 0)
6757                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6758                 else
6759                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6760         }
6761
6762         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6763             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6764                 bnx2x__link_reset(bp);
6765
6766         /* Reset the chip */
6767         bnx2x_reset_chip(bp, reset_code);
6768
6769         /* Report UNLOAD_DONE to MCP */
6770         if (!BP_NOMCP(bp))
6771                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6772
6773         /* Free SKBs, SGEs, TPA pool and driver internals */
6774         bnx2x_free_skbs(bp);
6775         for_each_queue(bp, i)
6776                 bnx2x_free_rx_sge_range(bp, bp->fp + i,
6777                                         RX_SGE_CNT*NUM_RX_SGE_PAGES);
6778         bnx2x_free_mem(bp);
6779
6780         bp->state = BNX2X_STATE_CLOSED;
6781
6782         netif_carrier_off(bp->dev);
6783
6784         return 0;
6785 }
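/* Editor's note -- a minimal sketch, not driver code: with no MCP the
 * driver does its own load bookkeeping above.  load_count[0] counts all
 * loaded functions, load_count[1 + port] counts functions per port, and
 * the last one out at each level picks the COMMON/PORT/FUNCTION unload
 * code.  The helper name below is illustrative only.
 */
#if 0	/* illustrative sketch */
static u32 toy_unload_code(int port)
{
	load_count[0]--;		/* one fewer function overall */
	load_count[1 + port]--;		/* one fewer on this port */

	if (load_count[0] == 0)		/* last function of the device */
		return FW_MSG_CODE_DRV_UNLOAD_COMMON;
	if (load_count[1 + port] == 0)	/* last function on this port */
		return FW_MSG_CODE_DRV_UNLOAD_PORT;
	return FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
}
#endif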
6786
6787 static void bnx2x_reset_task(struct work_struct *work)
6788 {
6789         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6790
6791 #ifdef BNX2X_STOP_ON_ERROR
6792         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
6793                   " so reset not done to allow debug dump\n"
6794          KERN_ERR " you will need to reboot when done\n");
6795         return;
6796 #endif
6797
6798         rtnl_lock();
6799
6800         if (!netif_running(bp->dev))
6801                 goto reset_task_exit;
6802
6803         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6804         bnx2x_nic_load(bp, LOAD_NORMAL);
6805
6806 reset_task_exit:
6807         rtnl_unlock();
6808 }
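/* Editor's note -- a minimal sketch, not driver code: bnx2x_reset_task()
 * recovers its struct bnx2x with the standard container_of() pattern, so
 * the work handler needs no global lookup.  The toy struct and names below
 * are illustrative only.
 */
#if 0	/* illustrative sketch */
struct toy_dev {
	int			id;
	struct work_struct	task;	/* embedded work item */
};

static void toy_task(struct work_struct *work)
{
	/* recover the enclosing toy_dev from its embedded work_struct */
	struct toy_dev *td = container_of(work, struct toy_dev, task);

	printk(KERN_INFO "toy_task for dev %d\n", td->id);
}
/* setup: INIT_WORK(&td->task, toy_task);  kick: schedule_work(&td->task); */
#endif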
6809
6810 /* end of nic load/unload */
6811
6812 /* ethtool_ops */
6813
6814 /*
6815  * Init service functions
6816  */
6817
6818 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6819 {
6820         u32 val;
6821
6822         /* Check if there is any driver already loaded */
6823         val = REG_RD(bp, MISC_REG_UNPREPARED);
6824         if (val == 0x1) {
6825                 /* Check if it is the UNDI driver
6826                  * UNDI driver initializes CID offset for the normal doorbell to 0x7
6827                  */
6828                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6829                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6830                 if (val == 0x7) {
6831                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6832                         /* save our func */
6833                         int func = BP_FUNC(bp);
6834                         u32 swap_en;
6835                         u32 swap_val;
6836
6837                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
6838
6839                         /* try unload UNDI on port 0 */
6840                         bp->func = 0;
6841                         bp->fw_seq =
6842                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6843                                 DRV_MSG_SEQ_NUMBER_MASK);
6844                         reset_code = bnx2x_fw_command(bp, reset_code);
6845
6846                         /* if UNDI is loaded on the other port */
6847                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6848
6849                                 /* send "DONE" for previous unload */
6850                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6851
6852                                 /* unload UNDI on port 1 */
6853                                 bp->func = 1;
6854                                 bp->fw_seq =
6855                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6856                                         DRV_MSG_SEQ_NUMBER_MASK);
6857                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6858
6859                                 bnx2x_fw_command(bp, reset_code);
6860                         }
6861
6862                         REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6863                                     HC_REG_CONFIG_0), 0x1000);
6864
6865                         /* close input traffic and wait for it */
6866                         /* Do not rcv packets to BRB */
6867                         REG_WR(bp,
6868                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6869                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6870                         /* Do not direct rcv packets that are not for MCP to
6871                          * the BRB */
6872                         REG_WR(bp,
6873                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6874                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6875                         /* clear AEU */
6876                         REG_WR(bp,
6877                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6878                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6879                         msleep(10);
6880
6881                         /* save NIG port swap info */
6882                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6883                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6884                         /* reset device */
6885                         REG_WR(bp,
6886                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6887                                0xd3ffffff);
6888                         REG_WR(bp,
6889                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6890                                0x1403);
6891                         /* take the NIG out of reset and restore swap values */
6892                         REG_WR(bp,
6893                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6894                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
6895                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6896                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6897
6898                         /* send unload done to the MCP */
6899                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6900
6901                         /* restore our func and fw_seq */
6902                         bp->func = func;
6903                         bp->fw_seq =
6904                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6905                                 DRV_MSG_SEQ_NUMBER_MASK);
6906                 }
6907                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6908         }
6909 }
6910
6911 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6912 {
6913         u32 val, val2, val3, val4, id;
6914         u16 pmc;
6915
6916         /* Get the chip revision id and number. */
6917         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6918         val = REG_RD(bp, MISC_REG_CHIP_NUM);
6919         id = ((val & 0xffff) << 16);
6920         val = REG_RD(bp, MISC_REG_CHIP_REV);
6921         id |= ((val & 0xf) << 12);
6922         val = REG_RD(bp, MISC_REG_CHIP_METAL);
6923         id |= ((val & 0xff) << 4);
6924         val = REG_RD(bp, MISC_REG_BOND_ID);
6925         id |= (val & 0xf);
6926         bp->common.chip_id = id;
6927         bp->link_params.chip_id = bp->common.chip_id;
6928         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
6929
6930         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6931         bp->common.flash_size = (NVRAM_1MB_SIZE <<
6932                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
6933         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6934                        bp->common.flash_size, bp->common.flash_size);
6935
6936         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6937         bp->link_params.shmem_base = bp->common.shmem_base;
6938         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6939
6940         if (!bp->common.shmem_base ||
6941             (bp->common.shmem_base < 0xA0000) ||
6942             (bp->common.shmem_base >= 0xC0000)) {
6943                 BNX2X_DEV_INFO("MCP not active\n");
6944                 bp->flags |= NO_MCP_FLAG;
6945                 return;
6946         }
6947
6948         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6949         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6950                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6951                 BNX2X_ERR("BAD MCP validity signature\n");
6952
6953         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6954         bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6955
6956         BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
6957                        bp->common.hw_config, bp->common.board);
6958
6959         bp->link_params.hw_led_mode = ((bp->common.hw_config &
6960                                         SHARED_HW_CFG_LED_MODE_MASK) >>
6961                                        SHARED_HW_CFG_LED_MODE_SHIFT);
6962
6963         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6964         bp->common.bc_ver = val;
6965         BNX2X_DEV_INFO("bc_ver %X\n", val);
6966         if (val < BNX2X_BC_VER) {
6967                 /* for now only warn;
6968                  * later we might need to enforce this */
6969                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6970                           " please upgrade BC\n", BNX2X_BC_VER, val);
6971         }
6972
6973         if (BP_E1HVN(bp) == 0) {
6974                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6975                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6976         } else {
6977                 /* no WOL capability for E1HVN != 0 */
6978                 bp->flags |= NO_WOL_FLAG;
6979         }
6980         BNX2X_DEV_INFO("%sWoL capable\n",
6981                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6982
6983         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6984         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6985         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6986         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6987
6988         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6989                val, val2, val3, val4);
6990 }
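/* Editor's note -- a minimal sketch, not driver API: the chip_id word
 * assembled above packs four fields (num:16-31, rev:12-15, metal:4-11,
 * bond_id:0-3).  Hypothetical decode macros for that layout:
 */
#if 0	/* illustrative sketch */
#define TOY_CHIP_NUM(id)	(((id) >> 16) & 0xffff)
#define TOY_CHIP_REV(id)	(((id) >> 12) & 0xf)
#define TOY_CHIP_METAL(id)	(((id) >> 4) & 0xff)
#define TOY_CHIP_BOND(id)	((id) & 0xf)
/* e.g. id 0x164e0001 -> num 0x164e, rev 0x0, metal 0x00, bond_id 0x1 */
#endif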
6991
6992 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6993                                                     u32 switch_cfg)
6994 {
6995         int port = BP_PORT(bp);
6996         u32 ext_phy_type;
6997
6998         switch (switch_cfg) {
6999         case SWITCH_CFG_1G:
7000                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7001
7002                 ext_phy_type =
7003                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7004                 switch (ext_phy_type) {
7005                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7006                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7007                                        ext_phy_type);
7008
7009                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7010                                                SUPPORTED_10baseT_Full |
7011                                                SUPPORTED_100baseT_Half |
7012                                                SUPPORTED_100baseT_Full |
7013                                                SUPPORTED_1000baseT_Full |
7014                                                SUPPORTED_2500baseX_Full |
7015                                                SUPPORTED_TP |
7016                                                SUPPORTED_FIBRE |
7017                                                SUPPORTED_Autoneg |
7018                                                SUPPORTED_Pause |
7019                                                SUPPORTED_Asym_Pause);
7020                         break;
7021
7022                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7023                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7024                                        ext_phy_type);
7025
7026                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7027                                                SUPPORTED_10baseT_Full |
7028                                                SUPPORTED_100baseT_Half |
7029                                                SUPPORTED_100baseT_Full |
7030                                                SUPPORTED_1000baseT_Full |
7031                                                SUPPORTED_TP |
7032                                                SUPPORTED_FIBRE |
7033                                                SUPPORTED_Autoneg |
7034                                                SUPPORTED_Pause |
7035                                                SUPPORTED_Asym_Pause);
7036                         break;
7037
7038                 default:
7039                         BNX2X_ERR("NVRAM config error. "
7040                                   "BAD SerDes ext_phy_config 0x%x\n",
7041                                   bp->link_params.ext_phy_config);
7042                         return;
7043                 }
7044
7045                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7046                                            port*0x10);
7047                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7048                 break;
7049
7050         case SWITCH_CFG_10G:
7051                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7052
7053                 ext_phy_type =
7054                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7055                 switch (ext_phy_type) {
7056                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7057                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7058                                        ext_phy_type);
7059
7060                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7061                                                SUPPORTED_10baseT_Full |
7062                                                SUPPORTED_100baseT_Half |
7063                                                SUPPORTED_100baseT_Full |
7064                                                SUPPORTED_1000baseT_Full |
7065                                                SUPPORTED_2500baseX_Full |
7066                                                SUPPORTED_10000baseT_Full |
7067                                                SUPPORTED_TP |
7068                                                SUPPORTED_FIBRE |
7069                                                SUPPORTED_Autoneg |
7070                                                SUPPORTED_Pause |
7071                                                SUPPORTED_Asym_Pause);
7072                         break;
7073
7074                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7075                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7076                                        ext_phy_type);
7077
7078                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7079                                                SUPPORTED_FIBRE |
7080                                                SUPPORTED_Pause |
7081                                                SUPPORTED_Asym_Pause);
7082                         break;
7083
7084                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7085                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7086                                        ext_phy_type);
7087
7088                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7089                                                SUPPORTED_1000baseT_Full |
7090                                                SUPPORTED_FIBRE |
7091                                                SUPPORTED_Pause |
7092                                                SUPPORTED_Asym_Pause);
7093                         break;
7094
7095                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7096                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7097                                        ext_phy_type);
7098
7099                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7100                                                SUPPORTED_1000baseT_Full |
7101                                                SUPPORTED_FIBRE |
7102                                                SUPPORTED_Autoneg |
7103                                                SUPPORTED_Pause |
7104                                                SUPPORTED_Asym_Pause);
7105                         break;
7106
7107                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7108                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7109                                        ext_phy_type);
7110
7111                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7112                                                SUPPORTED_2500baseX_Full |
7113                                                SUPPORTED_1000baseT_Full |
7114                                                SUPPORTED_FIBRE |
7115                                                SUPPORTED_Autoneg |
7116                                                SUPPORTED_Pause |
7117                                                SUPPORTED_Asym_Pause);
7118                         break;
7119
7120                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7121                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7122                                        ext_phy_type);
7123
7124                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7125                                                SUPPORTED_TP |
7126                                                SUPPORTED_Autoneg |
7127                                                SUPPORTED_Pause |
7128                                                SUPPORTED_Asym_Pause);
7129                         break;
7130
7131                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7132                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7133                                   bp->link_params.ext_phy_config);
7134                         break;
7135
7136                 default:
7137                         BNX2X_ERR("NVRAM config error. "
7138                                   "BAD XGXS ext_phy_config 0x%x\n",
7139                                   bp->link_params.ext_phy_config);
7140                         return;
7141                 }
7142
7143                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7144                                            port*0x18);
7145                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7146
7147                 break;
7148
7149         default:
7150                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7151                           bp->port.link_config);
7152                 return;
7153         }
7154         bp->link_params.phy_addr = bp->port.phy_addr;
7155
7156         /* mask what we support according to speed_cap_mask */
7157         if (!(bp->link_params.speed_cap_mask &
7158                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7159                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7160
7161         if (!(bp->link_params.speed_cap_mask &
7162                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7163                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7164
7165         if (!(bp->link_params.speed_cap_mask &
7166                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7167                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7168
7169         if (!(bp->link_params.speed_cap_mask &
7170                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7171                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7172
7173         if (!(bp->link_params.speed_cap_mask &
7174                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7175                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7176                                         SUPPORTED_1000baseT_Full);
7177
7178         if (!(bp->link_params.speed_cap_mask &
7179                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7180                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7181
7182         if (!(bp->link_params.speed_cap_mask &
7183                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7184                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7185
7186         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7187 }
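/* Editor's note -- a minimal sketch, not driver code: the tail of the
 * function above clears SUPPORTED_* bits one mask at a time.  The same
 * logic can be written table-driven; the table and helper name below are
 * illustrative only (only two rows shown).
 */
#if 0	/* illustrative sketch */
static const struct {
	u32 cap_bit;		/* PORT_HW_CFG_SPEED_CAPABILITY_D0_* bit */
	u32 supported_bits;	/* SUPPORTED_* bits it gates */
} toy_speed_map[] = {
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF, SUPPORTED_10baseT_Half },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G,
	  SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full },
	/* ... remaining speeds ... */
};

static void toy_mask_supported(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(toy_speed_map); i++)
		if (!(bp->link_params.speed_cap_mask & toy_speed_map[i].cap_bit))
			bp->port.supported &= ~toy_speed_map[i].supported_bits;
}
#endif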
7188
7189 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7190 {
7191         bp->link_params.req_duplex = DUPLEX_FULL;
7192
7193         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7194         case PORT_FEATURE_LINK_SPEED_AUTO:
7195                 if (bp->port.supported & SUPPORTED_Autoneg) {
7196                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7197                         bp->port.advertising = bp->port.supported;
7198                 } else {
7199                         u32 ext_phy_type =
7200                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7201
7202                         if ((ext_phy_type ==
7203                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7204                             (ext_phy_type ==
7205                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7206                                 /* force 10G, no AN */
7207                                 bp->link_params.req_line_speed = SPEED_10000;
7208                                 bp->port.advertising =
7209                                                 (ADVERTISED_10000baseT_Full |
7210                                                  ADVERTISED_FIBRE);
7211                                 break;
7212                         }
7213                         BNX2X_ERR("NVRAM config error. "
7214                                   "Invalid link_config 0x%x"
7215                                   "  Autoneg not supported\n",
7216                                   bp->port.link_config);
7217                         return;
7218                 }
7219                 break;
7220
7221         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7222                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7223                         bp->link_params.req_line_speed = SPEED_10;
7224                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7225                                                 ADVERTISED_TP);
7226                 } else {
7227                         BNX2X_ERR("NVRAM config error. "
7228                                   "Invalid link_config 0x%x"
7229                                   "  speed_cap_mask 0x%x\n",
7230                                   bp->port.link_config,
7231                                   bp->link_params.speed_cap_mask);
7232                         return;
7233                 }
7234                 break;
7235
7236         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7237                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7238                         bp->link_params.req_line_speed = SPEED_10;
7239                         bp->link_params.req_duplex = DUPLEX_HALF;
7240                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7241                                                 ADVERTISED_TP);
7242                 } else {
7243                         BNX2X_ERR("NVRAM config error. "
7244                                   "Invalid link_config 0x%x"
7245                                   "  speed_cap_mask 0x%x\n",
7246                                   bp->port.link_config,
7247                                   bp->link_params.speed_cap_mask);
7248                         return;
7249                 }
7250                 break;
7251
7252         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7253                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7254                         bp->link_params.req_line_speed = SPEED_100;
7255                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7256                                                 ADVERTISED_TP);
7257                 } else {
7258                         BNX2X_ERR("NVRAM config error. "
7259                                   "Invalid link_config 0x%x"
7260                                   "  speed_cap_mask 0x%x\n",
7261                                   bp->port.link_config,
7262                                   bp->link_params.speed_cap_mask);
7263                         return;
7264                 }
7265                 break;
7266
7267         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7268                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7269                         bp->link_params.req_line_speed = SPEED_100;
7270                         bp->link_params.req_duplex = DUPLEX_HALF;
7271                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7272                                                 ADVERTISED_TP);
7273                 } else {
7274                         BNX2X_ERR("NVRAM config error. "
7275                                   "Invalid link_config 0x%x"
7276                                   "  speed_cap_mask 0x%x\n",
7277                                   bp->port.link_config,
7278                                   bp->link_params.speed_cap_mask);
7279                         return;
7280                 }
7281                 break;
7282
7283         case PORT_FEATURE_LINK_SPEED_1G:
7284                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7285                         bp->link_params.req_line_speed = SPEED_1000;
7286                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7287                                                 ADVERTISED_TP);
7288                 } else {
7289                         BNX2X_ERR("NVRAM config error. "
7290                                   "Invalid link_config 0x%x"
7291                                   "  speed_cap_mask 0x%x\n",
7292                                   bp->port.link_config,
7293                                   bp->link_params.speed_cap_mask);
7294                         return;
7295                 }
7296                 break;
7297
7298         case PORT_FEATURE_LINK_SPEED_2_5G:
7299                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7300                         bp->link_params.req_line_speed = SPEED_2500;
7301                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7302                                                 ADVERTISED_TP);
7303                 } else {
7304                         BNX2X_ERR("NVRAM config error. "
7305                                   "Invalid link_config 0x%x"
7306                                   "  speed_cap_mask 0x%x\n",
7307                                   bp->port.link_config,
7308                                   bp->link_params.speed_cap_mask);
7309                         return;
7310                 }
7311                 break;
7312
7313         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7314         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7315         case PORT_FEATURE_LINK_SPEED_10G_KR:
7316                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7317                         bp->link_params.req_line_speed = SPEED_10000;
7318                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7319                                                 ADVERTISED_FIBRE);
7320                 } else {
7321                         BNX2X_ERR("NVRAM config error. "
7322                                   "Invalid link_config 0x%x"
7323                                   "  speed_cap_mask 0x%x\n",
7324                                   bp->port.link_config,
7325                                   bp->link_params.speed_cap_mask);
7326                         return;
7327                 }
7328                 break;
7329
7330         default:
7331                 BNX2X_ERR("NVRAM config error. "
7332                           "BAD link speed link_config 0x%x\n",
7333                           bp->port.link_config);
7334                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7335                 bp->port.advertising = bp->port.supported;
7336                 break;
7337         }
7338
7339         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7340                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7341         if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7342             !(bp->port.supported & SUPPORTED_Autoneg))
7343                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7344
7345         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7346                        "  advertising 0x%x\n",
7347                        bp->link_params.req_line_speed,
7348                        bp->link_params.req_duplex,
7349                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7350 }
7351
7352 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7353 {
7354         int port = BP_PORT(bp);
7355         u32 val, val2;
7356
7357         bp->link_params.bp = bp;
7358         bp->link_params.port = port;
7359
7360         bp->link_params.serdes_config =
7361                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7362         bp->link_params.lane_config =
7363                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7364         bp->link_params.ext_phy_config =
7365                 SHMEM_RD(bp,
7366                          dev_info.port_hw_config[port].external_phy_config);
7367         bp->link_params.speed_cap_mask =
7368                 SHMEM_RD(bp,
7369                          dev_info.port_hw_config[port].speed_capability_mask);
7370
7371         bp->port.link_config =
7372                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7373
7374         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7375              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7376                        "  link_config 0x%08x\n",
7377                        bp->link_params.serdes_config,
7378                        bp->link_params.lane_config,
7379                        bp->link_params.ext_phy_config,
7380                        bp->link_params.speed_cap_mask, bp->port.link_config);
7381
7382         bp->link_params.switch_cfg = (bp->port.link_config &
7383                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7384         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7385
7386         bnx2x_link_settings_requested(bp);
7387
7388         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7389         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7390         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7391         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7392         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7393         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7394         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7395         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7396         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7397         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7398 }
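/* Editor's note -- a minimal sketch, not driver code: the MAC address is
 * kept in shmem as two 32-bit words, the first two octets in the low half
 * of mac_upper and the last four in mac_lower, most significant octet
 * first.  The helper name below is illustrative only.
 */
#if 0	/* illustrative sketch */
static void toy_unpack_mac(u32 upper, u32 lower, u8 *mac)
{
	mac[0] = (u8)(upper >> 8);	/* bits 15:8 of mac_upper */
	mac[1] = (u8)upper;		/* bits  7:0 of mac_upper */
	mac[2] = (u8)(lower >> 24);
	mac[3] = (u8)(lower >> 16);
	mac[4] = (u8)(lower >> 8);
	mac[5] = (u8)lower;
	/* e.g. upper 0x0010, lower 0x18012345 -> 00:10:18:01:23:45 */
}
#endif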
7399
7400 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7401 {
7402         int func = BP_FUNC(bp);
7403         u32 val, val2;
7404         int rc = 0;
7405
7406         bnx2x_get_common_hwinfo(bp);
7407
7408         bp->e1hov = 0;
7409         bp->e1hmf = 0;
7410         if (CHIP_IS_E1H(bp)) {
7411                 bp->mf_config =
7412                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7413
7414                 val =
7415                    (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7416                     FUNC_MF_CFG_E1HOV_TAG_MASK);
7417                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7418
7419                         bp->e1hov = val;
7420                         bp->e1hmf = 1;
7421                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
7422                                        "(0x%04x)\n",
7423                                        func, bp->e1hov, bp->e1hov);
7424                 } else {
7425                         BNX2X_DEV_INFO("Single function mode\n");
7426                         if (BP_E1HVN(bp)) {
7427                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
7428                                           "  aborting\n", func);
7429                                 rc = -EPERM;
7430                         }
7431                 }
7432         }
7433
7434         if (!BP_NOMCP(bp)) {
7435                 bnx2x_get_port_hwinfo(bp);
7436
7437                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7438                               DRV_MSG_SEQ_NUMBER_MASK);
7439                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7440         }
7441
7442         if (IS_E1HMF(bp)) {
7443                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7444                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
7445                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7446                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7447                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7448                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7449                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7450                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7451                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
7452                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
7453                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7454                                ETH_ALEN);
7455                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7456                                ETH_ALEN);
7457                 }
7458
7459                 return rc;
7460         }
7461
7462         if (BP_NOMCP(bp)) {
7463                 /* only supposed to happen on emulation/FPGA */
7464                 BNX2X_ERR("warning: random MAC workaround active\n");
7465                 random_ether_addr(bp->dev->dev_addr);
7466                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7467         }
7468
7469         return rc;
7470 }
7471
7472 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7473 {
7474         int func = BP_FUNC(bp);
7475         int rc;
7476
7477         /* Disable interrupt handling until HW is initialized */
7478         atomic_set(&bp->intr_sem, 1);
7479
7480         mutex_init(&bp->port.phy_mutex);
7481
7482         INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7483         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7484
7485         rc = bnx2x_get_hwinfo(bp);
7486
7487         /* need to reset chip if undi was active */
7488         if (!BP_NOMCP(bp))
7489                 bnx2x_undi_unload(bp);
7490
7491         if (CHIP_REV_IS_FPGA(bp))
7492                 printk(KERN_ERR PFX "FPGA detected\n");
7493
7494         if (BP_NOMCP(bp) && (func == 0))
7495                 printk(KERN_ERR PFX
7496                        "MCP disabled, must load devices in order!\n");
7497
7498         /* Set TPA flags */
7499         if (disable_tpa) {
7500                 bp->flags &= ~TPA_ENABLE_FLAG;
7501                 bp->dev->features &= ~NETIF_F_LRO;
7502         } else {
7503                 bp->flags |= TPA_ENABLE_FLAG;
7504                 bp->dev->features |= NETIF_F_LRO;
7505         }
7506
7507
7508         bp->tx_ring_size = MAX_TX_AVAIL;
7509         bp->rx_ring_size = MAX_RX_AVAIL;
7510
7511         bp->rx_csum = 1;
7512         bp->rx_offset = 0;
7513
7514         bp->tx_ticks = 50;
7515         bp->rx_ticks = 25;
7516
7517         bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7518         bp->current_interval = (poll ? poll : bp->timer_interval);
7519
7520         init_timer(&bp->timer);
7521         bp->timer.expires = jiffies + bp->current_interval;
7522         bp->timer.data = (unsigned long) bp;
7523         bp->timer.function = bnx2x_timer;
7524
7525         return rc;
7526 }
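/* Editor's note -- a minimal sketch, not driver code: bnx2x_init_bp()
 * arms its periodic timer with the classic init_timer()/add_timer()
 * interface of this kernel era, passing the device through .data.  The
 * names below are illustrative only.
 */
#if 0	/* illustrative sketch */
static void toy_timer_fn(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *)data;

	/* ... periodic work ... */
	mod_timer(&bp->timer, jiffies + bp->current_interval);	/* re-arm */
}

static void toy_timer_start(struct bnx2x *bp)
{
	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long)bp;
	bp->timer.function = toy_timer_fn;
	add_timer(&bp->timer);	/* pair with del_timer_sync() on teardown */
}
#endif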
7527
7528 /*
7529  * ethtool service functions
7530  */
7531
7532 /* All ethtool functions called with rtnl_lock */
7533
7534 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7535 {
7536         struct bnx2x *bp = netdev_priv(dev);
7537
7538         cmd->supported = bp->port.supported;
7539         cmd->advertising = bp->port.advertising;
7540
7541         if (netif_carrier_ok(dev)) {
7542                 cmd->speed = bp->link_vars.line_speed;
7543                 cmd->duplex = bp->link_vars.duplex;
7544         } else {
7545                 cmd->speed = bp->link_params.req_line_speed;
7546                 cmd->duplex = bp->link_params.req_duplex;
7547         }
7548         if (IS_E1HMF(bp)) {
7549                 u16 vn_max_rate;
7550
7551                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7552                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7553                 if (vn_max_rate < cmd->speed)
7554                         cmd->speed = vn_max_rate;
7555         }
7556
7557         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7558                 u32 ext_phy_type =
7559                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7560
7561                 switch (ext_phy_type) {
7562                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7563                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7564                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7565                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7566                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7567                         cmd->port = PORT_FIBRE;
7568                         break;
7569
7570                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7571                         cmd->port = PORT_TP;
7572                         break;
7573
7574                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7575                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7576                                   bp->link_params.ext_phy_config);
7577                         break;
7578
7579                 default:
7580                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7581                            bp->link_params.ext_phy_config);
7582                         break;
7583                 }
7584         } else
7585                 cmd->port = PORT_TP;
7586
7587         cmd->phy_address = bp->port.phy_addr;
7588         cmd->transceiver = XCVR_INTERNAL;
7589
7590         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7591                 cmd->autoneg = AUTONEG_ENABLE;
7592         else
7593                 cmd->autoneg = AUTONEG_DISABLE;
7594
7595         cmd->maxtxpkt = 0;
7596         cmd->maxrxpkt = 0;
7597
7598         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7599            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7600            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7601            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7602            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7603            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7604            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7605
7606         return 0;
7607 }
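/* Editor's note -- a minimal sketch, not driver code: in E1H
 * multi-function mode the per-VN ceiling applied above is encoded in
 * mf_config in units of 100 Mbps, so a field value of 25 caps the
 * reported speed at 2500.  The helper name below is illustrative only.
 */
#if 0	/* illustrative sketch */
static u16 toy_vn_max_rate(u32 mf_config)
{
	/* e.g. field value 25 -> 25 * 100 = 2500 Mbps */
	return ((mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
		FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
}
#endif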
7608
7609 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7610 {
7611         struct bnx2x *bp = netdev_priv(dev);
7612         u32 advertising;
7613
7614         if (IS_E1HMF(bp))
7615                 return 0;
7616
7617         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7618            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
7619            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
7620            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
7621            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7622            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7623            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7624
7625         if (cmd->autoneg == AUTONEG_ENABLE) {
7626                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7627                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7628                         return -EINVAL;
7629                 }
7630
7631                 /* advertise the requested speed and duplex if supported */
7632                 cmd->advertising &= bp->port.supported;
7633
7634                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7635                 bp->link_params.req_duplex = DUPLEX_FULL;
7636                 bp->port.advertising |= (ADVERTISED_Autoneg |
7637                                          cmd->advertising);
7638
7639         } else { /* forced speed */
7640                 /* advertise the requested speed and duplex if supported */
7641                 switch (cmd->speed) {
7642                 case SPEED_10:
7643                         if (cmd->duplex == DUPLEX_FULL) {
7644                                 if (!(bp->port.supported &
7645                                       SUPPORTED_10baseT_Full)) {
7646                                         DP(NETIF_MSG_LINK,
7647                                            "10M full not supported\n");
7648                                         return -EINVAL;
7649                                 }
7650
7651                                 advertising = (ADVERTISED_10baseT_Full |
7652                                                ADVERTISED_TP);
7653                         } else {
7654                                 if (!(bp->port.supported &
7655                                       SUPPORTED_10baseT_Half)) {
7656                                         DP(NETIF_MSG_LINK,
7657                                            "10M half not supported\n");
7658                                         return -EINVAL;
7659                                 }
7660
7661                                 advertising = (ADVERTISED_10baseT_Half |
7662                                                ADVERTISED_TP);
7663                         }
7664                         break;
7665
7666                 case SPEED_100:
7667                         if (cmd->duplex == DUPLEX_FULL) {
7668                                 if (!(bp->port.supported &
7669                                                 SUPPORTED_100baseT_Full)) {
7670                                         DP(NETIF_MSG_LINK,
7671                                            "100M full not supported\n");
7672                                         return -EINVAL;
7673                                 }
7674
7675                                 advertising = (ADVERTISED_100baseT_Full |
7676                                                ADVERTISED_TP);
7677                         } else {
7678                                 if (!(bp->port.supported &
7679                                                 SUPPORTED_100baseT_Half)) {
7680                                         DP(NETIF_MSG_LINK,
7681                                            "100M half not supported\n");
7682                                         return -EINVAL;
7683                                 }
7684
7685                                 advertising = (ADVERTISED_100baseT_Half |
7686                                                ADVERTISED_TP);
7687                         }
7688                         break;
7689
7690                 case SPEED_1000:
7691                         if (cmd->duplex != DUPLEX_FULL) {
7692                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
7693                                 return -EINVAL;
7694                         }
7695
7696                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7697                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
7698                                 return -EINVAL;
7699                         }
7700
7701                         advertising = (ADVERTISED_1000baseT_Full |
7702                                        ADVERTISED_TP);
7703                         break;
7704
7705                 case SPEED_2500:
7706                         if (cmd->duplex != DUPLEX_FULL) {
7707                                 DP(NETIF_MSG_LINK,
7708                                    "2.5G half not supported\n");
7709                                 return -EINVAL;
7710                         }
7711
7712                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7713                                 DP(NETIF_MSG_LINK,
7714                                    "2.5G full not supported\n");
7715                                 return -EINVAL;
7716                         }
7717
7718                         advertising = (ADVERTISED_2500baseX_Full |
7719                                        ADVERTISED_TP);
7720                         break;
7721
7722                 case SPEED_10000:
7723                         if (cmd->duplex != DUPLEX_FULL) {
7724                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
7725                                 return -EINVAL;
7726                         }
7727
7728                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7729                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
7730                                 return -EINVAL;
7731                         }
7732
7733                         advertising = (ADVERTISED_10000baseT_Full |
7734                                        ADVERTISED_FIBRE);
7735                         break;
7736
7737                 default:
7738                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
7739                         return -EINVAL;
7740                 }
7741
7742                 bp->link_params.req_line_speed = cmd->speed;
7743                 bp->link_params.req_duplex = cmd->duplex;
7744                 bp->port.advertising = advertising;
7745         }
7746
7747         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7748            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
7749            bp->link_params.req_line_speed, bp->link_params.req_duplex,
7750            bp->port.advertising);
7751
7752         if (netif_running(dev)) {
7753                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7754                 bnx2x_link_set(bp);
7755         }
7756
7757         return 0;
7758 }
7759
7760 #define PHY_FW_VER_LEN                  10
7761
7762 static void bnx2x_get_drvinfo(struct net_device *dev,
7763                               struct ethtool_drvinfo *info)
7764 {
7765         struct bnx2x *bp = netdev_priv(dev);
7766         u8 phy_fw_ver[PHY_FW_VER_LEN];
7767
7768         strcpy(info->driver, DRV_MODULE_NAME);
7769         strcpy(info->version, DRV_MODULE_VERSION);
7770
7771         phy_fw_ver[0] = '\0';
7772         if (bp->port.pmf) {
7773                 bnx2x_acquire_phy_lock(bp);
7774                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7775                                              (bp->state != BNX2X_STATE_CLOSED),
7776                                              phy_fw_ver, PHY_FW_VER_LEN);
7777                 bnx2x_release_phy_lock(bp);
7778         }
7779
7780         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7781                  (bp->common.bc_ver & 0xff0000) >> 16,
7782                  (bp->common.bc_ver & 0xff00) >> 8,
7783                  (bp->common.bc_ver & 0xff),
7784                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7785         strcpy(info->bus_info, pci_name(bp->pdev));
7786         info->n_stats = BNX2X_NUM_STATS;
7787         info->testinfo_len = BNX2X_NUM_TESTS;
7788         info->eedump_len = bp->common.flash_size;
7789         info->regdump_len = 0;
7790 }
7791
7792 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7793 {
7794         struct bnx2x *bp = netdev_priv(dev);
7795
7796         if (bp->flags & NO_WOL_FLAG) {
7797                 wol->supported = 0;
7798                 wol->wolopts = 0;
7799         } else {
7800                 wol->supported = WAKE_MAGIC;
7801                 if (bp->wol)
7802                         wol->wolopts = WAKE_MAGIC;
7803                 else
7804                         wol->wolopts = 0;
7805         }
7806         memset(&wol->sopass, 0, sizeof(wol->sopass));
7807 }
7808
7809 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7810 {
7811         struct bnx2x *bp = netdev_priv(dev);
7812
7813         if (wol->wolopts & ~WAKE_MAGIC)
7814                 return -EINVAL;
7815
7816         if (wol->wolopts & WAKE_MAGIC) {
7817                 if (bp->flags & NO_WOL_FLAG)
7818                         return -EINVAL;
7819
7820                 bp->wol = 1;
7821         } else
7822                 bp->wol = 0;
7823
7824         return 0;
7825 }
7826
7827 static u32 bnx2x_get_msglevel(struct net_device *dev)
7828 {
7829         struct bnx2x *bp = netdev_priv(dev);
7830
7831         return bp->msglevel;
7832 }
7833
7834 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7835 {
7836         struct bnx2x *bp = netdev_priv(dev);
7837
7838         if (capable(CAP_NET_ADMIN))
7839                 bp->msglevel = level;
7840 }
7841
7842 static int bnx2x_nway_reset(struct net_device *dev)
7843 {
7844         struct bnx2x *bp = netdev_priv(dev);
7845
7846         if (!bp->port.pmf)
7847                 return 0;
7848
7849         if (netif_running(dev)) {
7850                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7851                 bnx2x_link_set(bp);
7852         }
7853
7854         return 0;
7855 }
7856
7857 static int bnx2x_get_eeprom_len(struct net_device *dev)
7858 {
7859         struct bnx2x *bp = netdev_priv(dev);
7860
7861         return bp->common.flash_size;
7862 }
7863
7864 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7865 {
7866         int port = BP_PORT(bp);
7867         int count, i;
7868         u32 val = 0;
7869
7870         /* adjust timeout for emulation/FPGA */
7871         count = NVRAM_TIMEOUT_COUNT;
7872         if (CHIP_REV_IS_SLOW(bp))
7873                 count *= 100;
7874
7875         /* request access to nvram interface */
7876         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7877                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7878
7879         for (i = 0; i < count*10; i++) {
7880                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7881                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7882                         break;
7883
7884                 udelay(5);
7885         }
7886
7887         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7888                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7889                 return -EBUSY;
7890         }
7891
7892         return 0;
7893 }
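/* Editor's note -- a minimal sketch, not driver code: both nvram lock
 * helpers follow the same request-then-poll handshake, with the budget
 * scaled up 100x for slow emulation/FPGA parts.  A generic form of that
 * loop (helper name illustrative only):
 */
#if 0	/* illustrative sketch */
static int toy_poll_bit(struct bnx2x *bp, u32 reg, u32 bit, int want_set)
{
	int count = NVRAM_TIMEOUT_COUNT;
	u32 val;
	int i;

	if (CHIP_REV_IS_SLOW(bp))	/* emulation/FPGA runs slower */
		count *= 100;

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, reg);
		if (!!(val & bit) == !!want_set)
			return 0;	/* handshake completed */
		udelay(5);
	}
	return -EBUSY;			/* hardware never responded */
}
#endif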
7894
7895 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7896 {
7897         int port = BP_PORT(bp);
7898         int count, i;
7899         u32 val = 0;
7900
7901         /* adjust timeout for emulation/FPGA */
7902         count = NVRAM_TIMEOUT_COUNT;
7903         if (CHIP_REV_IS_SLOW(bp))
7904                 count *= 100;
7905
7906         /* relinquish nvram interface */
7907         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7908                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7909
7910         for (i = 0; i < count*10; i++) {
7911                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7912                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7913                         break;
7914
7915                 udelay(5);
7916         }
7917
7918         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7919                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7920                 return -EBUSY;
7921         }
7922
7923         return 0;
7924 }
7925
7926 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7927 {
7928         u32 val;
7929
7930         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7931
7932         /* enable both bits, even on read */
7933         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7934                (val | MCPR_NVM_ACCESS_ENABLE_EN |
7935                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
7936 }
7937
7938 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7939 {
7940         u32 val;
7941
7942         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7943
7944         /* disable both bits, even after read */
7945         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7946                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7947                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7948 }
7949
7950 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7951                                   u32 cmd_flags)
7952 {
7953         int count, i, rc;
7954         u32 val;
7955
7956         /* build the command word */
7957         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7958
7959         /* need to clear DONE bit separately */
7960         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7961
7962         /* address of the NVRAM to read from */
7963         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7964                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7965
7966         /* issue a read command */
7967         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7968
7969         /* adjust timeout for emulation/FPGA */
7970         count = NVRAM_TIMEOUT_COUNT;
7971         if (CHIP_REV_IS_SLOW(bp))
7972                 count *= 100;
7973
7974         /* wait for completion */
7975         *ret_val = 0;
7976         rc = -EBUSY;
7977         for (i = 0; i < count; i++) {
7978                 udelay(5);
7979                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7980
7981                 if (val & MCPR_NVM_COMMAND_DONE) {
7982                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7983                         /* we read nvram data in cpu order
7984                          * but ethtool sees it as an array of bytes;
7985                          * converting to big-endian does the work */
7986                         val = cpu_to_be32(val);
7987                         *ret_val = val;
7988                         rc = 0;
7989                         break;
7990                 }
7991         }
7992
7993         return rc;
7994 }
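/* Editor's note -- a minimal sketch, not driver code: the cpu_to_be32()
 * above stores the dword into the caller's buffer in NVRAM (big-endian)
 * byte order on any host.  Illustrative values:
 */
#if 0	/* illustrative sketch */
static void toy_endian_demo(void)
{
	u32 raw = 0x11223344;		/* dword as read, cpu order */
	u32 be = cpu_to_be32(raw);	/* byte-swap on little-endian hosts */
	u8 buf[4];

	memcpy(buf, &be, 4);
	/* buf[] is {0x11, 0x22, 0x33, 0x44} on any host; without the
	 * conversion a little-endian host would get {0x44, 0x33, 0x22, 0x11}
	 */
}
#endif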
7995
7996 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7997                             int buf_size)
7998 {
7999         int rc;
8000         u32 cmd_flags;
8001         u32 val;
8002
8003         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8004                 DP(BNX2X_MSG_NVM,
8005                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8006                    offset, buf_size);
8007                 return -EINVAL;
8008         }
8009
8010         if (offset + buf_size > bp->common.flash_size) {
8011                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8012                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8013                    offset, buf_size, bp->common.flash_size);
8014                 return -EINVAL;
8015         }
8016
8017         /* request access to nvram interface */
8018         rc = bnx2x_acquire_nvram_lock(bp);
8019         if (rc)
8020                 return rc;
8021
8022         /* enable access to nvram interface */
8023         bnx2x_enable_nvram_access(bp);
8024
8025         /* read the first word(s) */
8026         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8027         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8028                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8029                 memcpy(ret_buf, &val, 4);
8030
8031                 /* advance to the next dword */
8032                 offset += sizeof(u32);
8033                 ret_buf += sizeof(u32);
8034                 buf_size -= sizeof(u32);
8035                 cmd_flags = 0;
8036         }
8037
8038         if (rc == 0) {
8039                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8040                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8041                 memcpy(ret_buf, &val, 4);
8042         }
8043
8044         /* disable access to nvram interface */
8045         bnx2x_disable_nvram_access(bp);
8046         bnx2x_release_nvram_lock(bp);
8047
8048         return rc;
8049 }
8050
8051 static int bnx2x_get_eeprom(struct net_device *dev,
8052                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8053 {
8054         struct bnx2x *bp = netdev_priv(dev);
8055         int rc;
8056
8057         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8058            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8059            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8060            eeprom->len, eeprom->len);
8061
8062         /* parameters already validated in ethtool_get_eeprom */
8063
8064         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8065
8066         return rc;
8067 }
8068
8069 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8070                                    u32 cmd_flags)
8071 {
8072         int count, i, rc;
8073
8074         /* build the command word */
8075         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8076
8077         /* need to clear DONE bit separately */
8078         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8079
8080         /* write the data */
8081         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8082
8083         /* address of the NVRAM to write to */
8084         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8085                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8086
8087         /* issue the write command */
8088         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8089
8090         /* adjust timeout for emulation/FPGA */
8091         count = NVRAM_TIMEOUT_COUNT;
8092         if (CHIP_REV_IS_SLOW(bp))
8093                 count *= 100;
8094
8095         /* wait for completion */
8096         rc = -EBUSY;
8097         for (i = 0; i < count; i++) {
8098                 udelay(5);
8099                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8100                 if (val & MCPR_NVM_COMMAND_DONE) {
8101                         rc = 0;
8102                         break;
8103                 }
8104         }
8105
8106         return rc;
8107 }
8108
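/* BYTE_OFFSET() turns a byte offset within a dword into a bit shift:
 * (offset & 0x03) selects the byte (0..3) and the *8 scales it to
 * bits, so e.g. offset 0x102 selects byte 2 and yields a shift of 16.
 */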
8109 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8110
8111 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8112                               int buf_size)
8113 {
8114         int rc;
8115         u32 cmd_flags;
8116         u32 align_offset;
8117         u32 val;
8118
8119         if (offset + buf_size > bp->common.flash_size) {
8120                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8121                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8122                    offset, buf_size, bp->common.flash_size);
8123                 return -EINVAL;
8124         }
8125
8126         /* request access to nvram interface */
8127         rc = bnx2x_acquire_nvram_lock(bp);
8128         if (rc)
8129                 return rc;
8130
8131         /* enable access to nvram interface */
8132         bnx2x_enable_nvram_access(bp);
8133
8134         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8135         align_offset = (offset & ~0x03);
8136         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8137
8138         if (rc == 0) {
8139                 val &= ~(0xff << BYTE_OFFSET(offset));
8140                 val |= (*data_buf << BYTE_OFFSET(offset));
8141
8142                 /* nvram data is returned as an array of bytes
8143                  * convert it back to cpu order */
8144                 val = be32_to_cpu(val);
8145
8146                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8147                                              cmd_flags);
8148         }
8149
8150         /* disable access to nvram interface */
8151         bnx2x_disable_nvram_access(bp);
8152         bnx2x_release_nvram_lock(bp);
8153
8154         return rc;
8155 }
8156
8157 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8158                              int buf_size)
8159 {
8160         int rc;
8161         u32 cmd_flags;
8162         u32 val;
8163         u32 written_so_far;
8164
8165         if (buf_size == 1)      /* single-byte write (ethtool) */
8166                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8167
8168         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8169                 DP(BNX2X_MSG_NVM,
8170                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8171                    offset, buf_size);
8172                 return -EINVAL;
8173         }
8174
8175         if (offset + buf_size > bp->common.flash_size) {
8176                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8177                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8178                    offset, buf_size, bp->common.flash_size);
8179                 return -EINVAL;
8180         }
8181
8182         /* request access to nvram interface */
8183         rc = bnx2x_acquire_nvram_lock(bp);
8184         if (rc)
8185                 return rc;
8186
8187         /* enable access to nvram interface */
8188         bnx2x_enable_nvram_access(bp);
8189
8190         written_so_far = 0;
8191         cmd_flags = MCPR_NVM_COMMAND_FIRST;
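        /* Judging by the flag logic below, each NVRAM page (and the
         * buffer as a whole) must be bracketed by FIRST/LAST command
         * flags: LAST on the final dword of the buffer or of a page,
         * FIRST again on each page start.
         */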
8192         while ((written_so_far < buf_size) && (rc == 0)) {
8193                 if (written_so_far == (buf_size - sizeof(u32)))
8194                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8195                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8196                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8197                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8198                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8199
8200                 memcpy(&val, data_buf, 4);
8201
8202                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8203
8204                 /* advance to the next dword */
8205                 offset += sizeof(u32);
8206                 data_buf += sizeof(u32);
8207                 written_so_far += sizeof(u32);
8208                 cmd_flags = 0;
8209         }
8210
8211         /* disable access to nvram interface */
8212         bnx2x_disable_nvram_access(bp);
8213         bnx2x_release_nvram_lock(bp);
8214
8215         return rc;
8216 }
8217
8218 static int bnx2x_set_eeprom(struct net_device *dev,
8219                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8220 {
8221         struct bnx2x *bp = netdev_priv(dev);
8222         int rc;
8223
8224         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8225            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8226            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8227            eeprom->len, eeprom->len);
8228
8229         /* parameters already validated in ethtool_set_eeprom */
8230
8231         /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8232         if (eeprom->magic == 0x00504859)
8233                 if (bp->port.pmf) {
8234
8235                         bnx2x_acquire_phy_lock(bp);
8236                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8237                                              bp->link_params.ext_phy_config,
8238                                              (bp->state != BNX2X_STATE_CLOSED),
8239                                              eebuf, eeprom->len);
8240                         if ((bp->state == BNX2X_STATE_OPEN) ||
8241                             (bp->state == BNX2X_STATE_DISABLED)) {
8242                                 rc |= bnx2x_link_reset(&bp->link_params,
8243                                                        &bp->link_vars);
8244                                 rc |= bnx2x_phy_init(&bp->link_params,
8245                                                      &bp->link_vars);
8246                         }
8247                         bnx2x_release_phy_lock(bp);
8248
8249                 } else /* Only the PMF can access the PHY */
8250                         return -EINVAL;
8251         else
8252                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8253
8254         return rc;
8255 }
8256
8257 static int bnx2x_get_coalesce(struct net_device *dev,
8258                               struct ethtool_coalesce *coal)
8259 {
8260         struct bnx2x *bp = netdev_priv(dev);
8261
8262         memset(coal, 0, sizeof(struct ethtool_coalesce));
8263
8264         coal->rx_coalesce_usecs = bp->rx_ticks;
8265         coal->tx_coalesce_usecs = bp->tx_ticks;
8266
8267         return 0;
8268 }
8269
8270 static int bnx2x_set_coalesce(struct net_device *dev,
8271                               struct ethtool_coalesce *coal)
8272 {
8273         struct bnx2x *bp = netdev_priv(dev);
8274
8275         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8276         if (bp->rx_ticks > 3000)
8277                 bp->rx_ticks = 3000;
8278
8279         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8280         if (bp->tx_ticks > 3000)
8281                 bp->tx_ticks = 3000;
8282
8283         if (netif_running(dev))
8284                 bnx2x_update_coalesce(bp);
8285
8286         return 0;
8287 }
8288
8289 static void bnx2x_get_ringparam(struct net_device *dev,
8290                                 struct ethtool_ringparam *ering)
8291 {
8292         struct bnx2x *bp = netdev_priv(dev);
8293
8294         ering->rx_max_pending = MAX_RX_AVAIL;
8295         ering->rx_mini_max_pending = 0;
8296         ering->rx_jumbo_max_pending = 0;
8297
8298         ering->rx_pending = bp->rx_ring_size;
8299         ering->rx_mini_pending = 0;
8300         ering->rx_jumbo_pending = 0;
8301
8302         ering->tx_max_pending = MAX_TX_AVAIL;
8303         ering->tx_pending = bp->tx_ring_size;
8304 }
8305
8306 static int bnx2x_set_ringparam(struct net_device *dev,
8307                                struct ethtool_ringparam *ering)
8308 {
8309         struct bnx2x *bp = netdev_priv(dev);
8310         int rc = 0;
8311
8312         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8313             (ering->tx_pending > MAX_TX_AVAIL) ||
8314             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8315                 return -EINVAL;
8316
8317         bp->rx_ring_size = ering->rx_pending;
8318         bp->tx_ring_size = ering->tx_pending;
8319
8320         if (netif_running(dev)) {
8321                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8322                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8323         }
8324
8325         return rc;
8326 }
8327
8328 static void bnx2x_get_pauseparam(struct net_device *dev,
8329                                  struct ethtool_pauseparam *epause)
8330 {
8331         struct bnx2x *bp = netdev_priv(dev);
8332
8333         epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8334                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8335
8336         epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8337                             FLOW_CTRL_RX);
8338         epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8339                             FLOW_CTRL_TX);
8340
8341         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8342            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8343            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8344 }
8345
8346 static int bnx2x_set_pauseparam(struct net_device *dev,
8347                                 struct ethtool_pauseparam *epause)
8348 {
8349         struct bnx2x *bp = netdev_priv(dev);
8350
8351         if (IS_E1HMF(bp))
8352                 return 0;
8353
8354         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8355            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8356            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8357
8358         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8359
8360         if (epause->rx_pause)
8361                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8362
8363         if (epause->tx_pause)
8364                 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8365
8366         if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8367                 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8368
8369         if (epause->autoneg) {
8370                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8371                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8372                         return -EINVAL;
8373                 }
8374
8375                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8376                         bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8377         }
8378
8379         DP(NETIF_MSG_LINK,
8380            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8381
8382         if (netif_running(dev)) {
8383                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8384                 bnx2x_link_set(bp);
8385         }
8386
8387         return 0;
8388 }
8389
8390 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8391 {
8392         struct bnx2x *bp = netdev_priv(dev);
8393         int changed = 0;
8394         int rc = 0;
8395
8396         /* TPA requires Rx CSUM offloading */
8397         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8398                 if (!(dev->features & NETIF_F_LRO)) {
8399                         dev->features |= NETIF_F_LRO;
8400                         bp->flags |= TPA_ENABLE_FLAG;
8401                         changed = 1;
8402                 }
8403
8404         } else if (dev->features & NETIF_F_LRO) {
8405                 dev->features &= ~NETIF_F_LRO;
8406                 bp->flags &= ~TPA_ENABLE_FLAG;
8407                 changed = 1;
8408         }
8409
8410         if (changed && netif_running(dev)) {
8411                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8412                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8413         }
8414
8415         return rc;
8416 }
8417
8418 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8419 {
8420         struct bnx2x *bp = netdev_priv(dev);
8421
8422         return bp->rx_csum;
8423 }
8424
8425 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8426 {
8427         struct bnx2x *bp = netdev_priv(dev);
8428         int rc = 0;
8429
8430         bp->rx_csum = data;
8431
8432         /* Disable TPA when Rx CSUM is disabled; otherwise all
8433            TPA'ed packets will be discarded due to a wrong TCP CSUM */
8434         if (!data) {
8435                 u32 flags = ethtool_op_get_flags(dev);
8436
8437                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8438         }
8439
8440         return rc;
8441 }
8442
8443 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8444 {
8445         if (data) {
8446                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8447                 dev->features |= NETIF_F_TSO6;
8448         } else {
8449                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8450                 dev->features &= ~NETIF_F_TSO6;
8451         }
8452
8453         return 0;
8454 }
8455
8456 static const struct {
8457         char string[ETH_GSTRING_LEN];
8458 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8459         { "register_test (offline)" },
8460         { "memory_test (offline)" },
8461         { "loopback_test (offline)" },
8462         { "nvram_test (online)" },
8463         { "interrupt_test (online)" },
8464         { "link_test (online)" },
8465         { "idle check (online)" },
8466         { "MC errors (online)" }
8467 };
8468
8469 static int bnx2x_self_test_count(struct net_device *dev)
8470 {
8471         return BNX2X_NUM_TESTS;
8472 }
8473
8474 static int bnx2x_test_registers(struct bnx2x *bp)
8475 {
8476         int idx, i, rc = -ENODEV;
8477         u32 wr_val = 0;
8478         int port = BP_PORT(bp);
8479         static const struct {
8480                 u32  offset0;
8481                 u32  offset1;
8482                 u32  mask;
8483         } reg_tbl[] = {
8484 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
8485                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
8486                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
8487                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
8488                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
8489                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
8490                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
8491                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
8492                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
8493                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
8494 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
8495                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
8496                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
8497                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
8498                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
8499                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8500                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
8501                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
8502                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
8503                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
8504 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
8505                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
8506                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
8507                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
8508                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
8509                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
8510                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
8511                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
8512                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
8513                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
8514 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
8515                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
8516                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
8517                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
8518                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8519                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
8520                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8521                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
8522
8523                 { 0xffffffff, 0, 0x00000000 }
8524         };
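        /* offset1 in the table above is the per-port register stride:
         * the port 1 copy of a register lives offset1 bytes above its
         * port 0 counterpart, hence offset0 + port*offset1 below.
         */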
8525
8526         if (!netif_running(bp->dev))
8527                 return rc;
8528
8529         /* Repeat the test twice:
8530            First by writing 0x00000000, second by writing 0xffffffff */
8531         for (idx = 0; idx < 2; idx++) {
8532
8533                 switch (idx) {
8534                 case 0:
8535                         wr_val = 0;
8536                         break;
8537                 case 1:
8538                         wr_val = 0xffffffff;
8539                         break;
8540                 }
8541
8542                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8543                         u32 offset, mask, save_val, val;
8544
8545                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8546                         mask = reg_tbl[i].mask;
8547
8548                         save_val = REG_RD(bp, offset);
8549
8550                         REG_WR(bp, offset, wr_val);
8551                         val = REG_RD(bp, offset);
8552
8553                         /* Restore the original register's value */
8554                         REG_WR(bp, offset, save_val);
8555
8556                         /* verify the value is as expected */
8557                         if ((val & mask) != (wr_val & mask))
8558                                 goto test_reg_exit;
8559                 }
8560         }
8561
8562         rc = 0;
8563
8564 test_reg_exit:
8565         return rc;
8566 }
8567
8568 static int bnx2x_test_memory(struct bnx2x *bp)
8569 {
8570         int i, j, rc = -ENODEV;
8571         u32 val;
8572         static const struct {
8573                 u32 offset;
8574                 int size;
8575         } mem_tbl[] = {
8576                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
8577                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8578                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
8579                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
8580                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
8581                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
8582                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
8583
8584                 { 0xffffffff, 0 }
8585         };
8586         static const struct {
8587                 char *name;
8588                 u32 offset;
8589                 u32 e1_mask;
8590                 u32 e1h_mask;
8591         } prty_tbl[] = {
8592                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
8593                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
8594                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
8595                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
8596                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
8597                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
8598
8599                 { NULL, 0xffffffff, 0, 0 }
8600         };
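        /* bits set in e1_mask/e1h_mask are parity status bits that are
         * tolerated on that chip revision; only bits outside the mask
         * fail the test (see the val & ~mask checks below).
         */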
8601
8602         if (!netif_running(bp->dev))
8603                 return rc;
8604
8605         /* Go through all the memories */
8606         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8607                 for (j = 0; j < mem_tbl[i].size; j++)
8608                         REG_RD(bp, mem_tbl[i].offset + j*4);
8609
8610         /* Check the parity status */
8611         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8612                 val = REG_RD(bp, prty_tbl[i].offset);
8613                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8614                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8615                         DP(NETIF_MSG_HW,
8616                            "%s is 0x%x\n", prty_tbl[i].name, val);
8617                         goto test_mem_exit;
8618                 }
8619         }
8620
8621         rc = 0;
8622
8623 test_mem_exit:
8624         return rc;
8625 }
8626
8627 static void bnx2x_netif_start(struct bnx2x *bp)
8628 {
8629         int i;
8630
8631         if (atomic_dec_and_test(&bp->intr_sem)) {
8632                 if (netif_running(bp->dev)) {
8633                         bnx2x_int_enable(bp);
8634                         for_each_queue(bp, i)
8635                                 napi_enable(&bnx2x_fp(bp, i, napi));
8636                         if (bp->state == BNX2X_STATE_OPEN)
8637                                 netif_wake_queue(bp->dev);
8638                 }
8639         }
8640 }
8641
8642 static void bnx2x_netif_stop(struct bnx2x *bp)
8643 {
8644         int i;
8645
8646         if (netif_running(bp->dev)) {
8647                 netif_tx_disable(bp->dev);
8648                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
8649                 for_each_queue(bp, i)
8650                         napi_disable(&bnx2x_fp(bp, i, napi));
8651         }
8652         bnx2x_int_disable_sync(bp);
8653 }
8654
8655 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8656 {
8657         int cnt = 1000;
8658
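        /* poll for up to ~10 seconds (1000 x 10 ms) for the link to
         * come back up */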
8659         if (link_up)
8660                 while (bnx2x_link_test(bp) && cnt--)
8661                         msleep(10);
8662 }
8663
8664 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8665 {
8666         unsigned int pkt_size, num_pkts, i;
8667         struct sk_buff *skb;
8668         unsigned char *packet;
8669         struct bnx2x_fastpath *fp = &bp->fp[0];
8670         u16 tx_start_idx, tx_idx;
8671         u16 rx_start_idx, rx_idx;
8672         u16 pkt_prod;
8673         struct sw_tx_bd *tx_buf;
8674         struct eth_tx_bd *tx_bd;
8675         dma_addr_t mapping;
8676         union eth_rx_cqe *cqe;
8677         u8 cqe_fp_flags;
8678         struct sw_rx_bd *rx_buf;
8679         u16 len;
8680         int rc = -ENODEV;
8681
8682         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8683                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8684                 bnx2x_acquire_phy_lock(bp);
8685                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8686                 bnx2x_release_phy_lock(bp);
8687
8688         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8689                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8690                 bnx2x_acquire_phy_lock(bp);
8691                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8692                 bnx2x_release_phy_lock(bp);
8693                 /* wait until link state is restored */
8694                 bnx2x_wait_for_link(bp, link_up);
8695
8696         } else
8697                 return -EINVAL;
8698
8699         pkt_size = 1514;
8700         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8701         if (!skb) {
8702                 rc = -ENOMEM;
8703                 goto test_loopback_exit;
8704         }
8705         packet = skb_put(skb, pkt_size);
8706         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8707         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8708         for (i = ETH_HLEN; i < pkt_size; i++)
8709                 packet[i] = (unsigned char) (i & 0xff);
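        /* the payload is a simple ramp pattern which the Rx side below
         * verifies byte-for-byte after the loopback round trip */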
8710
8711         num_pkts = 0;
8712         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8713         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8714
8715         pkt_prod = fp->tx_pkt_prod++;
8716         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8717         tx_buf->first_bd = fp->tx_bd_prod;
8718         tx_buf->skb = skb;
8719
8720         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8721         mapping = pci_map_single(bp->pdev, skb->data,
8722                                  skb_headlen(skb), PCI_DMA_TODEVICE);
8723         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8724         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8725         tx_bd->nbd = cpu_to_le16(1);
8726         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8727         tx_bd->vlan = cpu_to_le16(pkt_prod);
8728         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8729                                        ETH_TX_BD_FLAGS_END_BD);
8730         tx_bd->general_data = ((UNICAST_ADDRESS <<
8731                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8732
8733         fp->hw_tx_prods->bds_prod =
8734                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8735         mb(); /* FW restriction: must not reorder writing nbd and packets */
8736         fp->hw_tx_prods->packets_prod =
8737                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8738         DOORBELL(bp, FP_IDX(fp), 0);
8739
8740         mmiowb();
8741
8742         num_pkts++;
8743         fp->tx_bd_prod++;
8744         bp->dev->trans_start = jiffies;
8745
8746         udelay(100);
8747
8748         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8749         if (tx_idx != tx_start_idx + num_pkts)
8750                 goto test_loopback_exit;
8751
8752         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8753         if (rx_idx != rx_start_idx + num_pkts)
8754                 goto test_loopback_exit;
8755
8756         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8757         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8758         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8759                 goto test_loopback_rx_exit;
8760
8761         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8762         if (len != pkt_size)
8763                 goto test_loopback_rx_exit;
8764
8765         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8766         skb = rx_buf->skb;
8767         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8768         for (i = ETH_HLEN; i < pkt_size; i++)
8769                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8770                         goto test_loopback_rx_exit;
8771
8772         rc = 0;
8773
8774 test_loopback_rx_exit:
8775         bp->dev->last_rx = jiffies;
8776
8777         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8778         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8779         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8780         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8781
8782         /* Update producers */
8783         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8784                              fp->rx_sge_prod);
8785         mmiowb(); /* keep prod updates ordered */
8786
8787 test_loopback_exit:
8788         bp->link_params.loopback_mode = LOOPBACK_NONE;
8789
8790         return rc;
8791 }
8792
8793 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8794 {
8795         int rc = 0;
8796
8797         if (!netif_running(bp->dev))
8798                 return BNX2X_LOOPBACK_FAILED;
8799
8800         bnx2x_netif_stop(bp);
8801
8802         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8803                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8804                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8805         }
8806
8807         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8808                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8809                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8810         }
8811
8812         bnx2x_netif_start(bp);
8813
8814         return rc;
8815 }
8816
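/* CRC-32 residue: running the CRC over a block that ends with its own
 * little-endian CRC leaves this well-known constant when the data is
 * intact, so each NVRAM region below can be validated without parsing
 * out its stored CRC.
 */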
8817 #define CRC32_RESIDUAL                  0xdebb20e3
8818
8819 static int bnx2x_test_nvram(struct bnx2x *bp)
8820 {
8821         static const struct {
8822                 int offset;
8823                 int size;
8824         } nvram_tbl[] = {
8825                 {     0,  0x14 }, /* bootstrap */
8826                 {  0x14,  0xec }, /* dir */
8827                 { 0x100, 0x350 }, /* manuf_info */
8828                 { 0x450,  0xf0 }, /* feature_info */
8829                 { 0x640,  0x64 }, /* upgrade_key_info */
8830                 { 0x6a4,  0x64 },
8831                 { 0x708,  0x70 }, /* manuf_key_info */
8832                 { 0x778,  0x70 },
8833                 {     0,     0 }
8834         };
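        /* buf is sized for the largest region in nvram_tbl
         * (manuf_info, 0x350 bytes) */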
8835         u32 buf[0x350 / 4];
8836         u8 *data = (u8 *)buf;
8837         int i, rc;
8838         u32 magic, csum;
8839
8840         rc = bnx2x_nvram_read(bp, 0, data, 4);
8841         if (rc) {
8842                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8843                 goto test_nvram_exit;
8844         }
8845
8846         magic = be32_to_cpu(buf[0]);
8847         if (magic != 0x669955aa) {
8848                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8849                 rc = -ENODEV;
8850                 goto test_nvram_exit;
8851         }
8852
8853         for (i = 0; nvram_tbl[i].size; i++) {
8854
8855                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8856                                       nvram_tbl[i].size);
8857                 if (rc) {
8858                         DP(NETIF_MSG_PROBE,
8859                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8860                         goto test_nvram_exit;
8861                 }
8862
8863                 csum = ether_crc_le(nvram_tbl[i].size, data);
8864                 if (csum != CRC32_RESIDUAL) {
8865                         DP(NETIF_MSG_PROBE,
8866                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8867                         rc = -ENODEV;
8868                         goto test_nvram_exit;
8869                 }
8870         }
8871
8872 test_nvram_exit:
8873         return rc;
8874 }
8875
8876 static int bnx2x_test_intr(struct bnx2x *bp)
8877 {
8878         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8879         int i, rc;
8880
8881         if (!netif_running(bp->dev))
8882                 return -ENODEV;
8883
8884         config->hdr.length_6b = 0;
8885         config->hdr.offset = 0;
8886         config->hdr.client_id = BP_CL_ID(bp);
8887         config->hdr.reserved1 = 0;
8888
8889         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8890                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8891                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8892         if (rc == 0) {
8893                 bp->set_mac_pending++;
8894                 for (i = 0; i < 10; i++) {
8895                         if (!bp->set_mac_pending)
8896                                 break;
8897                         msleep_interruptible(10);
8898                 }
8899                 if (i == 10)
8900                         rc = -ENODEV;
8901         }
8902
8903         return rc;
8904 }
8905
8906 static void bnx2x_self_test(struct net_device *dev,
8907                             struct ethtool_test *etest, u64 *buf)
8908 {
8909         struct bnx2x *bp = netdev_priv(dev);
8910
8911         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8912
8913         if (!netif_running(dev))
8914                 return;
8915
8916         /* offline tests are not supported in MF mode */
8917         if (IS_E1HMF(bp))
8918                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8919
8920         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8921                 u8 link_up;
8922
8923                 link_up = bp->link_vars.link_up;
8924                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8925                 bnx2x_nic_load(bp, LOAD_DIAG);
8926                 /* wait until link state is restored */
8927                 bnx2x_wait_for_link(bp, link_up);
8928
8929                 if (bnx2x_test_registers(bp) != 0) {
8930                         buf[0] = 1;
8931                         etest->flags |= ETH_TEST_FL_FAILED;
8932                 }
8933                 if (bnx2x_test_memory(bp) != 0) {
8934                         buf[1] = 1;
8935                         etest->flags |= ETH_TEST_FL_FAILED;
8936                 }
8937                 buf[2] = bnx2x_test_loopback(bp, link_up);
8938                 if (buf[2] != 0)
8939                         etest->flags |= ETH_TEST_FL_FAILED;
8940
8941                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8942                 bnx2x_nic_load(bp, LOAD_NORMAL);
8943                 /* wait until link state is restored */
8944                 bnx2x_wait_for_link(bp, link_up);
8945         }
8946         if (bnx2x_test_nvram(bp) != 0) {
8947                 buf[3] = 1;
8948                 etest->flags |= ETH_TEST_FL_FAILED;
8949         }
8950         if (bnx2x_test_intr(bp) != 0) {
8951                 buf[4] = 1;
8952                 etest->flags |= ETH_TEST_FL_FAILED;
8953         }
8954         if (bp->port.pmf)
8955                 if (bnx2x_link_test(bp) != 0) {
8956                         buf[5] = 1;
8957                         etest->flags |= ETH_TEST_FL_FAILED;
8958                 }
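        /* note that buf[6] ("idle check") is never set here; it stays
         * at the zero written by the memset above */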
8959         buf[7] = bnx2x_mc_assert(bp);
8960         if (buf[7] != 0)
8961                 etest->flags |= ETH_TEST_FL_FAILED;
8962
8963 #ifdef BNX2X_EXTRA_DEBUG
8964         bnx2x_panic_dump(bp);
8965 #endif
8966 }
8967
8968 static const struct {
8969         long offset;
8970         int size;
8971         u32 flags;
8972 #define STATS_FLAGS_PORT                1
8973 #define STATS_FLAGS_FUNC                2
8974         u8 string[ETH_GSTRING_LEN];
8975 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
8976 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
8977                                 8, STATS_FLAGS_FUNC, "rx_bytes" },
8978         { STATS_OFFSET32(error_bytes_received_hi),
8979                                 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
8980         { STATS_OFFSET32(total_bytes_transmitted_hi),
8981                                 8, STATS_FLAGS_FUNC, "tx_bytes" },
8982         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8983                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
8984         { STATS_OFFSET32(total_unicast_packets_received_hi),
8985                                 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
8986         { STATS_OFFSET32(total_multicast_packets_received_hi),
8987                                 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
8988         { STATS_OFFSET32(total_broadcast_packets_received_hi),
8989                                 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
8990         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8991                                 8, STATS_FLAGS_FUNC, "tx_packets" },
8992         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8993                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
8994 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8995                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
8996         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8997                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
8998         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8999                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9000         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9001                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9002         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9003                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9004         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9005                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9006         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9007                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9008         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9009                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9010         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9011                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9012         { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9013                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9014 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9015                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9016         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9017                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9018         { STATS_OFFSET32(jabber_packets_received),
9019                                 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9020         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9021                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9022         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9023                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9024         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9025                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9026         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9027                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9028         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9029                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9030         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9031                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9032         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9033                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9034 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9035                                 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9036         { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9037                                 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9038         { STATS_OFFSET32(tx_stat_outxonsent_hi),
9039                                 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9040         { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9041                                 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9042         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9043                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9044         { STATS_OFFSET32(mac_filter_discard),
9045                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9046         { STATS_OFFSET32(no_buff_discard),
9047                                 4, STATS_FLAGS_FUNC, "rx_discards" },
9048         { STATS_OFFSET32(xxoverflow_discard),
9049                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9050         { STATS_OFFSET32(brb_drop_hi),
9051                                 8, STATS_FLAGS_PORT, "brb_discard" },
9052         { STATS_OFFSET32(brb_truncate_hi),
9053                                 8, STATS_FLAGS_PORT, "brb_truncate" },
9054 /* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9055                                 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9056         { STATS_OFFSET32(rx_skb_alloc_failed),
9057                                 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9058 /* 42 */{ STATS_OFFSET32(hw_csum_err),
9059                                 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9060 };
9061
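/* in E1H multi-function mode the port-wide statistics are not exported
 * per function, so they are filtered out of the ethtool view */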
9062 #define IS_NOT_E1HMF_STAT(bp, i) \
9063                 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9064
9065 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9066 {
9067         struct bnx2x *bp = netdev_priv(dev);
9068         int i, j;
9069
9070         switch (stringset) {
9071         case ETH_SS_STATS:
9072                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9073                         if (IS_NOT_E1HMF_STAT(bp, i))
9074                                 continue;
9075                         strcpy(buf + j*ETH_GSTRING_LEN,
9076                                bnx2x_stats_arr[i].string);
9077                         j++;
9078                 }
9079                 break;
9080
9081         case ETH_SS_TEST:
9082                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9083                 break;
9084         }
9085 }
9086
9087 static int bnx2x_get_stats_count(struct net_device *dev)
9088 {
9089         struct bnx2x *bp = netdev_priv(dev);
9090         int i, num_stats = 0;
9091
9092         for (i = 0; i < BNX2X_NUM_STATS; i++) {
9093                 if (IS_NOT_E1HMF_STAT(bp, i))
9094                         continue;
9095                 num_stats++;
9096         }
9097         return num_stats;
9098 }
9099
9100 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9101                                     struct ethtool_stats *stats, u64 *buf)
9102 {
9103         struct bnx2x *bp = netdev_priv(dev);
9104         u32 *hw_stats = (u32 *)&bp->eth_stats;
9105         int i, j;
9106
9107         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9108                 if (IS_NOT_E1HMF_STAT(bp, i))
9109                         continue;
9110
9111                 if (bnx2x_stats_arr[i].size == 0) {
9112                         /* skip this counter */
9113                         buf[j] = 0;
9114                         j++;
9115                         continue;
9116                 }
9117                 if (bnx2x_stats_arr[i].size == 4) {
9118                         /* 4-byte counter */
9119                         buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9120                         j++;
9121                         continue;
9122                 }
9123                 /* 8-byte counter */
9124                 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9125                                   *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9126                 j++;
9127         }
9128 }
9129
9130 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9131 {
9132         struct bnx2x *bp = netdev_priv(dev);
9133         int port = BP_PORT(bp);
9134         int i;
9135
9136         if (!netif_running(dev))
9137                 return 0;
9138
9139         if (!bp->port.pmf)
9140                 return 0;
9141
9142         if (data == 0)
9143                 data = 2;
9144
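        /* data is the requested blink time in seconds; each second
         * gives one on/off LED cycle (two 500 ms half-periods) */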
9145         for (i = 0; i < (data * 2); i++) {
9146                 if ((i % 2) == 0)
9147                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9148                                       bp->link_params.hw_led_mode,
9149                                       bp->link_params.chip_id);
9150                 else
9151                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9152                                       bp->link_params.hw_led_mode,
9153                                       bp->link_params.chip_id);
9154
9155                 msleep_interruptible(500);
9156                 if (signal_pending(current))
9157                         break;
9158         }
9159
9160         if (bp->link_vars.link_up)
9161                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9162                               bp->link_vars.line_speed,
9163                               bp->link_params.hw_led_mode,
9164                               bp->link_params.chip_id);
9165
9166         return 0;
9167 }
9168
9169 static const struct ethtool_ops bnx2x_ethtool_ops = {
9170         .get_settings           = bnx2x_get_settings,
9171         .set_settings           = bnx2x_set_settings,
9172         .get_drvinfo            = bnx2x_get_drvinfo,
9173         .get_wol                = bnx2x_get_wol,
9174         .set_wol                = bnx2x_set_wol,
9175         .get_msglevel           = bnx2x_get_msglevel,
9176         .set_msglevel           = bnx2x_set_msglevel,
9177         .nway_reset             = bnx2x_nway_reset,
9178         .get_link               = ethtool_op_get_link,
9179         .get_eeprom_len         = bnx2x_get_eeprom_len,
9180         .get_eeprom             = bnx2x_get_eeprom,
9181         .set_eeprom             = bnx2x_set_eeprom,
9182         .get_coalesce           = bnx2x_get_coalesce,
9183         .set_coalesce           = bnx2x_set_coalesce,
9184         .get_ringparam          = bnx2x_get_ringparam,
9185         .set_ringparam          = bnx2x_set_ringparam,
9186         .get_pauseparam         = bnx2x_get_pauseparam,
9187         .set_pauseparam         = bnx2x_set_pauseparam,
9188         .get_rx_csum            = bnx2x_get_rx_csum,
9189         .set_rx_csum            = bnx2x_set_rx_csum,
9190         .get_tx_csum            = ethtool_op_get_tx_csum,
9191         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9192         .set_flags              = bnx2x_set_flags,
9193         .get_flags              = ethtool_op_get_flags,
9194         .get_sg                 = ethtool_op_get_sg,
9195         .set_sg                 = ethtool_op_set_sg,
9196         .get_tso                = ethtool_op_get_tso,
9197         .set_tso                = bnx2x_set_tso,
9198         .self_test_count        = bnx2x_self_test_count,
9199         .self_test              = bnx2x_self_test,
9200         .get_strings            = bnx2x_get_strings,
9201         .phys_id                = bnx2x_phys_id,
9202         .get_stats_count        = bnx2x_get_stats_count,
9203         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9204 };
9205
9206 /* end of ethtool_ops */
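/* For reference, a few standard ethtool invocations that exercise the
 * hooks above (illustrative only, "eth0" being a placeholder):
 *
 *   ethtool -t eth0 offline                    -> bnx2x_self_test
 *   ethtool -C eth0 rx-usecs 100 tx-usecs 100  -> bnx2x_set_coalesce
 *   ethtool -G eth0 rx 2048 tx 2048            -> bnx2x_set_ringparam
 *   ethtool -p eth0 5                          -> bnx2x_phys_id
 */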
9207
9208 /****************************************************************************
9209 * General service functions
9210 ****************************************************************************/
9211
9212 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9213 {
9214         u16 pmcsr;
9215
9216         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9217
9218         switch (state) {
9219         case PCI_D0:
9220                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9221                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9222                                        PCI_PM_CTRL_PME_STATUS));
9223
9224                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9225                         /* delay required during transition out of D3hot */
9226                         msleep(20);
9227                 break;
9228
9229         case PCI_D3hot:
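                /* 3 is the D3hot encoding of the PM state field */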
9230                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9231                 pmcsr |= 3;
9232
9233                 if (bp->wol)
9234                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9235
9236                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9237                                       pmcsr);
9238
9239                 /* No more memory access after this point until
9240                  * device is brought back to D0.
9241                  */
9242                 break;
9243
9244         default:
9245                 return -EINVAL;
9246         }
9247         return 0;
9248 }
9249
9250 /*
9251  * net_device service functions
9252  */
9253
9254 static int bnx2x_poll(struct napi_struct *napi, int budget)
9255 {
9256         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9257                                                  napi);
9258         struct bnx2x *bp = fp->bp;
9259         int work_done = 0;
9260
9261 #ifdef BNX2X_STOP_ON_ERROR
9262         if (unlikely(bp->panic))
9263                 goto poll_panic;
9264 #endif
9265
9266         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9267         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9268         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9269
9270         bnx2x_update_fpsb_idx(fp);
9271
9272         if (BNX2X_HAS_TX_WORK(fp))
9273                 bnx2x_tx_int(fp, budget);
9274
9275         if (BNX2X_HAS_RX_WORK(fp))
9276                 work_done = bnx2x_rx_int(fp, budget);
9277
9278         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9279
9280         /* must not complete if we consumed full budget */
9281         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9282
9283 #ifdef BNX2X_STOP_ON_ERROR
9284 poll_panic:
9285 #endif
9286                 netif_rx_complete(bp->dev, napi);
9287
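                /* re-arm the status block: ack the USTORM index without
                 * enabling interrupts, then ack CSTORM with INT_ENABLE */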
9288                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9289                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9290                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9291                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9292         }
9293         return work_done;
9294 }
9295
9296
9297 /* we split the first BD into header and data BDs
9298  * to ease the pain of our fellow microcode engineers;
9299  * we use one mapping for both BDs.
9300  * So far this has only been observed to happen
9301  * in Other Operating Systems(TM)
9302  */
9303 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9304                                    struct bnx2x_fastpath *fp,
9305                                    struct eth_tx_bd **tx_bd, u16 hlen,
9306                                    u16 bd_prod, int nbd)
9307 {
9308         struct eth_tx_bd *h_tx_bd = *tx_bd;
9309         struct eth_tx_bd *d_tx_bd;
9310         dma_addr_t mapping;
9311         int old_len = le16_to_cpu(h_tx_bd->nbytes);
9312
9313         /* first fix first BD */
9314         h_tx_bd->nbd = cpu_to_le16(nbd);
9315         h_tx_bd->nbytes = cpu_to_le16(hlen);
9316
9317         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9318            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9319            h_tx_bd->addr_lo, h_tx_bd->nbd);
9320
9321         /* now get a new data BD
9322          * (after the pbd) and fill it */
9323         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9324         d_tx_bd = &fp->tx_desc_ring[bd_prod];
9325
9326         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9327                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9328
9329         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9330         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9331         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9332         d_tx_bd->vlan = 0;
9333         /* this marks the BD as one that has no individual mapping
9334          * the FW ignores this flag in a BD not marked start
9335          */
9336         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9337         DP(NETIF_MSG_TX_QUEUED,
9338            "TSO split data size is %d (%x:%x)\n",
9339            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9340
9341         /* update tx_bd for marking the last BD flag */
9342         *tx_bd = d_tx_bd;
9343
9344         return bd_prod;
9345 }
9346
9347 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9348 {
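        /* "fix" is the signed distance (in bytes) between the point the
         * stack checksummed from and t_header: fix > 0 subtracts out the
         * extra bytes summed before t_header, fix < 0 adds the missing
         * ones in; the swab16() puts the result in the byte order the
         * parsing BD appears to expect. */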
9349         if (fix > 0)
9350                 csum = (u16) ~csum_fold(csum_sub(csum,
9351                                 csum_partial(t_header - fix, fix, 0)));
9352
9353         else if (fix < 0)
9354                 csum = (u16) ~csum_fold(csum_add(csum,
9355                                 csum_partial(t_header, -fix, 0)));
9356
9357         return swab16(csum);
9358 }
9359
9360 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9361 {
9362         u32 rc;
9363
9364         if (skb->ip_summed != CHECKSUM_PARTIAL)
9365                 rc = XMIT_PLAIN;
9366
9367         else {
9368                 if (skb->protocol == htons(ETH_P_IPV6)) {
9369                         rc = XMIT_CSUM_V6;
9370                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9371                                 rc |= XMIT_CSUM_TCP;
9372
9373                 } else {
9374                         rc = XMIT_CSUM_V4;
9375                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9376                                 rc |= XMIT_CSUM_TCP;
9377                 }
9378         }
9379
9380         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9381                 rc |= XMIT_GSO_V4;
9382
9383         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9384                 rc |= XMIT_GSO_V6;
9385
9386         return rc;
9387 }
9388
9389 /* check if packet requires linearization (packet is too fragmented) */
9390 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9391                              u32 xmit_type)
9392 {
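        /* The idea below: the FW can fetch at most MAX_FETCH_BD BDs per
         * LSO window, so every MSS worth of payload must fit within a
         * window of MAX_FETCH_BD - 3 fragments. Slide such a window
         * across the frag list and request linearization if any window
         * sums to less than one MSS. */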
9393         int to_copy = 0;
9394         int hlen = 0;
9395         int first_bd_sz = 0;
9396
9397         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9398         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9399
9400                 if (xmit_type & XMIT_GSO) {
9401                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9402                         /* Check if LSO packet needs to be copied:
9403                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9404                         int wnd_size = MAX_FETCH_BD - 3;
9405                         /* Number of windows to check */
9406                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9407                         int wnd_idx = 0;
9408                         int frag_idx = 0;
9409                         u32 wnd_sum = 0;
9410
9411                         /* Headers length */
9412                         hlen = (int)(skb_transport_header(skb) - skb->data) +
9413                                 tcp_hdrlen(skb);
9414
9415                         /* Amount of data (w/o headers) on linear part of SKB */
9416                         first_bd_sz = skb_headlen(skb) - hlen;
9417
9418                         wnd_sum  = first_bd_sz;
9419
9420                         /* Calculate the first sum - it's special */
9421                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9422                                 wnd_sum +=
9423                                         skb_shinfo(skb)->frags[frag_idx].size;
9424
9425                         /* If there was data in the linear part of the skb - check it */
9426                         if (first_bd_sz > 0) {
9427                                 if (unlikely(wnd_sum < lso_mss)) {
9428                                         to_copy = 1;
9429                                         goto exit_lbl;
9430                                 }
9431
9432                                 wnd_sum -= first_bd_sz;
9433                         }
9434
9435                         /* Others are easier: run through the frag list and
9436                            check all windows */
9437                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9438                                 wnd_sum +=
9439                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9440
9441                                 if (unlikely(wnd_sum < lso_mss)) {
9442                                         to_copy = 1;
9443                                         break;
9444                                 }
9445                                 wnd_sum -=
9446                                         skb_shinfo(skb)->frags[wnd_idx].size;
9447                         }
9448
9449                 } else {
9450                         /* a non-LSO packet this fragmented must
9451                            always be linearized */
9452                         to_copy = 1;
9453                 }
9454         }
9455
9456 exit_lbl:
9457         if (unlikely(to_copy))
9458                 DP(NETIF_MSG_TX_QUEUED,
9459                    "Linearization IS REQUIRED for %s packet. "
9460                    "num_frags %d  hlen %d  first_bd_sz %d\n",
9461                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9462                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9463
9464         return to_copy;
9465 }
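/* Worked example for bnx2x_pkt_req_lin() above (a sketch, assuming
 * MAX_FETCH_BD is 13, i.e. wnd_size = 10): for a TSO skb with 12 frags,
 * every window of 10 consecutive data BDs - the linear part counting as
 * the first - must carry at least lso_mss bytes, otherwise the FW could
 * see a window too short for one MSS and the skb is linearized instead.
 */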
9466
9467 /* called with netif_tx_lock
9468  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9469  * netif_wake_queue()
9470  */
9471 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9472 {
9473         struct bnx2x *bp = netdev_priv(dev);
9474         struct bnx2x_fastpath *fp;
9475         struct sw_tx_bd *tx_buf;
9476         struct eth_tx_bd *tx_bd;
9477         struct eth_tx_parse_bd *pbd = NULL;
9478         u16 pkt_prod, bd_prod;
9479         int nbd, fp_index;
9480         dma_addr_t mapping;
9481         u32 xmit_type = bnx2x_xmit_type(bp, skb);
9482         int vlan_off = (bp->e1hov ? 4 : 0);
9483         int i;
9484         u8 hlen = 0;
9485
9486 #ifdef BNX2X_STOP_ON_ERROR
9487         if (unlikely(bp->panic))
9488                 return NETDEV_TX_BUSY;
9489 #endif
9490
9491         fp_index = (smp_processor_id() % bp->num_queues);
9492         fp = &bp->fp[fp_index];
9493
9494         if (unlikely(bnx2x_tx_avail(fp) <
9495                                         (skb_shinfo(skb)->nr_frags + 3))) {
9496                 bp->eth_stats.driver_xoff++;
9497                 netif_stop_queue(dev);
9498                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9499                 return NETDEV_TX_BUSY;
9500         }
9501
9502         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
9503            "  gso type %x  xmit_type %x\n",
9504            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9505            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9506
9507         /* First, check if we need to linearize the skb
9508            (due to FW restrictions) */
9509         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9510                 /* Statistics of linearization */
9511                 bp->lin_cnt++;
9512                 if (skb_linearize(skb) != 0) {
9513                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9514                            "silently dropping this SKB\n");
9515                         dev_kfree_skb_any(skb);
9516                         return NETDEV_TX_OK;
9517                 }
9518         }
9519
9520         /*
9521          * Please read carefully. First we use one BD which we mark as
9522          * start, then for TSO or xsum we have a parsing info BD,
9523          * and only then we have the rest of the TSO BDs.
9524          * (don't forget to mark the last one as last,
9525          * and to unmap only AFTER you write to the BD ...)
9526          * And above all, all pbd sizes are in words - NOT DWORDS!
9527          */
9528
9529         pkt_prod = fp->tx_pkt_prod++;
9530         bd_prod = TX_BD(fp->tx_bd_prod);
9531
9532         /* get a tx_buf and first BD */
9533         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9534         tx_bd = &fp->tx_desc_ring[bd_prod];
9535
9536         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9537         tx_bd->general_data = (UNICAST_ADDRESS <<
9538                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9539         tx_bd->general_data |= 1; /* header nbd */
9540
9541         /* remember the first BD of the packet */
9542         tx_buf->first_bd = fp->tx_bd_prod;
9543         tx_buf->skb = skb;
9544
9545         DP(NETIF_MSG_TX_QUEUED,
9546            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
9547            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9548
9549         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
9550                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9551                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9552                 vlan_off += 4;
9553         } else
9554                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9555
9556         if (xmit_type) {
9557
9558                 /* turn on parsing and get a BD */
9559                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9560                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9561
9562                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9563         }
9564
9565         if (xmit_type & XMIT_CSUM) {
9566                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9567
9568                 /* for now NS flag is not used in Linux */
9569                 pbd->global_data = (hlen |
9570                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
9571                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9572
9573                 pbd->ip_hlen = (skb_transport_header(skb) -
9574                                 skb_network_header(skb)) / 2;
9575
9576                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9577
9578                 pbd->total_hlen = cpu_to_le16(hlen);
9579                 hlen = hlen*2 - vlan_off;
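                /* e.g. an untagged IPv4/TCP frame with vlan_off 0:
                 * 14/2 = 7 words of MAC header, 20/2 = 10 words of IP
                 * header and 20/2 = 10 words of TCP header give
                 * total_hlen = 27 words; hlen is then turned back into
                 * 54 bytes for the TSO header split below.
                 */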
9580
9581                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9582
9583                 if (xmit_type & XMIT_CSUM_V4)
9584                         tx_bd->bd_flags.as_bitfield |=
9585                                                 ETH_TX_BD_FLAGS_IP_CSUM;
9586                 else
9587                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9588
9589                 if (xmit_type & XMIT_CSUM_TCP) {
9590                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9591
9592                 } else {
9593                         s8 fix = SKB_CS_OFF(skb); /* signed! */
9594
9595                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9596                         pbd->cs_offset = fix / 2;
9597
9598                         DP(NETIF_MSG_TX_QUEUED,
9599                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
9600                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9601                            SKB_CS(skb));
9602
9603                         /* HW bug: fixup the CSUM */
9604                         pbd->tcp_pseudo_csum =
9605                                 bnx2x_csum_fix(skb_transport_header(skb),
9606                                                SKB_CS(skb), fix);
9607
9608                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9609                            pbd->tcp_pseudo_csum);
9610                 }
9611         }
9612
9613         mapping = pci_map_single(bp->pdev, skb->data,
9614                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9615
9616         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9617         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
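        /* nbd counts the frag BDs plus one BD for the linear data, plus
         * the parsing BD when present; a TSO header split below may add
         * one more.
         */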
9618         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9619         tx_bd->nbd = cpu_to_le16(nbd);
9620         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9621
9622         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
9623            "  nbytes %d  flags %x  vlan %x\n",
9624            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9625            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9626            le16_to_cpu(tx_bd->vlan));
9627
9628         if (xmit_type & XMIT_GSO) {
9629
9630                 DP(NETIF_MSG_TX_QUEUED,
9631                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
9632                    skb->len, hlen, skb_headlen(skb),
9633                    skb_shinfo(skb)->gso_size);
9634
9635                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9636
9637                 if (unlikely(skb_headlen(skb) > hlen))
9638                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9639                                                  bd_prod, ++nbd);
9640
9641                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9642                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9643                 pbd->tcp_flags = pbd_tcp_flags(skb);
9644
9645                 if (xmit_type & XMIT_GSO_V4) {
9646                         pbd->ip_id = swab16(ip_hdr(skb)->id);
9647                         pbd->tcp_pseudo_csum =
9648                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9649                                                           ip_hdr(skb)->daddr,
9650                                                           0, IPPROTO_TCP, 0));
9651
9652                 } else
9653                         pbd->tcp_pseudo_csum =
9654                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9655                                                         &ipv6_hdr(skb)->daddr,
9656                                                         0, IPPROTO_TCP, 0));
9657
9658                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9659         }
9660
9661         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9662                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9663
9664                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9665                 tx_bd = &fp->tx_desc_ring[bd_prod];
9666
9667                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9668                                        frag->size, PCI_DMA_TODEVICE);
9669
9670                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9671                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9672                 tx_bd->nbytes = cpu_to_le16(frag->size);
9673                 tx_bd->vlan = cpu_to_le16(pkt_prod);
9674                 tx_bd->bd_flags.as_bitfield = 0;
9675
9676                 DP(NETIF_MSG_TX_QUEUED,
9677                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
9678                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9679                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9680         }
9681
9682         /* now at last mark the BD as the last BD */
9683         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9684
9685         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
9686            tx_bd, tx_bd->bd_flags.as_bitfield);
9687
9688         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9689
9690         /* now send a tx doorbell, counting the next-page link BD
9691          * as well if the packet's BDs cross or end on a page boundary
9692          */
9693         if (TX_BD_POFF(bd_prod) < nbd)
9694                 nbd++;
9695
9696         if (pbd)
9697                 DP(NETIF_MSG_TX_QUEUED,
9698                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
9699                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
9700                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9701                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9702                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9703
9704         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
9705
9706         fp->hw_tx_prods->bds_prod =
9707                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9708         mb(); /* FW restriction: must not reorder writing nbd and packets */
9709         fp->hw_tx_prods->packets_prod =
9710                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9711         DOORBELL(bp, FP_IDX(fp), 0);
9712
9713         mmiowb();
9714
9715         fp->tx_bd_prod += nbd;
9716         dev->trans_start = jiffies;
9717
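        /* stop the queue first and only then re-check: bnx2x_tx_int() may
         * have freed BDs in the meantime, and waking the queue here avoids
         * stalling it forever.
         */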
9718         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9719                 netif_stop_queue(dev);
9720                 bp->eth_stats.driver_xoff++;
9721                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9722                         netif_wake_queue(dev);
9723         }
9724         fp->tx_pkt++;
9725
9726         return NETDEV_TX_OK;
9727 }
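/* BD accounting example for bnx2x_start_xmit() above (a sketch): a
 * 2-frag TSO packet whose linear part holds more than the headers
 * consumes 5 BDs - the start BD (headers only after the split), the
 * parsing BD, the split-off data BD and one BD per frag - plus the
 * next-page link BD if that run crosses a BD page boundary.
 */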
9728
9729 /* called with rtnl_lock */
9730 static int bnx2x_open(struct net_device *dev)
9731 {
9732         struct bnx2x *bp = netdev_priv(dev);
9733
9734         bnx2x_set_power_state(bp, PCI_D0);
9735
9736         return bnx2x_nic_load(bp, LOAD_OPEN);
9737 }
9738
9739 /* called with rtnl_lock */
9740 static int bnx2x_close(struct net_device *dev)
9741 {
9742         struct bnx2x *bp = netdev_priv(dev);
9743
9744         /* Unload the driver, release IRQs */
9745         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9746         if (atomic_read(&bp->pdev->enable_cnt) == 1)
9747                 if (!CHIP_REV_IS_SLOW(bp))
9748                         bnx2x_set_power_state(bp, PCI_D3hot);
9749
9750         return 0;
9751 }
9752
9753 /* called with netif_tx_lock from set_multicast */
9754 static void bnx2x_set_rx_mode(struct net_device *dev)
9755 {
9756         struct bnx2x *bp = netdev_priv(dev);
9757         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9758         int port = BP_PORT(bp);
9759
9760         if (bp->state != BNX2X_STATE_OPEN) {
9761                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9762                 return;
9763         }
9764
9765         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9766
9767         if (dev->flags & IFF_PROMISC)
9768                 rx_mode = BNX2X_RX_MODE_PROMISC;
9769
9770         else if ((dev->flags & IFF_ALLMULTI) ||
9771                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9772                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9773
9774         else { /* some multicasts */
9775                 if (CHIP_IS_E1(bp)) {
9776                         int i, old, offset;
9777                         struct dev_mc_list *mclist;
9778                         struct mac_configuration_cmd *config =
9779                                                 bnx2x_sp(bp, mcast_config);
9780
9781                         for (i = 0, mclist = dev->mc_list;
9782                              mclist && (i < dev->mc_count);
9783                              i++, mclist = mclist->next) {
9784
9785                                 config->config_table[i].
9786                                         cam_entry.msb_mac_addr =
9787                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
9788                                 config->config_table[i].
9789                                         cam_entry.middle_mac_addr =
9790                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
9791                                 config->config_table[i].
9792                                         cam_entry.lsb_mac_addr =
9793                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
9794                                 config->config_table[i].cam_entry.flags =
9795                                                         cpu_to_le16(port);
9796                                 config->config_table[i].
9797                                         target_table_entry.flags = 0;
9798                                 config->config_table[i].
9799                                         target_table_entry.client_id = 0;
9800                                 config->config_table[i].
9801                                         target_table_entry.vlan_id = 0;
9802
9803                                 DP(NETIF_MSG_IFUP,
9804                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9805                                    config->config_table[i].
9806                                                 cam_entry.msb_mac_addr,
9807                                    config->config_table[i].
9808                                                 cam_entry.middle_mac_addr,
9809                                    config->config_table[i].
9810                                                 cam_entry.lsb_mac_addr);
9811                         }
9812                         old = config->hdr.length_6b;
9813                         if (old > i) {
9814                                 for (; i < old; i++) {
9815                                         if (CAM_IS_INVALID(config->
9816                                                            config_table[i])) {
9817                                                 i--; /* already invalidated */
9818                                                 break;
9819                                         }
9820                                         /* invalidate */
9821                                         CAM_INVALIDATE(config->
9822                                                        config_table[i]);
9823                                 }
9824                         }
9825
9826                         if (CHIP_REV_IS_SLOW(bp))
9827                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9828                         else
9829                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
9830
9831                         config->hdr.length_6b = i;
9832                         config->hdr.offset = offset;
9833                         config->hdr.client_id = BP_CL_ID(bp);
9834                         config->hdr.reserved1 = 0;
9835
9836                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9837                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9838                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9839                                       0);
9840                 } else { /* E1H */
9841                         /* Accept one or more multicasts */
9842                         struct dev_mc_list *mclist;
9843                         u32 mc_filter[MC_HASH_SIZE];
9844                         u32 crc, bit, regidx;
9845                         int i;
9846
9847                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9848
9849                         for (i = 0, mclist = dev->mc_list;
9850                              mclist && (i < dev->mc_count);
9851                              i++, mclist = mclist->next) {
9852
9853                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
9854                                    "%02x:%02x:%02x:%02x:%02x:%02x\n",
9855                                    mclist->dmi_addr[0], mclist->dmi_addr[1],
9856                                    mclist->dmi_addr[2], mclist->dmi_addr[3],
9857                                    mclist->dmi_addr[4], mclist->dmi_addr[5]);
9858
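                                /* the top 8 bits of the CRC32c pick one
                                 * of 256 filter bits: bits 7:5 select
                                 * one of the MC_HASH_SIZE (8) 32-bit
                                 * registers, bits 4:0 the bit within it
                                 */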
9859                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9860                                 bit = (crc >> 24) & 0xff;
9861                                 regidx = bit >> 5;
9862                                 bit &= 0x1f;
9863                                 mc_filter[regidx] |= (1 << bit);
9864                         }
9865
9866                         for (i = 0; i < MC_HASH_SIZE; i++)
9867                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9868                                        mc_filter[i]);
9869                 }
9870         }
9871
9872         bp->rx_mode = rx_mode;
9873         bnx2x_set_storm_rx_mode(bp);
9874 }
9875
9876 /* called with rtnl_lock */
9877 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9878 {
9879         struct sockaddr *addr = p;
9880         struct bnx2x *bp = netdev_priv(dev);
9881
9882         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9883                 return -EINVAL;
9884
9885         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9886         if (netif_running(dev)) {
9887                 if (CHIP_IS_E1(bp))
9888                         bnx2x_set_mac_addr_e1(bp, 1);
9889                 else
9890                         bnx2x_set_mac_addr_e1h(bp, 1);
9891         }
9892
9893         return 0;
9894 }
9895
9896 /* called with rtnl_lock */
9897 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9898 {
9899         struct mii_ioctl_data *data = if_mii(ifr);
9900         struct bnx2x *bp = netdev_priv(dev);
9901         int err;
9902
9903         switch (cmd) {
9904         case SIOCGMIIPHY:
9905                 data->phy_id = bp->port.phy_addr;
9906
9907                 /* fallthrough */
9908
9909         case SIOCGMIIREG: {
9910                 u16 mii_regval;
9911
9912                 if (!netif_running(dev))
9913                         return -EAGAIN;
9914
9915                 mutex_lock(&bp->port.phy_mutex);
9916                 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9917                                       DEFAULT_PHY_DEV_ADDR,
9918                                       (data->reg_num & 0x1f), &mii_regval);
9919                 data->val_out = mii_regval;
9920                 mutex_unlock(&bp->port.phy_mutex);
9921                 return err;
9922         }
9923
9924         case SIOCSMIIREG:
9925                 if (!capable(CAP_NET_ADMIN))
9926                         return -EPERM;
9927
9928                 if (!netif_running(dev))
9929                         return -EAGAIN;
9930
9931                 mutex_lock(&bp->port.phy_mutex);
9932                 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
9933                                        DEFAULT_PHY_DEV_ADDR,
9934                                        (data->reg_num & 0x1f), data->val_in);
9935                 mutex_unlock(&bp->port.phy_mutex);
9936                 return err;
9937
9938         default:
9939                 /* do nothing */
9940                 break;
9941         }
9942
9943         return -EOPNOTSUPP;
9944 }
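/* A minimal userspace sketch of driving the MII path above (hedged:
 * the socket setup and "eth0" are illustrative, not part of this
 * driver):
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(sock, SIOCGMIIPHY, &ifr);    - fills mii->phy_id
 *	mii->reg_num = 1;                  - e.g. the MII status register
 *	ioctl(sock, SIOCGMIIREG, &ifr);    - value returned in mii->val_out
 */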
9945
9946 /* called with rtnl_lock */
9947 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
9948 {
9949         struct bnx2x *bp = netdev_priv(dev);
9950         int rc = 0;
9951
9952         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
9953             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
9954                 return -EINVAL;
9955
9956         /* This does not race with packet allocation
9957          * because the actual alloc size is
9958          * only updated as part of load
9959          */
9960         dev->mtu = new_mtu;
9961
9962         if (netif_running(dev)) {
9963                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9964                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9965         }
9966
9967         return rc;
9968 }
9969
9970 static void bnx2x_tx_timeout(struct net_device *dev)
9971 {
9972         struct bnx2x *bp = netdev_priv(dev);
9973
9974 #ifdef BNX2X_STOP_ON_ERROR
9975         if (!bp->panic)
9976                 bnx2x_panic();
9977 #endif
9978         /* This allows the netif to be shut down gracefully before resetting */
9979         schedule_work(&bp->reset_task);
9980 }
9981
9982 #ifdef BCM_VLAN
9983 /* called with rtnl_lock */
9984 static void bnx2x_vlan_rx_register(struct net_device *dev,
9985                                    struct vlan_group *vlgrp)
9986 {
9987         struct bnx2x *bp = netdev_priv(dev);
9988
9989         bp->vlgrp = vlgrp;
9990         if (netif_running(dev))
9991                 bnx2x_set_client_config(bp);
9992 }
9993
9994 #endif
9995
9996 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9997 static void poll_bnx2x(struct net_device *dev)
9998 {
9999         struct bnx2x *bp = netdev_priv(dev);
10000
10001         disable_irq(bp->pdev->irq);
10002         bnx2x_interrupt(bp->pdev->irq, dev);
10003         enable_irq(bp->pdev->irq);
10004 }
10005 #endif
10006
10007 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10008                                     struct net_device *dev)
10009 {
10010         struct bnx2x *bp;
10011         int rc;
10012
10013         SET_NETDEV_DEV(dev, &pdev->dev);
10014         bp = netdev_priv(dev);
10015
10016         bp->dev = dev;
10017         bp->pdev = pdev;
10018         bp->flags = 0;
10019         bp->func = PCI_FUNC(pdev->devfn);
10020
10021         rc = pci_enable_device(pdev);
10022         if (rc) {
10023                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10024                 goto err_out;
10025         }
10026
10027         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10028                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10029                        " aborting\n");
10030                 rc = -ENODEV;
10031                 goto err_out_disable;
10032         }
10033
10034         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10035                 printk(KERN_ERR PFX "Cannot find second PCI device"
10036                        " base address, aborting\n");
10037                 rc = -ENODEV;
10038                 goto err_out_disable;
10039         }
10040
10041         if (atomic_read(&pdev->enable_cnt) == 1) {
10042                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10043                 if (rc) {
10044                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10045                                " aborting\n");
10046                         goto err_out_disable;
10047                 }
10048
10049                 pci_set_master(pdev);
10050                 pci_save_state(pdev);
10051         }
10052
10053         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10054         if (bp->pm_cap == 0) {
10055                 printk(KERN_ERR PFX "Cannot find power management"
10056                        " capability, aborting\n");
10057                 rc = -EIO;
10058                 goto err_out_release;
10059         }
10060
10061         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10062         if (bp->pcie_cap == 0) {
10063                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10064                        " aborting\n");
10065                 rc = -EIO;
10066                 goto err_out_release;
10067         }
10068
10069         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10070                 bp->flags |= USING_DAC_FLAG;
10071                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10072                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10073                                " failed, aborting\n");
10074                         rc = -EIO;
10075                         goto err_out_release;
10076                 }
10077
10078         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10079                 printk(KERN_ERR PFX "System does not support DMA,"
10080                        " aborting\n");
10081                 rc = -EIO;
10082                 goto err_out_release;
10083         }
10084
10085         dev->mem_start = pci_resource_start(pdev, 0);
10086         dev->base_addr = dev->mem_start;
10087         dev->mem_end = pci_resource_end(pdev, 0);
10088
10089         dev->irq = pdev->irq;
10090
10091         bp->regview = ioremap_nocache(dev->base_addr,
10092                                       pci_resource_len(pdev, 0));
10093         if (!bp->regview) {
10094                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10095                 rc = -ENOMEM;
10096                 goto err_out_release;
10097         }
10098
10099         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10100                                         min_t(u64, BNX2X_DB_SIZE,
10101                                               pci_resource_len(pdev, 2)));
10102         if (!bp->doorbells) {
10103                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10104                 rc = -ENOMEM;
10105                 goto err_out_unmap;
10106         }
10107
10108         bnx2x_set_power_state(bp, PCI_D0);
10109
10110         /* clean indirect addresses */
10111         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10112                                PCICFG_VENDOR_ID_OFFSET);
10113         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10114         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10115         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10116         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10117
10118         dev->hard_start_xmit = bnx2x_start_xmit;
10119         dev->watchdog_timeo = TX_TIMEOUT;
10120
10121         dev->ethtool_ops = &bnx2x_ethtool_ops;
10122         dev->open = bnx2x_open;
10123         dev->stop = bnx2x_close;
10124         dev->set_multicast_list = bnx2x_set_rx_mode;
10125         dev->set_mac_address = bnx2x_change_mac_addr;
10126         dev->do_ioctl = bnx2x_ioctl;
10127         dev->change_mtu = bnx2x_change_mtu;
10128         dev->tx_timeout = bnx2x_tx_timeout;
10129 #ifdef BCM_VLAN
10130         dev->vlan_rx_register = bnx2x_vlan_rx_register;
10131 #endif
10132 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10133         dev->poll_controller = poll_bnx2x;
10134 #endif
10135         dev->features |= NETIF_F_SG;
10136         dev->features |= NETIF_F_HW_CSUM;
10137         if (bp->flags & USING_DAC_FLAG)
10138                 dev->features |= NETIF_F_HIGHDMA;
10139 #ifdef BCM_VLAN
10140         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10141 #endif
10142         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10143         dev->features |= NETIF_F_TSO6;
10144
10145         return 0;
10146
10147 err_out_unmap:
10148         if (bp->regview) {
10149                 iounmap(bp->regview);
10150                 bp->regview = NULL;
10151         }
10152         if (bp->doorbells) {
10153                 iounmap(bp->doorbells);
10154                 bp->doorbells = NULL;
10155         }
10156
10157 err_out_release:
10158         if (atomic_read(&pdev->enable_cnt) == 1)
10159                 pci_release_regions(pdev);
10160
10161 err_out_disable:
10162         pci_disable_device(pdev);
10163         pci_set_drvdata(pdev, NULL);
10164
10165 err_out:
10166         return rc;
10167 }
10168
10169 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10170 {
10171         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10172
10173         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10174         return val;
10175 }
10176
10177 /* return value: 1=2.5GHz, 2=5GHz */
10178 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10179 {
10180         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10181
10182         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10183         return val;
10184 }
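/* e.g. a x8 link trained at 2.5GHz reads back width 8 and speed 1 from
 * the helpers above; bnx2x_init_one() below reports that as
 * "PCI-E x8 2.5GHz".
 */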
10185
10186 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10187                                     const struct pci_device_id *ent)
10188 {
10189         static int version_printed;
10190         struct net_device *dev = NULL;
10191         struct bnx2x *bp;
10192         int rc;
10193         DECLARE_MAC_BUF(mac);
10194
10195         if (version_printed++ == 0)
10196                 printk(KERN_INFO "%s", version);
10197
10198         /* dev zeroed in alloc_etherdev */
10199         dev = alloc_etherdev(sizeof(*bp));
10200         if (!dev) {
10201                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10202                 return -ENOMEM;
10203         }
10204
10205         netif_carrier_off(dev);
10206
10207         bp = netdev_priv(dev);
10208         bp->msglevel = debug;
10209
10210         rc = bnx2x_init_dev(pdev, dev);
10211         if (rc < 0) {
10212                 free_netdev(dev);
10213                 return rc;
10214         }
10215
10216         rc = register_netdev(dev);
10217         if (rc) {
10218                 dev_err(&pdev->dev, "Cannot register net device\n");
10219                 goto init_one_exit;
10220         }
10221
10222         pci_set_drvdata(pdev, dev);
10223
10224         rc = bnx2x_init_bp(bp);
10225         if (rc) {
10226                 unregister_netdev(dev);
10227                 goto init_one_exit;
10228         }
10229
10230         bp->common.name = board_info[ent->driver_data].name;
10231         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10232                " IRQ %d, ", dev->name, bp->common.name,
10233                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10234                bnx2x_get_pcie_width(bp),
10235                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10236                dev->base_addr, bp->pdev->irq);
10237         printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
10238         return 0;
10239
10240 init_one_exit:
10241         if (bp->regview)
10242                 iounmap(bp->regview);
10243
10244         if (bp->doorbells)
10245                 iounmap(bp->doorbells);
10246
10247         free_netdev(dev);
10248
10249         if (atomic_read(&pdev->enable_cnt) == 1)
10250                 pci_release_regions(pdev);
10251
10252         pci_disable_device(pdev);
10253         pci_set_drvdata(pdev, NULL);
10254
10255         return rc;
10256 }
10257
10258 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10259 {
10260         struct net_device *dev = pci_get_drvdata(pdev);
10261         struct bnx2x *bp;
10262
10263         if (!dev) {
10264                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10265                 return;
10266         }
10267         bp = netdev_priv(dev);
10268
10269         unregister_netdev(dev);
10270
10271         if (bp->regview)
10272                 iounmap(bp->regview);
10273
10274         if (bp->doorbells)
10275                 iounmap(bp->doorbells);
10276
10277         free_netdev(dev);
10278
10279         if (atomic_read(&pdev->enable_cnt) == 1)
10280                 pci_release_regions(pdev);
10281
10282         pci_disable_device(pdev);
10283         pci_set_drvdata(pdev, NULL);
10284 }
10285
10286 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10287 {
10288         struct net_device *dev = pci_get_drvdata(pdev);
10289         struct bnx2x *bp;
10290
10291         if (!dev) {
10292                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10293                 return -ENODEV;
10294         }
10295         bp = netdev_priv(dev);
10296
10297         rtnl_lock();
10298
10299         pci_save_state(pdev);
10300
10301         if (!netif_running(dev)) {
10302                 rtnl_unlock();
10303                 return 0;
10304         }
10305
10306         netif_device_detach(dev);
10307
10308         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10309
10310         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10311
10312         rtnl_unlock();
10313
10314         return 0;
10315 }
10316
10317 static int bnx2x_resume(struct pci_dev *pdev)
10318 {
10319         struct net_device *dev = pci_get_drvdata(pdev);
10320         struct bnx2x *bp;
10321         int rc;
10322
10323         if (!dev) {
10324                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10325                 return -ENODEV;
10326         }
10327         bp = netdev_priv(dev);
10328
10329         rtnl_lock();
10330
10331         pci_restore_state(pdev);
10332
10333         if (!netif_running(dev)) {
10334                 rtnl_unlock();
10335                 return 0;
10336         }
10337
10338         bnx2x_set_power_state(bp, PCI_D0);
10339         netif_device_attach(dev);
10340
10341         rc = bnx2x_nic_load(bp, LOAD_OPEN);
10342
10343         rtnl_unlock();
10344
10345         return rc;
10346 }
10347
10348 /**
10349  * bnx2x_io_error_detected - called when PCI error is detected
10350  * @pdev: Pointer to PCI device
10351  * @state: The current pci connection state
10352  *
10353  * This function is called after a PCI bus error affecting
10354  * this device has been detected.
10355  */
10356 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10357                                                 pci_channel_state_t state)
10358 {
10359         struct net_device *dev = pci_get_drvdata(pdev);
10360         struct bnx2x *bp = netdev_priv(dev);
10361
10362         rtnl_lock();
10363
10364         netif_device_detach(dev);
10365
10366         if (netif_running(dev))
10367                 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10368
10369         pci_disable_device(pdev);
10370
10371         rtnl_unlock();
10372
10373         /* Request a slot reset */
10374         return PCI_ERS_RESULT_NEED_RESET;
10375 }
10376
10377 /**
10378  * bnx2x_io_slot_reset - called after the PCI bus has been reset
10379  * @pdev: Pointer to PCI device
10380  *
10381  * Restart the card from scratch, as if from a cold-boot.
10382  */
10383 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10384 {
10385         struct net_device *dev = pci_get_drvdata(pdev);
10386         struct bnx2x *bp = netdev_priv(dev);
10387
10388         rtnl_lock();
10389
10390         if (pci_enable_device(pdev)) {
10391                 dev_err(&pdev->dev,
10392                         "Cannot re-enable PCI device after reset\n");
10393                 rtnl_unlock();
10394                 return PCI_ERS_RESULT_DISCONNECT;
10395         }
10396
10397         pci_set_master(pdev);
10398         pci_restore_state(pdev);
10399
10400         if (netif_running(dev))
10401                 bnx2x_set_power_state(bp, PCI_D0);
10402
10403         rtnl_unlock();
10404
10405         return PCI_ERS_RESULT_RECOVERED;
10406 }
10407
10408 /**
10409  * bnx2x_io_resume - called when traffic can start flowing again
10410  * @pdev: Pointer to PCI device
10411  *
10412  * This callback is called when the error recovery driver tells us that
10413  * it's OK to resume normal operation.
10414  */
10415 static void bnx2x_io_resume(struct pci_dev *pdev)
10416 {
10417         struct net_device *dev = pci_get_drvdata(pdev);
10418         struct bnx2x *bp = netdev_priv(dev);
10419
10420         rtnl_lock();
10421
10422         if (netif_running(dev))
10423                 bnx2x_nic_load(bp, LOAD_OPEN);
10424
10425         netif_device_attach(dev);
10426
10427         rtnl_unlock();
10428 }
10429
10430 static struct pci_error_handlers bnx2x_err_handler = {
10431         .error_detected = bnx2x_io_error_detected,
10432         .slot_reset = bnx2x_io_slot_reset,
10433         .resume = bnx2x_io_resume,
10434 };
10435
10436 static struct pci_driver bnx2x_pci_driver = {
10437         .name        = DRV_MODULE_NAME,
10438         .id_table    = bnx2x_pci_tbl,
10439         .probe       = bnx2x_init_one,
10440         .remove      = __devexit_p(bnx2x_remove_one),
10441         .suspend     = bnx2x_suspend,
10442         .resume      = bnx2x_resume,
10443         .err_handler = &bnx2x_err_handler,
10444 };
10445
10446 static int __init bnx2x_init(void)
10447 {
10448         return pci_register_driver(&bnx2x_pci_driver);
10449 }
10450
10451 static void __exit bnx2x_cleanup(void)
10452 {
10453         pci_unregister_driver(&bnx2x_pci_driver);
10454 }
10455
10456 module_init(bnx2x_init);
10457 module_exit(bnx2x_cleanup);
10458