/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION      "1.45.26"
#define DRV_MODULE_RELDATE      "2009/01/26"
#define BNX2X_BC_VER            0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
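
/* Both helpers above tunnel GRC accesses through the PCICFG
 * address/data window (PCICFG_GRC_ADDRESS / PCICFG_GRC_DATA).
 * The trailing write that re-points the window at
 * PCICFG_VENDOR_ID_OFFSET appears intended to park it on a harmless
 * offset, so a later stray access through the window cannot clobber
 * an arbitrary GRC register.
 */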

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
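
/* Each of the 16 DMAE channels has its own command buffer inside
 * DMAE_REG_CMD_MEM, sizeof(struct dmae_command) bytes apart; the
 * command is copied in dword by dword and the write of 1 to the
 * matching dmae_reg_go_c[] register is what actually kicks the
 * copy engine.
 */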

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
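
/* Completion handshake used by the DMAE wrappers: wb_comp in the
 * slowpath area is zeroed before the command is posted, and the DMAE
 * block writes DMAE_COMP_VAL back to comp_addr when it is done.  The
 * polling loop above gives up after ~200 iterations (5us steps on
 * real silicon, 100ms on emulation/FPGA) and logs "dmae timeout!".
 */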

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("dmae timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
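
/* bnx2x_mc_assert() walks the assert lists of all four STORM
 * microcode processors (X/T/C/U).  Each list entry is four dwords,
 * and scanning stops at the first entry whose first dword reads
 * COMMON_ASM_INVALID_ASSERT_OPCODE, so the return value is the count
 * of recorded asserts; any non-zero rc indicates firmware trouble.
 */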

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        u32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk("\n" KERN_ERR PFX "end of fw dump\n");
}
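
/* The MCP keeps a printf-style log in its scratchpad, seemingly as a
 * ring buffer: "mark" (read from scratch offset 0xf104) looks like
 * the current write pointer, so the two loops replay the log in
 * order, first from the mark to the end of the buffer and then from
 * the start (0xF108) back up to the mark.  The 0x08000000 subtraction
 * presumably converts an MCP-view address into a scratchpad offset,
 * and htonl() restores byte order so the dwords print as text.
 */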

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

                BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
                          "  *sb_u_idx(%x)  bd data(%x,%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
                          fp->status_blk->c_status_block.status_block_index,
                          fp->fp_u_idx,
                          fp->status_blk->u_status_block.status_block_index,
                          hw_prods->packets_prod, hw_prods->bds_prod);

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j < end; j++) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
                                  sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j < end; j++) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j < end; j++) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j < end; j++) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j < end; j++) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
                                  j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}
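
/* The teardown order above matters: bumping intr_sem first makes the
 * ISRs (which test intr_sem) return early, optionally masking the HC
 * stops new interrupts at the source, synchronize_irq() waits out any
 * handler that is already running, and only then is the slowpath work
 * cancelled and the workqueue flushed.
 */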

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
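
/* The IGU acknowledgement is a single 32-bit write: the consumed
 * status block index plus the sb_id/storm/update/op fields packed
 * into sb_id_and_flags.  One write to COMMAND_REG_INT_ACK both acks
 * the interrupt and (when 'update' is set) reports the new index to
 * the IGU.
 */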

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 tx_cons_sb;

        /* Tell compiler that status block fields can change */
        barrier();
        tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
        return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);

}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_bd *tx_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_bd = &fp->tx_desc_ring[bd_idx];
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                         BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_bd->nbd) - 1;
        new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
        if (nbd > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif

        /* Skip a parse bd and the TSO split header bd
           since they have no mapping */
        if (nbd)
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
                                           ETH_TX_BD_FLAGS_TCP_CSUM |
                                           ETH_TX_BD_FLAGS_SW_LSO)) {
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                tx_bd = &fp->tx_desc_ring[bd_idx];
                /* is this a TSO split header bd? */
                if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
                        if (--nbd)
                                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
                }
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_bd = &fp->tx_desc_ring[bd_idx];
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
                               BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
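
/* BD bookkeeping in bnx2x_free_tx_pkt(): nbd starts as the BD count
 * taken from the first BD minus the one just unmapped.  The parse BD
 * (present when any CSUM/LSO flag is set) and an optional TSO
 * split-header BD carry no DMA mapping of their own, so they are
 * skipped; every remaining BD is a fragment unmapped with
 * pci_unmap_page().
 */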

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it is used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
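
/* Worked example with hypothetical numbers: on a 4096-entry ring
 * with prod == cons (empty), 'used' is just the NUM_TX_RINGS
 * "next-page" BDs that can never carry packets, so tx_avail()
 * returns 4096 - NUM_TX_RINGS.
 */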

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;

                if (done == work)
                        break;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           FP_IDX(fp), cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (FP_IDX(fp)) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}
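
/* sge_mask keeps one bit per SGE entry and a set bit means "free".
 * The loop above clears the bits of the pages a CQE consumed, then
 * advances the producer only across 64-bit mask words that went
 * fully to zero, re-marking them as free as it goes; that is why the
 * producer moves in RX_SGE_MASK_ELEM_SZ steps and why the "next page"
 * bits must be cleared again afterwards.
 */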

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we're going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}
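
/* Setting gso_size on the aggregated skb is what keeps TPA compatible
 * with forwarding (the comment above calls this out): if the frame is
 * later routed back out, the stack can re-segment it with GSO rather
 * than trying to transmit one oversized packet.
 */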

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take the VLAN tag into account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
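
/* The IP header checksum is recomputed above presumably because the
 * aggregated frame no longer matches any single on-wire packet: the
 * firmware merges several segments into one, so the header no longer
 * carries the checksum the sender computed.  The L4 checksum is
 * covered by the CHECKSUM_UNNECESSARY marking instead.
 */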
1383
1384 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1385                                         struct bnx2x_fastpath *fp,
1386                                         u16 bd_prod, u16 rx_comp_prod,
1387                                         u16 rx_sge_prod)
1388 {
1389         struct ustorm_eth_rx_producers rx_prods = {0};
1390         int i;
1391
1392         /* Update producers */
1393         rx_prods.bd_prod = bd_prod;
1394         rx_prods.cqe_prod = rx_comp_prod;
1395         rx_prods.sge_prod = rx_sge_prod;
1396
1397         /*
1398          * Make sure that the BD and SGE data is updated before updating the
1399          * producers since FW might read the BD/SGE right after the producer
1400          * is updated.
1401          * This is only applicable for weak-ordered memory model archs such
1402          * as IA-64. The following barrier is also mandatory since FW will
1403          * assumes BDs must have buffers.
1404          */
1405         wmb();
1406
1407         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1408                 REG_WR(bp, BAR_USTRORM_INTMEM +
1409                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1410                        ((u32 *)&rx_prods)[i]);
1411
1412         mmiowb(); /* keep prod updates ordered */
1413
1414         DP(NETIF_MSG_RX_STATUS,
1415            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1416            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1417 }
1418
1419 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1420 {
1421         struct bnx2x *bp = fp->bp;
1422         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1423         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1424         int rx_pkt = 0;
1425
1426 #ifdef BNX2X_STOP_ON_ERROR
1427         if (unlikely(bp->panic))
1428                 return 0;
1429 #endif
1430
1431         /* the CQ "next element" is the same size as a regular
1432            element, so it is safe to treat it as one here */
1433         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1434         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1435                 hw_comp_cons++;
1436
1437         bd_cons = fp->rx_bd_cons;
1438         bd_prod = fp->rx_bd_prod;
1439         bd_prod_fw = bd_prod;
1440         sw_comp_cons = fp->rx_comp_cons;
1441         sw_comp_prod = fp->rx_comp_prod;
1442
1443         /* Memory barrier necessary as speculative reads of the rx
1444          * buffer can be ahead of the index in the status block
1445          */
1446         rmb();
1447
1448         DP(NETIF_MSG_RX_STATUS,
1449            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1450            FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1451
1452         while (sw_comp_cons != hw_comp_cons) {
1453                 struct sw_rx_bd *rx_buf = NULL;
1454                 struct sk_buff *skb;
1455                 union eth_rx_cqe *cqe;
1456                 u8 cqe_fp_flags;
1457                 u16 len, pad;
1458
1459                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1460                 bd_prod = RX_BD(bd_prod);
1461                 bd_cons = RX_BD(bd_cons);
1462
1463                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1464                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1465
1466                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1467                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1468                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1469                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1470                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1471                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1472
1473                 /* is this a slowpath msg? */
1474                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1475                         bnx2x_sp_event(fp, cqe);
1476                         goto next_cqe;
1477
1478                 /* this is an rx packet */
1479                 } else {
1480                         rx_buf = &fp->rx_buf_ring[bd_cons];
1481                         skb = rx_buf->skb;
1482                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1483                         pad = cqe->fast_path_cqe.placement_offset;
1484
1485                         /* If CQE is marked both TPA_START and TPA_END
1486                            it is a non-TPA CQE */
1487                         if ((!fp->disable_tpa) &&
1488                             (TPA_TYPE(cqe_fp_flags) !=
1489                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1490                                 u16 queue = cqe->fast_path_cqe.queue_index;
1491
1492                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1493                                         DP(NETIF_MSG_RX_STATUS,
1494                                            "calling tpa_start on queue %d\n",
1495                                            queue);
1496
1497                                         bnx2x_tpa_start(fp, queue, skb,
1498                                                         bd_cons, bd_prod);
1499                                         goto next_rx;
1500                                 }
1501
1502                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1503                                         DP(NETIF_MSG_RX_STATUS,
1504                                            "calling tpa_stop on queue %d\n",
1505                                            queue);
1506
1507                                         if (!BNX2X_RX_SUM_FIX(cqe))
1508                                                 BNX2X_ERR("STOP on non-TCP "
1509                                                           "data\n");
1510
1511                                         /* This is the size of the linear
1512                                            data on this skb */
1513                                         len = le16_to_cpu(cqe->fast_path_cqe.
1514                                                                 len_on_bd);
1515                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1516                                                     len, cqe, comp_ring_cons);
1517 #ifdef BNX2X_STOP_ON_ERROR
1518                                         if (bp->panic)
1519                                                 return -EINVAL;
1520 #endif
1521
1522                                         bnx2x_update_sge_prod(fp,
1523                                                         &cqe->fast_path_cqe);
1524                                         goto next_cqe;
1525                                 }
1526                         }
1527
1528                         pci_dma_sync_single_for_device(bp->pdev,
1529                                         pci_unmap_addr(rx_buf, mapping),
1530                                                        pad + RX_COPY_THRESH,
1531                                                        PCI_DMA_FROMDEVICE);
1532                         prefetch(skb);
1533                         prefetch(((char *)(skb)) + 128);
1534
1535                         /* is this an error packet? */
1536                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1537                                 DP(NETIF_MSG_RX_ERR,
1538                                    "ERROR  flags %x  rx packet %u\n",
1539                                    cqe_fp_flags, sw_comp_cons);
1540                                 fp->eth_q_stats.rx_err_discard_pkt++;
1541                                 goto reuse_rx;
1542                         }
1543
1544                         /* Since we don't have a jumbo ring,
1545                          * copy small packets if mtu > 1500
1546                          */
1547                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1548                             (len <= RX_COPY_THRESH)) {
1549                                 struct sk_buff *new_skb;
1550
1551                                 new_skb = netdev_alloc_skb(bp->dev,
1552                                                            len + pad);
1553                                 if (new_skb == NULL) {
1554                                         DP(NETIF_MSG_RX_ERR,
1555                                            "ERROR  packet dropped "
1556                                            "because of alloc failure\n");
1557                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1558                                         goto reuse_rx;
1559                                 }
1560
1561                                 /* aligned copy */
1562                                 skb_copy_from_linear_data_offset(skb, pad,
1563                                                     new_skb->data + pad, len);
1564                                 skb_reserve(new_skb, pad);
1565                                 skb_put(new_skb, len);
1566
1567                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1568
1569                                 skb = new_skb;
1570
1571                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1572                                 pci_unmap_single(bp->pdev,
1573                                         pci_unmap_addr(rx_buf, mapping),
1574                                                  bp->rx_buf_size,
1575                                                  PCI_DMA_FROMDEVICE);
1576                                 skb_reserve(skb, pad);
1577                                 skb_put(skb, len);
1578
1579                         } else {
1580                                 DP(NETIF_MSG_RX_ERR,
1581                                    "ERROR  packet dropped because "
1582                                    "of alloc failure\n");
1583                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1584 reuse_rx:
1585                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1586                                 goto next_rx;
1587                         }
1588
1589                         skb->protocol = eth_type_trans(skb, bp->dev);
1590
1591                         skb->ip_summed = CHECKSUM_NONE;
1592                         if (bp->rx_csum) {
1593                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1594                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1595                                 else
1596                                         fp->eth_q_stats.hw_csum_err++;
1597                         }
1598                 }
1599
1600                 skb_record_rx_queue(skb, fp->index);
1601 #ifdef BCM_VLAN
1602                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1603                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1604                      PARSING_FLAGS_VLAN))
1605                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1606                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1607                 else
1608 #endif
1609                         netif_receive_skb(skb);
1610
1611
1612 next_rx:
1613                 rx_buf->skb = NULL;
1614
1615                 bd_cons = NEXT_RX_IDX(bd_cons);
1616                 bd_prod = NEXT_RX_IDX(bd_prod);
1617                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1618                 rx_pkt++;
1619 next_cqe:
1620                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1621                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1622
1623                 if (rx_pkt == budget)
1624                         break;
1625         } /* while */
1626
1627         fp->rx_bd_cons = bd_cons;
1628         fp->rx_bd_prod = bd_prod_fw;
1629         fp->rx_comp_cons = sw_comp_cons;
1630         fp->rx_comp_prod = sw_comp_prod;
1631
1632         /* Update producers */
1633         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1634                              fp->rx_sge_prod);
1635
1636         fp->rx_pkt += rx_pkt;
1637         fp->rx_calls++;
1638
1639         return rx_pkt;
1640 }
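/*
 * bnx2x_rx_int() honours the NAPI budget contract: it returns the
 * number of packets processed and never exceeds @budget.  A minimal
 * sketch of a NAPI poll caller, assuming the usual complete/re-enable
 * flow (the driver's real poll routine appears later in this file):
 *
 *	work_done = bnx2x_rx_int(fp, budget);
 *	if (work_done < budget) {
 *		napi_complete(napi);
 *		bnx2x_ack_sb(bp, FP_SB_ID(fp), ..., IGU_INT_ENABLE, ...);
 *	}
 *	return work_done;
 */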
1641
1642 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1643 {
1644         struct bnx2x_fastpath *fp = fp_cookie;
1645         struct bnx2x *bp = fp->bp;
1646         int index = FP_IDX(fp);
1647
1648         /* Return here if interrupt is disabled */
1649         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1650                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1651                 return IRQ_HANDLED;
1652         }
1653
1654         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1655            index, FP_SB_ID(fp));
1656         bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1657
1658 #ifdef BNX2X_STOP_ON_ERROR
1659         if (unlikely(bp->panic))
1660                 return IRQ_HANDLED;
1661 #endif
1662
1663         prefetch(fp->rx_cons_sb);
1664         prefetch(fp->tx_cons_sb);
1665         prefetch(&fp->status_blk->c_status_block.status_block_index);
1666         prefetch(&fp->status_blk->u_status_block.status_block_index);
1667
1668         napi_schedule(&bnx2x_fp(bp, index, napi));
1669
1670         return IRQ_HANDLED;
1671 }
1672
1673 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1674 {
1675         struct bnx2x *bp = netdev_priv(dev_instance);
1676         u16 status = bnx2x_ack_int(bp);
1677         u16 mask;
1678
1679         /* Return here if interrupt is shared and it's not for us */
1680         if (unlikely(status == 0)) {
1681                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1682                 return IRQ_NONE;
1683         }
1684         DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);
1685
1686         /* Return here if interrupt is disabled */
1687         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1688                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1689                 return IRQ_HANDLED;
1690         }
1691
1692 #ifdef BNX2X_STOP_ON_ERROR
1693         if (unlikely(bp->panic))
1694                 return IRQ_HANDLED;
1695 #endif
1696
1697         mask = 0x2 << bp->fp[0].sb_id;
1698         if (status & mask) {
1699                 struct bnx2x_fastpath *fp = &bp->fp[0];
1700
1701                 prefetch(fp->rx_cons_sb);
1702                 prefetch(fp->tx_cons_sb);
1703                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1704                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1705
1706                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1707
1708                 status &= ~mask;
1709         }
1710
1711
1712         if (unlikely(status & 0x1)) {
1713                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1714
1715                 status &= ~0x1;
1716                 if (!status)
1717                         return IRQ_HANDLED;
1718         }
1719
1720         if (status)
1721                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1722                    status);
1723
1724         return IRQ_HANDLED;
1725 }
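/*
 * Decoding the INTA status word above, by example: with fp[0].sb_id
 * equal to 0 the fastpath mask is 0x2 << 0 == 0x2, so a status of 0x3
 * means fastpath 0 has work (0x2) and a slowpath event is pending
 * (0x1).  Each bit is cleared as it is handled; any leftover bits are
 * logged as an unknown interrupt.
 */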
1726
1727 /* end of fast path */
1728
1729 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1730
1731 /* Link */
1732
1733 /*
1734  * General service functions
1735  */
1736
1737 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1738 {
1739         u32 lock_status;
1740         u32 resource_bit = (1 << resource);
1741         int func = BP_FUNC(bp);
1742         u32 hw_lock_control_reg;
1743         int cnt;
1744
1745         /* Validating that the resource is within range */
1746         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1747                 DP(NETIF_MSG_HW,
1748                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1749                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1750                 return -EINVAL;
1751         }
1752
1753         if (func <= 5) {
1754                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1755         } else {
1756                 hw_lock_control_reg =
1757                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1758         }
1759
1760         /* Validating that the resource is not already taken */
1761         lock_status = REG_RD(bp, hw_lock_control_reg);
1762         if (lock_status & resource_bit) {
1763                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1764                    lock_status, resource_bit);
1765                 return -EEXIST;
1766         }
1767
1768         /* Try for 5 seconds, polling every 5ms */
1769         for (cnt = 0; cnt < 1000; cnt++) {
1770                 /* Try to acquire the lock */
1771                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1772                 lock_status = REG_RD(bp, hw_lock_control_reg);
1773                 if (lock_status & resource_bit)
1774                         return 0;
1775
1776                 msleep(5);
1777         }
1778         DP(NETIF_MSG_HW, "Timeout\n");
1779         return -EAGAIN;
1780 }
1781
1782 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1783 {
1784         u32 lock_status;
1785         u32 resource_bit = (1 << resource);
1786         int func = BP_FUNC(bp);
1787         u32 hw_lock_control_reg;
1788
1789         /* Validating that the resource is within range */
1790         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1791                 DP(NETIF_MSG_HW,
1792                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1794                 return -EINVAL;
1795         }
1796
1797         if (func <= 5) {
1798                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1799         } else {
1800                 hw_lock_control_reg =
1801                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1802         }
1803
1804         /* Validating that the resource is currently taken */
1805         lock_status = REG_RD(bp, hw_lock_control_reg);
1806         if (!(lock_status & resource_bit)) {
1807                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1808                    lock_status, resource_bit);
1809                 return -EFAULT;
1810         }
1811
1812         REG_WR(bp, hw_lock_control_reg, resource_bit);
1813         return 0;
1814 }
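/*
 * The driver-control lock is a set/clear register pair: writing the
 * resource bit to (hw_lock_control_reg + 4) tries to set it, writing
 * it to hw_lock_control_reg clears it, and a read-back shows the
 * current owner bits.  Minimal usage sketch, assuming a valid
 * resource id:
 *
 *	if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO)) {
 *		... touch the shared resource ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
 *	}
 */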
1815
1816 /* HW Lock for shared dual port PHYs */
1817 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1818 {
1819         mutex_lock(&bp->port.phy_mutex);
1820
1821         if (bp->port.need_hw_lock)
1822                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1823 }
1824
1825 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1826 {
1827         if (bp->port.need_hw_lock)
1828                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1829
1830         mutex_unlock(&bp->port.phy_mutex);
1831 }
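/*
 * The PHY lock is two-level: phy_mutex serialises MDIO users within
 * this driver instance, while the MDIO HW lock (taken only when
 * port.need_hw_lock is set) arbitrates against the MCP and the other
 * port on shared dual port PHYs.  Callers in this file all follow the
 * same bracketed pattern, e.g.:
 *
 *	bnx2x_acquire_phy_lock(bp);
 *	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 *	bnx2x_release_phy_lock(bp);
 */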
1832
1833 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1834 {
1835         /* The GPIO should be swapped if swap register is set and active */
1836         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1837                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1838         int gpio_shift = gpio_num +
1839                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1840         u32 gpio_mask = (1 << gpio_shift);
1841         u32 gpio_reg;
1842         int value;
1843
1844         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1845                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1846                 return -EINVAL;
1847         }
1848
1849         /* read GPIO value */
1850         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1851
1852         /* get the requested pin value */
1853         if ((gpio_reg & gpio_mask) == gpio_mask)
1854                 value = 1;
1855         else
1856                 value = 0;
1857
1858         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1859
1860         return value;
1861 }
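/*
 * Worked example of the swap logic above: on port 1 with both
 * NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE non-zero, gpio_port is
 * 1 ^ 1 == 0 and GPIO 2 is sampled at shift 2; with no swap, the same
 * pin on port 1 sits at shift 2 + MISC_REGISTERS_GPIO_PORT_SHIFT.
 */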
1862
1863 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1864 {
1865         /* The GPIO should be swapped if swap register is set and active */
1866         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1867                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1868         int gpio_shift = gpio_num +
1869                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1870         u32 gpio_mask = (1 << gpio_shift);
1871         u32 gpio_reg;
1872
1873         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1874                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1875                 return -EINVAL;
1876         }
1877
1878         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1879         /* read GPIO and mask except the float bits */
1880         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1881
1882         switch (mode) {
1883         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1884                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1885                    gpio_num, gpio_shift);
1886                 /* clear FLOAT and set CLR */
1887                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1888                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1889                 break;
1890
1891         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1892                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1893                    gpio_num, gpio_shift);
1894                 /* clear FLOAT and set SET */
1895                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1896                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1897                 break;
1898
1899         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1900                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1901                    gpio_num, gpio_shift);
1902                 /* set FLOAT */
1903                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1904                 break;
1905
1906         default:
1907                 break;
1908         }
1909
1910         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1911         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1912
1913         return 0;
1914 }
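/*
 * MISC_REG_GPIO keeps separate FLOAT/SET/CLR banks, so a pin is driven
 * by clearing its FLOAT bit and raising exactly one of SET or CLR.
 * Example taken from the fan-failure handler further down:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */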
1915
1916 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1917 {
1918         /* The GPIO should be swapped if swap register is set and active */
1919         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1920                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1921         int gpio_shift = gpio_num +
1922                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1923         u32 gpio_mask = (1 << gpio_shift);
1924         u32 gpio_reg;
1925
1926         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1927                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1928                 return -EINVAL;
1929         }
1930
1931         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1932         /* read GPIO int */
1933         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1934
1935         switch (mode) {
1936         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1937                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1938                                    "output low\n", gpio_num, gpio_shift);
1939                 /* clear SET and set CLR */
1940                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1941                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1942                 break;
1943
1944         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1945                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1946                                    "output high\n", gpio_num, gpio_shift);
1947                 /* clear CLR and set SET */
1948                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1949                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1950                 break;
1951
1952         default:
1953                 break;
1954         }
1955
1956         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1957         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1958
1959         return 0;
1960 }
1961
1962 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1963 {
1964         u32 spio_mask = (1 << spio_num);
1965         u32 spio_reg;
1966
1967         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1968             (spio_num > MISC_REGISTERS_SPIO_7)) {
1969                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1970                 return -EINVAL;
1971         }
1972
1973         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1974         /* read SPIO and mask except the float bits */
1975         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1976
1977         switch (mode) {
1978         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1979                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1980                 /* clear FLOAT and set CLR */
1981                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1982                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1983                 break;
1984
1985         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1986                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1987                 /* clear FLOAT and set SET */
1988                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1989                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1990                 break;
1991
1992         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1993                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1994                 /* set FLOAT */
1995                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1996                 break;
1997
1998         default:
1999                 break;
2000         }
2001
2002         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2003         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2004
2005         return 0;
2006 }
2007
2008 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2009 {
2010         switch (bp->link_vars.ieee_fc &
2011                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2012         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2013                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2014                                           ADVERTISED_Pause);
2015                 break;
2016         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2017                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2018                                          ADVERTISED_Pause);
2019                 break;
2020         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2021                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2022                 break;
2023         default:
2024                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2025                                           ADVERTISED_Pause);
2026                 break;
2027         }
2028 }
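/*
 * The switch above implements the standard pause advertisement
 * mapping:
 *
 *	negotiated ieee_fc	port.advertising bits
 *	PAUSE_NONE		neither Pause nor Asym_Pause
 *	PAUSE_BOTH		Pause | Asym_Pause
 *	PAUSE_ASYMMETRIC	Asym_Pause only
 */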
2029
2030 static void bnx2x_link_report(struct bnx2x *bp)
2031 {
2032         if (bp->link_vars.link_up) {
2033                 if (bp->state == BNX2X_STATE_OPEN)
2034                         netif_carrier_on(bp->dev);
2035                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2036
2037                 printk("%d Mbps ", bp->link_vars.line_speed);
2038
2039                 if (bp->link_vars.duplex == DUPLEX_FULL)
2040                         printk("full duplex");
2041                 else
2042                         printk("half duplex");
2043
2044                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2045                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2046                                 printk(", receive ");
2047                                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2048                                         printk("& transmit ");
2049                         } else {
2050                                 printk(", transmit ");
2051                         }
2052                         printk("flow control ON");
2053                 }
2054                 printk("\n");
2055
2056         } else { /* link_down */
2057                 netif_carrier_off(bp->dev);
2058                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2059         }
2060 }
2061
2062 static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
2063 {
2064         if (!BP_NOMCP(bp)) {
2065                 u8 rc;
2066
2067                 /* Initialize link parameters structure variables */
2068                 /* It is recommended to turn off RX FC for jumbo frames
2069                    for better performance */
2070                 if (IS_E1HMF(bp))
2071                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2072                 else if (bp->dev->mtu > 5000)
2073                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2074                 else
2075                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2076
2077                 bnx2x_acquire_phy_lock(bp);
2078                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2079                 bnx2x_release_phy_lock(bp);
2080
2081                 bnx2x_calc_fc_adv(bp);
2082
2083                 if (bp->link_vars.link_up)
2084                         bnx2x_link_report(bp);
2085
2086
2087                 return rc;
2088         }
2089         BNX2X_ERR("Bootcode is missing - not initializing link\n");
2090         return -EINVAL;
2091 }
2092
2093 static void bnx2x_link_set(struct bnx2x *bp)
2094 {
2095         if (!BP_NOMCP(bp)) {
2096                 bnx2x_acquire_phy_lock(bp);
2097                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2098                 bnx2x_release_phy_lock(bp);
2099
2100                 bnx2x_calc_fc_adv(bp);
2101         } else
2102                 BNX2X_ERR("Bootcode is missing - not setting link\n");
2103 }
2104
2105 static void bnx2x__link_reset(struct bnx2x *bp)
2106 {
2107         if (!BP_NOMCP(bp)) {
2108                 bnx2x_acquire_phy_lock(bp);
2109                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2110                 bnx2x_release_phy_lock(bp);
2111         } else
2112                 BNX2X_ERR("Bootcode is missing - not resetting link\n");
2113 }
2114
2115 static u8 bnx2x_link_test(struct bnx2x *bp)
2116 {
2117         u8 rc;
2118
2119         bnx2x_acquire_phy_lock(bp);
2120         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2121         bnx2x_release_phy_lock(bp);
2122
2123         return rc;
2124 }
2125
2126 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2127 {
2128         u32 r_param = bp->link_vars.line_speed / 8;
2129         u32 fair_periodic_timeout_usec;
2130         u32 t_fair;
2131
2132         memset(&(bp->cmng.rs_vars), 0,
2133                sizeof(struct rate_shaping_vars_per_port));
2134         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2135
2136         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2137         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2138
2139         /* this is the threshold below which no timer arming will occur.
2140            The 1.25 coefficient makes the threshold a little bigger
2141            than the real time, to compensate for timer inaccuracy */
2142         bp->cmng.rs_vars.rs_threshold =
2143                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2144
2145         /* resolution of fairness timer */
2146         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2147         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2148         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2149
2150         /* this is the threshold below which we won't arm the timer anymore */
2151         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2152
2153         /* we multiply by 1e3/8 to get bytes/msec.
2154            We don't want the credits to exceed a credit of
2155            t_fair*FAIR_MEM (the algorithm resolution) */
2156         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2157         /* since each tick is 4 usec */
2158         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2159 }
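/*
 * Worked example at 10G, using only values stated in the comments
 * above: line_speed = 10000 Mbps gives r_param = 10000/8 = 1250
 * bytes/usec; a 100 usec periodic timeout is 100/4 = 25 SDM ticks and
 * rs_threshold = 100 * 1250 * 5/4 = 156250 bytes; t_fair resolves to
 * 1000 usec.  (The actual constants live in the headers.)
 */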
2160
2161 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2162 {
2163         struct rate_shaping_vars_per_vn m_rs_vn;
2164         struct fairness_vars_per_vn m_fair_vn;
2165         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2166         u16 vn_min_rate, vn_max_rate;
2167         int i;
2168
2169         /* If function is hidden - set min and max to zeroes */
2170         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2171                 vn_min_rate = 0;
2172                 vn_max_rate = 0;
2173
2174         } else {
2175                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2176                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2177                 /* If fairness is enabled (not all min rates are zero)
2178                    and the current min rate is zero, set it to 1.
2179                    This is a requirement of the algorithm. */
2180                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2181                         vn_min_rate = DEF_MIN_RATE;
2182                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2183                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2184         }
2185
2186         DP(NETIF_MSG_IFUP,
2187            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2188            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2189
2190         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2191         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2192
2193         /* global vn counter - maximal Mbps for this vn */
2194         m_rs_vn.vn_counter.rate = vn_max_rate;
2195
2196         /* quota - number of bytes transmitted in this period */
2197         m_rs_vn.vn_counter.quota =
2198                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2199
2200         if (bp->vn_weight_sum) {
2201                 /* credit for each period of the fairness algorithm:
2202                    number of bytes in T_FAIR (the VNs share the port rate).
2203                    vn_weight_sum should not be larger than 10000, thus
2204                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2205                    than zero */
2206                 m_fair_vn.vn_credit_delta =
2207                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2208                                                  (8 * bp->vn_weight_sum))),
2209                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2210                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2211                    m_fair_vn.vn_credit_delta);
2212         }
2213
2214         /* Store it to internal memory */
2215         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2216                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2217                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2218                        ((u32 *)(&m_rs_vn))[i]);
2219
2220         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2221                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2222                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2223                        ((u32 *)(&m_fair_vn))[i]);
2224 }
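/*
 * Example of the scaling above: a MIN_BW field of 10 yields
 * vn_min_rate = 10 * 100 = 1000, i.e. 1G expressed in Mbps (assuming
 * the shmem field is in units of 100 Mbps, as the *100 factor
 * suggests).  Since 1 Mbps is 1 bit/usec, a 10G vn_max_rate gives a
 * quota of 10000 * RS_PERIODIC_TIMEOUT_USEC / 8 = 125000 bytes per
 * 100 usec period.
 */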
2225
2226
2227 /* This function is called upon link interrupt */
2228 static void bnx2x_link_attn(struct bnx2x *bp)
2229 {
2230         /* Make sure that we are synced with the current statistics */
2231         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2232
2233         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2234
2235         if (bp->link_vars.link_up) {
2236
2237                 /* dropless flow control */
2238                 if (CHIP_IS_E1H(bp)) {
2239                         int port = BP_PORT(bp);
2240                         u32 pause_enabled = 0;
2241
2242                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2243                                 pause_enabled = 1;
2244
2245                         REG_WR(bp, BAR_USTRORM_INTMEM +
2246                                USTORM_PAUSE_ENABLED_OFFSET(port),
2247                                pause_enabled);
2248                 }
2249
2250                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2251                         struct host_port_stats *pstats;
2252
2253                         pstats = bnx2x_sp(bp, port_stats);
2254                         /* reset old bmac stats */
2255                         memset(&(pstats->mac_stx[0]), 0,
2256                                sizeof(struct mac_stx));
2257                 }
2258                 if ((bp->state == BNX2X_STATE_OPEN) ||
2259                     (bp->state == BNX2X_STATE_DISABLED))
2260                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2261         }
2262
2263         /* indicate link status */
2264         bnx2x_link_report(bp);
2265
2266         if (IS_E1HMF(bp)) {
2267                 int port = BP_PORT(bp);
2268                 int func;
2269                 int vn;
2270
2271                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2272                         if (vn == BP_E1HVN(bp))
2273                                 continue;
2274
2275                         func = ((vn << 1) | port);
2276
2277                         /* Set the attention towards other drivers
2278                            on the same port */
2279                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2280                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2281                 }
2282
2283                 if (bp->link_vars.link_up) {
2284                         int i;
2285
2286                         /* Init rate shaping and fairness contexts */
2287                         bnx2x_init_port_minmax(bp);
2288
2289                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2290                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2291
2292                         /* Store it to internal memory */
2293                         for (i = 0;
2294                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2295                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2296                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2297                                        ((u32 *)(&bp->cmng))[i]);
2298                 }
2299         }
2300 }
2301
2302 static void bnx2x__link_status_update(struct bnx2x *bp)
2303 {
2304         if (bp->state != BNX2X_STATE_OPEN)
2305                 return;
2306
2307         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2308
2309         if (bp->link_vars.link_up)
2310                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2311         else
2312                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2313
2314         /* indicate link status */
2315         bnx2x_link_report(bp);
2316 }
2317
2318 static void bnx2x_pmf_update(struct bnx2x *bp)
2319 {
2320         int port = BP_PORT(bp);
2321         u32 val;
2322
2323         bp->port.pmf = 1;
2324         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2325
2326         /* enable nig attention */
2327         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2328         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2329         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2330
2331         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2332 }
2333
2334 /* end of Link */
2335
2336 /* slow path */
2337
2338 /*
2339  * General service functions
2340  */
2341
2342 /* the slow path queue is odd since completions arrive on the fastpath ring */
2343 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2344                          u32 data_hi, u32 data_lo, int common)
2345 {
2346         int func = BP_FUNC(bp);
2347
2348         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2349            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2350            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2351            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2352            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2353
2354 #ifdef BNX2X_STOP_ON_ERROR
2355         if (unlikely(bp->panic))
2356                 return -EIO;
2357 #endif
2358
2359         spin_lock_bh(&bp->spq_lock);
2360
2361         if (!bp->spq_left) {
2362                 BNX2X_ERR("BUG! SPQ ring full!\n");
2363                 spin_unlock_bh(&bp->spq_lock);
2364                 bnx2x_panic();
2365                 return -EBUSY;
2366         }
2367
2368         /* CID needs the port number to be encoded in it */
2369         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2370                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2371                                      HW_CID(bp, cid)));
2372         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2373         if (common)
2374                 bp->spq_prod_bd->hdr.type |=
2375                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2376
2377         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2378         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2379
2380         bp->spq_left--;
2381
2382         if (bp->spq_prod_bd == bp->spq_last_bd) {
2383                 bp->spq_prod_bd = bp->spq;
2384                 bp->spq_prod_idx = 0;
2385                 DP(NETIF_MSG_TIMER, "end of spq\n");
2386
2387         } else {
2388                 bp->spq_prod_bd++;
2389                 bp->spq_prod_idx++;
2390         }
2391
2392         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2393                bp->spq_prod_idx);
2394
2395         spin_unlock_bh(&bp->spq_lock);
2396         return 0;
2397 }
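/*
 * The SPQ is a host ring whose producer index is doorbelled to XSTORM;
 * completions come back as slowpath CQEs on the fastpath ring (see the
 * CQE_TYPE branch in bnx2x_rx_int()).  Sketch of a post, with cmd_id,
 * cid and mapping as hypothetical placeholders:
 *
 *	rc = bnx2x_sp_post(bp, cmd_id, cid, U64_HI(mapping),
 *			   U64_LO(mapping), 0);    0 == not a common ramrod
 *
 * spq_left is given back when the matching completion is processed, so
 * a full ring here is treated as a driver bug.
 */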
2398
2399 /* acquire split MCP access lock register */
2400 static int bnx2x_acquire_alr(struct bnx2x *bp)
2401 {
2402         u32 i, j, val;
2403         int rc = 0;
2404
2405         might_sleep();
2406         i = 100;
2407         for (j = 0; j < i*10; j++) {
2408                 val = (1UL << 31);
2409                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2410                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2411                 if (val & (1L << 31))
2412                         break;
2413
2414                 msleep(5);
2415         }
2416         if (!(val & (1L << 31))) {
2417                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2418                 rc = -EBUSY;
2419         }
2420
2421         return rc;
2422 }
2423
2424 /* release split MCP access lock register */
2425 static void bnx2x_release_alr(struct bnx2x *bp)
2426 {
2427         u32 val = 0;
2428
2429         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2430 }
2431
2432 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2433 {
2434         struct host_def_status_block *def_sb = bp->def_status_blk;
2435         u16 rc = 0;
2436
2437         barrier(); /* status block is written to by the chip */
2438         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2439                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2440                 rc |= 1;
2441         }
2442         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2443                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2444                 rc |= 2;
2445         }
2446         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2447                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2448                 rc |= 4;
2449         }
2450         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2451                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2452                 rc |= 8;
2453         }
2454         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2455                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2456                 rc |= 16;
2457         }
2458         return rc;
2459 }
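/*
 * The return value is a bitmask of which default status block sections
 * advanced: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM.  The slowpath task uses it to decide
 * what needs handling and which indices to ack back to the IGU.
 */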
2460
2461 /*
2462  * slow path service functions
2463  */
2464
2465 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2466 {
2467         int port = BP_PORT(bp);
2468         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2469                        COMMAND_REG_ATTN_BITS_SET);
2470         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2471                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2472         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2473                                        NIG_REG_MASK_INTERRUPT_PORT0;
2474         u32 aeu_mask;
2475         u32 nig_mask = 0;
2476
2477         if (bp->attn_state & asserted)
2478                 BNX2X_ERR("IGU ERROR\n");
2479
2480         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2481         aeu_mask = REG_RD(bp, aeu_addr);
2482
2483         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2484            aeu_mask, asserted);
2485         aeu_mask &= ~(asserted & 0xff);
2486         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2487
2488         REG_WR(bp, aeu_addr, aeu_mask);
2489         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2490
2491         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2492         bp->attn_state |= asserted;
2493         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2494
2495         if (asserted & ATTN_HARD_WIRED_MASK) {
2496                 if (asserted & ATTN_NIG_FOR_FUNC) {
2497
2498                         bnx2x_acquire_phy_lock(bp);
2499
2500                         /* save nig interrupt mask */
2501                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2502                         REG_WR(bp, nig_int_mask_addr, 0);
2503
2504                         bnx2x_link_attn(bp);
2505
2506                         /* handle unicore attn? */
2507                 }
2508                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2509                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2510
2511                 if (asserted & GPIO_2_FUNC)
2512                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2513
2514                 if (asserted & GPIO_3_FUNC)
2515                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2516
2517                 if (asserted & GPIO_4_FUNC)
2518                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2519
2520                 if (port == 0) {
2521                         if (asserted & ATTN_GENERAL_ATTN_1) {
2522                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2523                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2524                         }
2525                         if (asserted & ATTN_GENERAL_ATTN_2) {
2526                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2527                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2528                         }
2529                         if (asserted & ATTN_GENERAL_ATTN_3) {
2530                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2531                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2532                         }
2533                 } else {
2534                         if (asserted & ATTN_GENERAL_ATTN_4) {
2535                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2536                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2537                         }
2538                         if (asserted & ATTN_GENERAL_ATTN_5) {
2539                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2540                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2541                         }
2542                         if (asserted & ATTN_GENERAL_ATTN_6) {
2543                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2544                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2545                         }
2546                 }
2547
2548         } /* if hardwired */
2549
2550         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2551            asserted, hc_addr);
2552         REG_WR(bp, hc_addr, asserted);
2553
2554         /* now set back the mask */
2555         if (asserted & ATTN_NIG_FOR_FUNC) {
2556                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2557                 bnx2x_release_phy_lock(bp);
2558         }
2559 }
2560
2561 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2562 {
2563         int port = BP_PORT(bp);
2564         int reg_offset;
2565         u32 val;
2566
2567         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2568                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2569
2570         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2571
2572                 val = REG_RD(bp, reg_offset);
2573                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2574                 REG_WR(bp, reg_offset, val);
2575
2576                 BNX2X_ERR("SPIO5 hw attention\n");
2577
2578                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2579                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2580                         /* Fan failure attention */
2581
2582                         /* The PHY reset is controlled by GPIO 1 */
2583                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2584                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2585                         /* Low power mode is controlled by GPIO 2 */
2586                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2587                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2588                         /* mark the failure */
2589                         bp->link_params.ext_phy_config &=
2590                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2591                         bp->link_params.ext_phy_config |=
2592                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2593                         SHMEM_WR(bp,
2594                                  dev_info.port_hw_config[port].
2595                                                         external_phy_config,
2596                                  bp->link_params.ext_phy_config);
2597                         /* log the failure */
2598                         printk(KERN_ERR PFX "Fan Failure on Network"
2599                                " Controller %s has caused the driver to"
2600                " shut down the card to prevent permanent"
2601                                " damage.  Please contact Dell Support for"
2602                                " assistance\n", bp->dev->name);
2603                         break;
2604
2605                 default:
2606                         break;
2607                 }
2608         }
2609
2610         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2611                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2612                 bnx2x_acquire_phy_lock(bp);
2613                 bnx2x_handle_module_detect_int(&bp->link_params);
2614                 bnx2x_release_phy_lock(bp);
2615         }
2616
2617         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2618
2619                 val = REG_RD(bp, reg_offset);
2620                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2621                 REG_WR(bp, reg_offset, val);
2622
2623                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2624                           (attn & HW_INTERRUT_ASSERT_SET_0));
2625                 bnx2x_panic();
2626         }
2627 }
2628
2629 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2630 {
2631         u32 val;
2632
2633         if (attn & BNX2X_DOORQ_ASSERT) {
2634
2635                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2636                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2637                 /* DORQ discard attention */
2638                 if (val & 0x2)
2639                         BNX2X_ERR("FATAL error from DORQ\n");
2640         }
2641
2642         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2643
2644                 int port = BP_PORT(bp);
2645                 int reg_offset;
2646
2647                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2648                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2649
2650                 val = REG_RD(bp, reg_offset);
2651                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2652                 REG_WR(bp, reg_offset, val);
2653
2654                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2655                           (attn & HW_INTERRUT_ASSERT_SET_1));
2656                 bnx2x_panic();
2657         }
2658 }
2659
2660 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2661 {
2662         u32 val;
2663
2664         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2665
2666                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2667                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2668                 /* CFC error attention */
2669                 if (val & 0x2)
2670                         BNX2X_ERR("FATAL error from CFC\n");
2671         }
2672
2673         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2674
2675                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2676                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2677                 /* RQ_USDMDP_FIFO_OVERFLOW */
2678                 if (val & 0x18000)
2679                         BNX2X_ERR("FATAL error from PXP\n");
2680         }
2681
2682         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2683
2684                 int port = BP_PORT(bp);
2685                 int reg_offset;
2686
2687                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2688                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2689
2690                 val = REG_RD(bp, reg_offset);
2691                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2692                 REG_WR(bp, reg_offset, val);
2693
2694                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2695                           (attn & HW_INTERRUT_ASSERT_SET_2));
2696                 bnx2x_panic();
2697         }
2698 }
2699
2700 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2701 {
2702         u32 val;
2703
2704         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2705
2706                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2707                         int func = BP_FUNC(bp);
2708
2709                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2710                         bnx2x__link_status_update(bp);
2711                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2712                                                         DRV_STATUS_PMF)
2713                                 bnx2x_pmf_update(bp);
2714
2715                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2716
2717                         BNX2X_ERR("MC assert!\n");
2718                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2719                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2720                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2721                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2722                         bnx2x_panic();
2723
2724                 } else if (attn & BNX2X_MCP_ASSERT) {
2725
2726                         BNX2X_ERR("MCP assert!\n");
2727                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2728                         bnx2x_fw_dump(bp);
2729
2730                 } else
2731                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2732         }
2733
2734         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2735                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2736                 if (attn & BNX2X_GRC_TIMEOUT) {
2737                         val = CHIP_IS_E1H(bp) ?
2738                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2739                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2740                 }
2741                 if (attn & BNX2X_GRC_RSV) {
2742                         val = CHIP_IS_E1H(bp) ?
2743                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2744                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2745                 }
2746                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2747         }
2748 }
2749
2750 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2751 {
2752         struct attn_route attn;
2753         struct attn_route group_mask;
2754         int port = BP_PORT(bp);
2755         int index;
2756         u32 reg_addr;
2757         u32 val;
2758         u32 aeu_mask;
2759
2760         /* need to take HW lock because the MCP or the other port
2761            might also try to handle this event */
2762         bnx2x_acquire_alr(bp);
2763
2764         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2765         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2766         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2767         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2768         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2769            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2770
2771         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2772                 if (deasserted & (1 << index)) {
2773                         group_mask = bp->attn_group[index];
2774
2775                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2776                            index, group_mask.sig[0], group_mask.sig[1],
2777                            group_mask.sig[2], group_mask.sig[3]);
2778
2779                         bnx2x_attn_int_deasserted3(bp,
2780                                         attn.sig[3] & group_mask.sig[3]);
2781                         bnx2x_attn_int_deasserted1(bp,
2782                                         attn.sig[1] & group_mask.sig[1]);
2783                         bnx2x_attn_int_deasserted2(bp,
2784                                         attn.sig[2] & group_mask.sig[2]);
2785                         bnx2x_attn_int_deasserted0(bp,
2786                                         attn.sig[0] & group_mask.sig[0]);
2787
2788                         if ((attn.sig[0] & group_mask.sig[0] &
2789                                                 HW_PRTY_ASSERT_SET_0) ||
2790                             (attn.sig[1] & group_mask.sig[1] &
2791                                                 HW_PRTY_ASSERT_SET_1) ||
2792                             (attn.sig[2] & group_mask.sig[2] &
2793                                                 HW_PRTY_ASSERT_SET_2))
2794                                 BNX2X_ERR("FATAL HW block parity attention\n");
2795                 }
2796         }
2797
2798         bnx2x_release_alr(bp);
2799
2800         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2801
2802         val = ~deasserted;
2803         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2804            val, reg_addr);
2805         REG_WR(bp, reg_addr, val);
2806
2807         if (~bp->attn_state & deasserted)
2808                 BNX2X_ERR("IGU ERROR\n");
2809
2810         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2811                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2812
2813         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2814         aeu_mask = REG_RD(bp, reg_addr);
2815
2816         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2817            aeu_mask, deasserted);
2818         aeu_mask |= (deasserted & 0xff);
2819         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2820
2821         REG_WR(bp, reg_addr, aeu_mask);
2822         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2823
2824         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2825         bp->attn_state &= ~deasserted;
2826         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2827 }
2828
2829 static void bnx2x_attn_int(struct bnx2x *bp)
2830 {
2831         /* read local copy of bits */
2832         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2833                                                                 attn_bits);
2834         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2835                                                                 attn_bits_ack);
2836         u32 attn_state = bp->attn_state;
2837
2838         /* look for changed bits */
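        /*
         * asserted:   set in HW, but not yet acked and not in our state
         * deasserted: clear in HW, but still acked and in our state
         */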
2839         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2840         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2841
2842         DP(NETIF_MSG_HW,
2843            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2844            attn_bits, attn_ack, asserted, deasserted);
2845
2846         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2847                 BNX2X_ERR("BAD attention state\n");
2848
2849         /* handle bits that were raised */
2850         if (asserted)
2851                 bnx2x_attn_int_asserted(bp, asserted);
2852
2853         if (deasserted)
2854                 bnx2x_attn_int_deasserted(bp, deasserted);
2855 }
2856
2857 static void bnx2x_sp_task(struct work_struct *work)
2858 {
2859         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2860         u16 status;
2861
2863         /* Return here if interrupt is disabled */
2864         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2865                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2866                 return;
2867         }
2868
2869         status = bnx2x_update_dsb_idx(bp);
2870 /*      if (status == 0)                                     */
2871 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2872
2873         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2874
2875         /* HW attentions */
2876         if (status & 0x1)
2877                 bnx2x_attn_int(bp);
2878
2879         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2880                      IGU_INT_NOP, 1);
2881         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2882                      IGU_INT_NOP, 1);
2883         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2884                      IGU_INT_NOP, 1);
2885         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2886                      IGU_INT_NOP, 1);
2887         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2888                      IGU_INT_ENABLE, 1);
2890 }
2891
2892 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2893 {
2894         struct net_device *dev = dev_instance;
2895         struct bnx2x *bp = netdev_priv(dev);
2896
2897         /* Return here if interrupt is disabled */
2898         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2899                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2900                 return IRQ_HANDLED;
2901         }
2902
2903         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2904
2905 #ifdef BNX2X_STOP_ON_ERROR
2906         if (unlikely(bp->panic))
2907                 return IRQ_HANDLED;
2908 #endif
2909
2910         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2911
2912         return IRQ_HANDLED;
2913 }
2914
2915 /* end of slow path */
2916
2917 /* Statistics */
2918
2919 /****************************************************************************
2920 * Macros
2921 ****************************************************************************/
2922
2923 /* sum[hi:lo] += add[hi:lo] */
2924 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2925         do { \
2926                 s_lo += a_lo; \
2927                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2928         } while (0)
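/*
 * The carry out of the low word is detected via unsigned wrap-around:
 * after s_lo += a_lo, (s_lo < a_lo) is true iff the add wrapped, e.g.
 * 0xffffffff + 2 gives s_lo == 1 < 2, so 1 is carried into s_hi.
 */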
2929
2930 /* difference = minuend - subtrahend */
2931 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2932         do { \
2933                 if (m_lo < s_lo) { \
2934                         /* underflow */ \
2935                         d_hi = m_hi - s_hi; \
2936                         if (d_hi > 0) { \
2937                                 /* we can 'loan' 1 */ \
2938                                 d_hi--; \
2939                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2940                         } else { \
2941                                 /* m_hi <= s_hi */ \
2942                                 d_hi = 0; \
2943                                 d_lo = 0; \
2944                         } \
2945                 } else { \
2946                         /* m_lo >= s_lo */ \
2947                         if (m_hi < s_hi) { \
2948                                 d_hi = 0; \
2949                                 d_lo = 0; \
2950                         } else { \
2951                                 /* m_hi >= s_hi */ \
2952                                 d_hi = m_hi - s_hi; \
2953                                 d_lo = m_lo - s_lo; \
2954                         } \
2955                 } \
2956         } while (0)
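/*
 * Note that DIFF_64 clamps the result to zero instead of wrapping when
 * the subtrahend is larger than the minuend, so a reset or stale HW
 * counter cannot produce a huge bogus delta.
 */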
2957
2958 #define UPDATE_STAT64(s, t) \
2959         do { \
2960                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2961                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2962                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2963                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2964                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2965                        pstats->mac_stx[1].t##_lo, diff.lo); \
2966         } while (0)
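/*
 * mac_stx[0] holds the raw HW snapshot from the previous read and
 * mac_stx[1] the running total: each update takes the delta against the
 * old snapshot, saves the new snapshot and accumulates the delta.
 */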
2967
2968 #define UPDATE_STAT64_NIG(s, t) \
2969         do { \
2970                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2971                         diff.lo, new->s##_lo, old->s##_lo); \
2972                 ADD_64(estats->t##_hi, diff.hi, \
2973                        estats->t##_lo, diff.lo); \
2974         } while (0)
2975
2976 /* sum[hi:lo] += add */
2977 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2978         do { \
2979                 s_lo += a; \
2980                 s_hi += (s_lo < a) ? 1 : 0; \
2981         } while (0)
2982
2983 #define UPDATE_EXTEND_STAT(s) \
2984         do { \
2985                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2986                               pstats->mac_stx[1].s##_lo, \
2987                               new->s); \
2988         } while (0)
2989
2990 #define UPDATE_EXTEND_TSTAT(s, t) \
2991         do { \
2992                 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2993                 old_tclient->s = le32_to_cpu(tclient->s); \
2994                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2995         } while (0)
2996
2997 #define UPDATE_EXTEND_USTAT(s, t) \
2998         do { \
2999                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3000                 old_uclient->s = uclient->s; \
3001                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3002         } while (0)
3003
3004 #define UPDATE_EXTEND_XSTAT(s, t) \
3005         do { \
3006                 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3007                 old_xclient->s = le32_to_cpu(xclient->s); \
3008                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3009         } while (0)
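/*
 * The UPDATE_EXTEND_*STAT macros above extend 32-bit storm counters into
 * 64-bit accumulators.  The delta is computed in u32 arithmetic, so a
 * single wrap of the HW counter between polls is handled correctly;
 * more than one wrap per poll interval would be lost.
 */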
3010
3011 /* minuend -= subtrahend */
3012 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3013         do { \
3014                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3015         } while (0)
3016
3017 /* minuend[hi:lo] -= subtrahend */
3018 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3019         do { \
3020                 SUB_64(m_hi, 0, m_lo, s); \
3021         } while (0)
3022
3023 #define SUB_EXTEND_USTAT(s, t) \
3024         do { \
3025                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3026                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3027         } while (0)
3028
3029 /*
3030  * General service functions
3031  */
3032
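/*
 * bnx2x_hilo() folds a {hi, lo} pair into a long: the full 64-bit value
 * on 64-bit kernels, only the low 32 bits on 32-bit kernels (matching
 * the width of the unsigned long fields in struct net_device_stats).
 */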
3033 static inline long bnx2x_hilo(u32 *hiref)
3034 {
3035         u32 lo = *(hiref + 1);
3036 #if (BITS_PER_LONG == 64)
3037         u32 hi = *hiref;
3038
3039         return HILO_U64(hi, lo);
3040 #else
3041         return lo;
3042 #endif
3043 }
3044
3045 /*
3046  * Init service functions
3047  */
3048
3049 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3050 {
3051         if (!bp->stats_pending) {
3052                 struct eth_query_ramrod_data ramrod_data = {0};
3053                 int i, rc;
3054
3055                 ramrod_data.drv_counter = bp->stats_counter++;
3056                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3057                 for_each_queue(bp, i)
3058                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3059
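                /* the ramrod carries the query parameters inline: the
                   two u32 halves of ramrod_data are passed as the hi/lo
                   data words of the slow path entry */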
3060                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3061                                    ((u32 *)&ramrod_data)[1],
3062                                    ((u32 *)&ramrod_data)[0], 0);
3063                 if (rc == 0) {
3064                 /* stats ramrod has its own slot on the spq */
3065                         bp->spq_left++;
3066                         bp->stats_pending = 1;
3067                 }
3068         }
3069 }
3070
3071 static void bnx2x_stats_init(struct bnx2x *bp)
3072 {
3073         int port = BP_PORT(bp);
3074         int i;
3075
3076         bp->stats_pending = 0;
3077         bp->executer_idx = 0;
3078         bp->stats_counter = 0;
3079
3080         /* port stats */
3081         if (!BP_NOMCP(bp))
3082                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3083         else
3084                 bp->port.port_stx = 0;
3085         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3086
3087         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3088         bp->port.old_nig_stats.brb_discard =
3089                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3090         bp->port.old_nig_stats.brb_truncate =
3091                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3092         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3093                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3094         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3095                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3096
3097         /* function stats */
3098         for_each_queue(bp, i) {
3099                 struct bnx2x_fastpath *fp = &bp->fp[i];
3100
3101                 memset(&fp->old_tclient, 0,
3102                        sizeof(struct tstorm_per_client_stats));
3103                 memset(&fp->old_uclient, 0,
3104                        sizeof(struct ustorm_per_client_stats));
3105                 memset(&fp->old_xclient, 0,
3106                        sizeof(struct xstorm_per_client_stats));
3107                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3108         }
3109
3110         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3111         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3112
3113         bp->stats_state = STATS_STATE_DISABLED;
3114         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3115                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3116 }
3117
3118 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3119 {
3120         struct dmae_command *dmae = &bp->stats_dmae;
3121         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3122
3123         *stats_comp = DMAE_COMP_VAL;
3124         if (CHIP_REV_IS_SLOW(bp))
3125                 return;
3126
3127         /* loader */
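        /*
         * More than one DMAE command was queued in host SP memory, so a
         * "loader" command is posted: it copies a queued command into
         * the DMAE command memory and, on completion, kicks that slot's
         * GO register.  Intermediate commands likewise complete into a
         * GO register, chaining the sequence, until the final one
         * writes DMAE_COMP_VAL to stats_comp in host memory.
         */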
3128         if (bp->executer_idx) {
3129                 int loader_idx = PMF_DMAE_C(bp);
3130
3131                 memset(dmae, 0, sizeof(struct dmae_command));
3132
3133                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3134                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3135                                 DMAE_CMD_DST_RESET |
3136 #ifdef __BIG_ENDIAN
3137                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3138 #else
3139                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3140 #endif
3141                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3142                                                DMAE_CMD_PORT_0) |
3143                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3144                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3145                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3146                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3147                                      sizeof(struct dmae_command) *
3148                                      (loader_idx + 1)) >> 2;
3149                 dmae->dst_addr_hi = 0;
3150                 dmae->len = sizeof(struct dmae_command) >> 2;
3151                 if (CHIP_IS_E1(bp))
3152                         dmae->len--;
3153                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3154                 dmae->comp_addr_hi = 0;
3155                 dmae->comp_val = 1;
3156
3157                 *stats_comp = 0;
3158                 bnx2x_post_dmae(bp, dmae, loader_idx);
3159
3160         } else if (bp->func_stx) {
3161                 *stats_comp = 0;
3162                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3163         }
3164 }
3165
3166 static int bnx2x_stats_comp(struct bnx2x *bp)
3167 {
3168         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3169         int cnt = 10;
3170
3171         might_sleep();
3172         while (*stats_comp != DMAE_COMP_VAL) {
3173                 if (!cnt) {
3174                         BNX2X_ERR("timeout waiting for stats finished\n");
3175                         break;
3176                 }
3177                 cnt--;
3178                 msleep(1);
3179         }
3180         return 1;
3181 }
3182
3183 /*
3184  * Statistics service functions
3185  */
3186
3187 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3188 {
3189         struct dmae_command *dmae;
3190         u32 opcode;
3191         int loader_idx = PMF_DMAE_C(bp);
3192         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3193
3194         /* sanity */
3195         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3196                 BNX2X_ERR("BUG!\n");
3197                 return;
3198         }
3199
3200         bp->executer_idx = 0;
3201
3202         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3203                   DMAE_CMD_C_ENABLE |
3204                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3205 #ifdef __BIG_ENDIAN
3206                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3207 #else
3208                   DMAE_CMD_ENDIANITY_DW_SWAP |
3209 #endif
3210                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3211                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3212
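        /*
         * A single DMAE read is limited to DMAE_LEN32_RD_MAX dwords, so
         * the port stats block is fetched in two commands: the first
         * reads DMAE_LEN32_RD_MAX dwords, the second the remainder.
         */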
3213         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3214         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3215         dmae->src_addr_lo = bp->port.port_stx >> 2;
3216         dmae->src_addr_hi = 0;
3217         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3218         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3219         dmae->len = DMAE_LEN32_RD_MAX;
3220         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3221         dmae->comp_addr_hi = 0;
3222         dmae->comp_val = 1;
3223
3224         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3225         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3226         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3227         dmae->src_addr_hi = 0;
3228         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3229                                    DMAE_LEN32_RD_MAX * 4);
3230         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3231                                    DMAE_LEN32_RD_MAX * 4);
3232         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3233         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3234         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3235         dmae->comp_val = DMAE_COMP_VAL;
3236
3237         *stats_comp = 0;
3238         bnx2x_hw_stats_post(bp);
3239         bnx2x_stats_comp(bp);
3240 }
3241
3242 static void bnx2x_port_stats_init(struct bnx2x *bp)
3243 {
3244         struct dmae_command *dmae;
3245         int port = BP_PORT(bp);
3246         int vn = BP_E1HVN(bp);
3247         u32 opcode;
3248         int loader_idx = PMF_DMAE_C(bp);
3249         u32 mac_addr;
3250         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3251
3252         /* sanity */
3253         if (!bp->link_vars.link_up || !bp->port.pmf) {
3254                 BNX2X_ERR("BUG!\n");
3255                 return;
3256         }
3257
3258         bp->executer_idx = 0;
3259
3260         /* MCP */
3261         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3262                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3263                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3264 #ifdef __BIG_ENDIAN
3265                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3266 #else
3267                   DMAE_CMD_ENDIANITY_DW_SWAP |
3268 #endif
3269                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3270                   (vn << DMAE_CMD_E1HVN_SHIFT));
3271
3272         if (bp->port.port_stx) {
3273
3274                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3275                 dmae->opcode = opcode;
3276                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3277                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3278                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3279                 dmae->dst_addr_hi = 0;
3280                 dmae->len = sizeof(struct host_port_stats) >> 2;
3281                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3282                 dmae->comp_addr_hi = 0;
3283                 dmae->comp_val = 1;
3284         }
3285
3286         if (bp->func_stx) {
3287
3288                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3289                 dmae->opcode = opcode;
3290                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3291                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3292                 dmae->dst_addr_lo = bp->func_stx >> 2;
3293                 dmae->dst_addr_hi = 0;
3294                 dmae->len = sizeof(struct host_func_stats) >> 2;
3295                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3296                 dmae->comp_addr_hi = 0;
3297                 dmae->comp_val = 1;
3298         }
3299
3300         /* MAC */
3301         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3302                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3303                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3304 #ifdef __BIG_ENDIAN
3305                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3306 #else
3307                   DMAE_CMD_ENDIANITY_DW_SWAP |
3308 #endif
3309                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3310                   (vn << DMAE_CMD_E1HVN_SHIFT));
3311
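        /*
         * Pull the raw MAC statistics straight out of the active MAC's
         * register block into the mac_stats area in host memory; the
         * source ranges differ between the BigMAC and the EMAC.
         */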
3312         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3313
3314                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3315                                    NIG_REG_INGRESS_BMAC0_MEM);
3316
3317                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3318                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3319                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3320                 dmae->opcode = opcode;
3321                 dmae->src_addr_lo = (mac_addr +
3322                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3323                 dmae->src_addr_hi = 0;
3324                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3325                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3326                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3327                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3328                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329                 dmae->comp_addr_hi = 0;
3330                 dmae->comp_val = 1;
3331
3332                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3333                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3334                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335                 dmae->opcode = opcode;
3336                 dmae->src_addr_lo = (mac_addr +
3337                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3338                 dmae->src_addr_hi = 0;
3339                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3340                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3341                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3342                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3343                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3344                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3345                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3346                 dmae->comp_addr_hi = 0;
3347                 dmae->comp_val = 1;
3348
3349         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3350
3351                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3352
3353                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3354                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3355                 dmae->opcode = opcode;
3356                 dmae->src_addr_lo = (mac_addr +
3357                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3358                 dmae->src_addr_hi = 0;
3359                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3360                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3361                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3362                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363                 dmae->comp_addr_hi = 0;
3364                 dmae->comp_val = 1;
3365
3366                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3367                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368                 dmae->opcode = opcode;
3369                 dmae->src_addr_lo = (mac_addr +
3370                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3371                 dmae->src_addr_hi = 0;
3372                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3373                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3374                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3375                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3376                 dmae->len = 1;
3377                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3378                 dmae->comp_addr_hi = 0;
3379                 dmae->comp_val = 1;
3380
3381                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3382                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3383                 dmae->opcode = opcode;
3384                 dmae->src_addr_lo = (mac_addr +
3385                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3386                 dmae->src_addr_hi = 0;
3387                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3388                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3389                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3390                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3391                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3392                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3393                 dmae->comp_addr_hi = 0;
3394                 dmae->comp_val = 1;
3395         }
3396
3397         /* NIG */
3398         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3399         dmae->opcode = opcode;
3400         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3401                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3402         dmae->src_addr_hi = 0;
3403         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3404         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3405         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3406         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3407         dmae->comp_addr_hi = 0;
3408         dmae->comp_val = 1;
3409
3410         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3411         dmae->opcode = opcode;
3412         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3413                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3414         dmae->src_addr_hi = 0;
3415         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3416                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3417         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3418                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3419         dmae->len = (2*sizeof(u32)) >> 2;
3420         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3421         dmae->comp_addr_hi = 0;
3422         dmae->comp_val = 1;
3423
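        /* the final command in the chain is the only one that completes
           to host memory: it writes DMAE_COMP_VAL to stats_comp, which
           bnx2x_stats_comp() polls for */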
3424         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3425         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3426                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3427                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3428 #ifdef __BIG_ENDIAN
3429                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3430 #else
3431                         DMAE_CMD_ENDIANITY_DW_SWAP |
3432 #endif
3433                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3434                         (vn << DMAE_CMD_E1HVN_SHIFT));
3435         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3436                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3437         dmae->src_addr_hi = 0;
3438         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3439                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3440         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3441                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3442         dmae->len = (2*sizeof(u32)) >> 2;
3443         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3444         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3445         dmae->comp_val = DMAE_COMP_VAL;
3446
3447         *stats_comp = 0;
3448 }
3449
3450 static void bnx2x_func_stats_init(struct bnx2x *bp)
3451 {
3452         struct dmae_command *dmae = &bp->stats_dmae;
3453         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3454
3455         /* sanity */
3456         if (!bp->func_stx) {
3457                 BNX2X_ERR("BUG!\n");
3458                 return;
3459         }
3460
3461         bp->executer_idx = 0;
3462         memset(dmae, 0, sizeof(struct dmae_command));
3463
3464         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3465                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3466                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3467 #ifdef __BIG_ENDIAN
3468                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3469 #else
3470                         DMAE_CMD_ENDIANITY_DW_SWAP |
3471 #endif
3472                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3473                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3474         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3475         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3476         dmae->dst_addr_lo = bp->func_stx >> 2;
3477         dmae->dst_addr_hi = 0;
3478         dmae->len = sizeof(struct host_func_stats) >> 2;
3479         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3480         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3481         dmae->comp_val = DMAE_COMP_VAL;
3482
3483         *stats_comp = 0;
3484 }
3485
3486 static void bnx2x_stats_start(struct bnx2x *bp)
3487 {
3488         if (bp->port.pmf)
3489                 bnx2x_port_stats_init(bp);
3490
3491         else if (bp->func_stx)
3492                 bnx2x_func_stats_init(bp);
3493
3494         bnx2x_hw_stats_post(bp);
3495         bnx2x_storm_stats_post(bp);
3496 }
3497
3498 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3499 {
3500         bnx2x_stats_comp(bp);
3501         bnx2x_stats_pmf_update(bp);
3502         bnx2x_stats_start(bp);
3503 }
3504
3505 static void bnx2x_stats_restart(struct bnx2x *bp)
3506 {
3507         bnx2x_stats_comp(bp);
3508         bnx2x_stats_start(bp);
3509 }
3510
3511 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3512 {
3513         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3514         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3515         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3516         struct regpair diff;
3517
3518         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3519         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3520         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3521         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3522         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3523         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3524         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3525         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3526         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3527         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3528         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3529         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3530         UPDATE_STAT64(tx_stat_gt127,
3531                                 tx_stat_etherstatspkts65octetsto127octets);
3532         UPDATE_STAT64(tx_stat_gt255,
3533                                 tx_stat_etherstatspkts128octetsto255octets);
3534         UPDATE_STAT64(tx_stat_gt511,
3535                                 tx_stat_etherstatspkts256octetsto511octets);
3536         UPDATE_STAT64(tx_stat_gt1023,
3537                                 tx_stat_etherstatspkts512octetsto1023octets);
3538         UPDATE_STAT64(tx_stat_gt1518,
3539                                 tx_stat_etherstatspkts1024octetsto1522octets);
3540         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3541         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3542         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3543         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3544         UPDATE_STAT64(tx_stat_gterr,
3545                                 tx_stat_dot3statsinternalmactransmiterrors);
3546         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3547
3548         estats->pause_frames_received_hi =
3549                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3550         estats->pause_frames_received_lo =
3551                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3552
3553         estats->pause_frames_sent_hi =
3554                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3555         estats->pause_frames_sent_lo =
3556                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3557 }
3558
3559 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3560 {
3561         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3562         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3563         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3564
3565         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3566         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3567         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3568         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3569         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3570         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3571         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3572         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3573         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3574         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3575         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3576         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3577         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3578         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3579         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3580         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3581         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3582         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3583         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3584         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3585         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3586         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3587         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3588         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3589         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3590         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3591         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3592         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3593         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3594         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3595         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3596
3597         estats->pause_frames_received_hi =
3598                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3599         estats->pause_frames_received_lo =
3600                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3601         ADD_64(estats->pause_frames_received_hi,
3602                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3603                estats->pause_frames_received_lo,
3604                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3605
3606         estats->pause_frames_sent_hi =
3607                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3608         estats->pause_frames_sent_lo =
3609                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3610         ADD_64(estats->pause_frames_sent_hi,
3611                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3612                estats->pause_frames_sent_lo,
3613                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3614 }
3615
3616 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3617 {
3618         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3619         struct nig_stats *old = &(bp->port.old_nig_stats);
3620         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3621         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3622         struct regpair diff;
3623         u32 nig_timer_max;
3624
3625         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3626                 bnx2x_bmac_stats_update(bp);
3627
3628         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3629                 bnx2x_emac_stats_update(bp);
3630
3631         else { /* unreached */
3632                 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3633                 return -1;
3634         }
3635
3636         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3637                       new->brb_discard - old->brb_discard);
3638         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3639                       new->brb_truncate - old->brb_truncate);
3640
3641         UPDATE_STAT64_NIG(egress_mac_pkt0,
3642                                         etherstatspkts1024octetsto1522octets);
3643         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3644
3645         memcpy(old, new, sizeof(struct nig_stats));
3646
3647         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3648                sizeof(struct mac_stx));
3649         estats->brb_drop_hi = pstats->brb_drop_hi;
3650         estats->brb_drop_lo = pstats->brb_drop_lo;
3651
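        /* bumping the end marker and copying it to the start marker tags
           the block as a complete, consistent snapshot for whoever reads
           it (presumably the MCP); an unequal pair would indicate the
           block was caught mid-update */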
3652         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3653
3654         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3655         if (nig_timer_max != estats->nig_timer_max) {
3656                 estats->nig_timer_max = nig_timer_max;
3657                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3658         }
3659
3660         return 0;
3661 }
3662
3663 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3664 {
3665         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3666         struct tstorm_per_port_stats *tport =
3667                                         &stats->tstorm_common.port_statistics;
3668         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3669         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3670         int i;
3671
3672         memset(&(fstats->total_bytes_received_hi), 0,
3673                sizeof(struct host_func_stats) - 2*sizeof(u32));
3674         estats->error_bytes_received_hi = 0;
3675         estats->error_bytes_received_lo = 0;
3676         estats->etherstatsoverrsizepkts_hi = 0;
3677         estats->etherstatsoverrsizepkts_lo = 0;
3678         estats->no_buff_discard_hi = 0;
3679         estats->no_buff_discard_lo = 0;
3680
3681         for_each_queue(bp, i) {
3682                 struct bnx2x_fastpath *fp = &bp->fp[i];
3683                 int cl_id = fp->cl_id;
3684                 struct tstorm_per_client_stats *tclient =
3685                                 &stats->tstorm_common.client_statistics[cl_id];
3686                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3687                 struct ustorm_per_client_stats *uclient =
3688                                 &stats->ustorm_common.client_statistics[cl_id];
3689                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3690                 struct xstorm_per_client_stats *xclient =
3691                                 &stats->xstorm_common.client_statistics[cl_id];
3692                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3693                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3694                 u32 diff;
3695
3696                 /* are storm stats valid? */
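                /*
                 * Each storm echoes the drv_counter it was queried with;
                 * since bnx2x_storm_stats_post() already advanced
                 * bp->stats_counter, a fresh block satisfies
                 * echoed + 1 == stats_counter for all three storms.
                 */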
3697                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3698                                                         bp->stats_counter) {
3699                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3700                            "  xstorm counter (%d) != stats_counter (%d)\n",
3701                            i, xclient->stats_counter, bp->stats_counter);
3702                         return -1;
3703                 }
3704                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3705                                                         bp->stats_counter) {
3706                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3707                            "  tstorm counter (%d) != stats_counter (%d)\n",
3708                            i, tclient->stats_counter, bp->stats_counter);
3709                         return -2;
3710                 }
3711                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3712                                                         bp->stats_counter) {
3713                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3714                            "  ustorm counter (%d) != stats_counter (%d)\n",
3715                            i, uclient->stats_counter, bp->stats_counter);
3716                         return -4;
3717                 }
3718
3719                 qstats->total_bytes_received_hi =
3720                 qstats->valid_bytes_received_hi =
3721                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3722                 qstats->total_bytes_received_lo =
3723                 qstats->valid_bytes_received_lo =
3724                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3725
3726                 qstats->error_bytes_received_hi =
3727                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3728                 qstats->error_bytes_received_lo =
3729                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3730
3731                 ADD_64(qstats->total_bytes_received_hi,
3732                        qstats->error_bytes_received_hi,
3733                        qstats->total_bytes_received_lo,
3734                        qstats->error_bytes_received_lo);
3735
3736                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3737                                         total_unicast_packets_received);
3738                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3739                                         total_multicast_packets_received);
3740                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3741                                         total_broadcast_packets_received);
3742                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3743                                         etherstatsoverrsizepkts);
3744                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3745
3746                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3747                                         total_unicast_packets_received);
3748                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3749                                         total_multicast_packets_received);
3750                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3751                                         total_broadcast_packets_received);
3752                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3753                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3754                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3755
3756                 qstats->total_bytes_transmitted_hi =
3757                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3758                 qstats->total_bytes_transmitted_lo =
3759                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3760
3761                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3762                                         total_unicast_packets_transmitted);
3763                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3764                                         total_multicast_packets_transmitted);
3765                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3766                                         total_broadcast_packets_transmitted);
3767
3768                 old_tclient->checksum_discard = tclient->checksum_discard;
3769                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3770
3771                 ADD_64(fstats->total_bytes_received_hi,
3772                        qstats->total_bytes_received_hi,
3773                        fstats->total_bytes_received_lo,
3774                        qstats->total_bytes_received_lo);
3775                 ADD_64(fstats->total_bytes_transmitted_hi,
3776                        qstats->total_bytes_transmitted_hi,
3777                        fstats->total_bytes_transmitted_lo,
3778                        qstats->total_bytes_transmitted_lo);
3779                 ADD_64(fstats->total_unicast_packets_received_hi,
3780                        qstats->total_unicast_packets_received_hi,
3781                        fstats->total_unicast_packets_received_lo,
3782                        qstats->total_unicast_packets_received_lo);
3783                 ADD_64(fstats->total_multicast_packets_received_hi,
3784                        qstats->total_multicast_packets_received_hi,
3785                        fstats->total_multicast_packets_received_lo,
3786                        qstats->total_multicast_packets_received_lo);
3787                 ADD_64(fstats->total_broadcast_packets_received_hi,
3788                        qstats->total_broadcast_packets_received_hi,
3789                        fstats->total_broadcast_packets_received_lo,
3790                        qstats->total_broadcast_packets_received_lo);
3791                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3792                        qstats->total_unicast_packets_transmitted_hi,
3793                        fstats->total_unicast_packets_transmitted_lo,
3794                        qstats->total_unicast_packets_transmitted_lo);
3795                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3796                        qstats->total_multicast_packets_transmitted_hi,
3797                        fstats->total_multicast_packets_transmitted_lo,
3798                        qstats->total_multicast_packets_transmitted_lo);
3799                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3800                        qstats->total_broadcast_packets_transmitted_hi,
3801                        fstats->total_broadcast_packets_transmitted_lo,
3802                        qstats->total_broadcast_packets_transmitted_lo);
3803                 ADD_64(fstats->valid_bytes_received_hi,
3804                        qstats->valid_bytes_received_hi,
3805                        fstats->valid_bytes_received_lo,
3806                        qstats->valid_bytes_received_lo);
3807
3808                 ADD_64(estats->error_bytes_received_hi,
3809                        qstats->error_bytes_received_hi,
3810                        estats->error_bytes_received_lo,
3811                        qstats->error_bytes_received_lo);
3812                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3813                        qstats->etherstatsoverrsizepkts_hi,
3814                        estats->etherstatsoverrsizepkts_lo,
3815                        qstats->etherstatsoverrsizepkts_lo);
3816                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3817                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3818         }
3819
3820         ADD_64(fstats->total_bytes_received_hi,
3821                estats->rx_stat_ifhcinbadoctets_hi,
3822                fstats->total_bytes_received_lo,
3823                estats->rx_stat_ifhcinbadoctets_lo);
3824
3825         memcpy(estats, &(fstats->total_bytes_received_hi),
3826                sizeof(struct host_func_stats) - 2*sizeof(u32));
3827
3828         ADD_64(estats->etherstatsoverrsizepkts_hi,
3829                estats->rx_stat_dot3statsframestoolong_hi,
3830                estats->etherstatsoverrsizepkts_lo,
3831                estats->rx_stat_dot3statsframestoolong_lo);
3832         ADD_64(estats->error_bytes_received_hi,
3833                estats->rx_stat_ifhcinbadoctets_hi,
3834                estats->error_bytes_received_lo,
3835                estats->rx_stat_ifhcinbadoctets_lo);
3836
3837         if (bp->port.pmf) {
3838                 estats->mac_filter_discard =
3839                                 le32_to_cpu(tport->mac_filter_discard);
3840                 estats->xxoverflow_discard =
3841                                 le32_to_cpu(tport->xxoverflow_discard);
3842                 estats->brb_truncate_discard =
3843                                 le32_to_cpu(tport->brb_truncate_discard);
3844                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3845         }
3846
3847         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3848
3849         bp->stats_pending = 0;
3850
3851         return 0;
3852 }
3853
3854 static void bnx2x_net_stats_update(struct bnx2x *bp)
3855 {
3856         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3857         struct net_device_stats *nstats = &bp->dev->stats;
3858         int i;
3859
3860         nstats->rx_packets =
3861                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3862                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3863                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3864
3865         nstats->tx_packets =
3866                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3867                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3868                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3869
3870         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3871
3872         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3873
3874         nstats->rx_dropped = estats->mac_discard;
3875         for_each_queue(bp, i)
3876                 nstats->rx_dropped +=
3877                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3878
3879         nstats->tx_dropped = 0;
3880
3881         nstats->multicast =
3882                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3883
3884         nstats->collisions =
3885                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3886
3887         nstats->rx_length_errors =
3888                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3889                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3890         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3891                                  bnx2x_hilo(&estats->brb_truncate_hi);
3892         nstats->rx_crc_errors =
3893                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3894         nstats->rx_frame_errors =
3895                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3896         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3897         nstats->rx_missed_errors = estats->xxoverflow_discard;
3898
3899         nstats->rx_errors = nstats->rx_length_errors +
3900                             nstats->rx_over_errors +
3901                             nstats->rx_crc_errors +
3902                             nstats->rx_frame_errors +
3903                             nstats->rx_fifo_errors +
3904                             nstats->rx_missed_errors;
3905
3906         nstats->tx_aborted_errors =
3907                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3908                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3909         nstats->tx_carrier_errors =
3910                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3911         nstats->tx_fifo_errors = 0;
3912         nstats->tx_heartbeat_errors = 0;
3913         nstats->tx_window_errors = 0;
3914
3915         nstats->tx_errors = nstats->tx_aborted_errors +
3916                             nstats->tx_carrier_errors +
3917             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3918 }
3919
3920 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3921 {
3922         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3923         int i;
3924
3925         estats->driver_xoff = 0;
3926         estats->rx_err_discard_pkt = 0;
3927         estats->rx_skb_alloc_failed = 0;
3928         estats->hw_csum_err = 0;
3929         for_each_queue(bp, i) {
3930                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3931
3932                 estats->driver_xoff += qstats->driver_xoff;
3933                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3934                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3935                 estats->hw_csum_err += qstats->hw_csum_err;
3936         }
3937 }
3938
3939 static void bnx2x_stats_update(struct bnx2x *bp)
3940 {
3941         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3942
3943         if (*stats_comp != DMAE_COMP_VAL)
3944                 return;
3945
3946         if (bp->port.pmf)
3947                 bnx2x_hw_stats_update(bp);
3948
3949         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3950                 BNX2X_ERR("storm stats were not updated for 3 times\n");
3951                 bnx2x_panic();
3952                 return;
3953         }
3954
3955         bnx2x_net_stats_update(bp);
3956         bnx2x_drv_stats_update(bp);
3957
3958         if (bp->msglevel & NETIF_MSG_TIMER) {
3959                 struct tstorm_per_client_stats *old_tclient =
3960                                                         &bp->fp->old_tclient;
3961                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3962                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3963                 struct net_device_stats *nstats = &bp->dev->stats;
3964                 int i;
3965
3966                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3967                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
3968                                   "  tx pkt (%lx)\n",
3969                        bnx2x_tx_avail(bp->fp),
3970                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3971                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
3972                                   "  rx pkt (%lx)\n",
3973                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3974                              bp->fp->rx_comp_cons),
3975                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3976                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
3977                                   "brb truncate %u\n",
3978                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3979                        qstats->driver_xoff,
3980                        estats->brb_drop_lo, estats->brb_truncate_lo);
3981                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
3982                         "packets_too_big_discard %lu  no_buff_discard %lu  "
3983                         "mac_discard %u  mac_filter_discard %u  "
3984                         "xxoverflow_discard %u  brb_truncate_discard %u  "
3985                         "ttl0_discard %u\n",
3986                        old_tclient->checksum_discard,
3987                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3988                        bnx2x_hilo(&qstats->no_buff_discard_hi),
3989                        estats->mac_discard, estats->mac_filter_discard,
3990                        estats->xxoverflow_discard, estats->brb_truncate_discard,
3991                        old_tclient->ttl0_discard);
3992
3993                 for_each_queue(bp, i) {
3994                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3995                                bnx2x_fp(bp, i, tx_pkt),
3996                                bnx2x_fp(bp, i, rx_pkt),
3997                                bnx2x_fp(bp, i, rx_calls));
3998                 }
3999         }
4000
4001         bnx2x_hw_stats_post(bp);
4002         bnx2x_storm_stats_post(bp);
4003 }
4004
4005 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4006 {
4007         struct dmae_command *dmae;
4008         u32 opcode;
4009         int loader_idx = PMF_DMAE_C(bp);
4010         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4011
4012         bp->executer_idx = 0;
4013
4014         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4015                   DMAE_CMD_C_ENABLE |
4016                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4017 #ifdef __BIG_ENDIAN
4018                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4019 #else
4020                   DMAE_CMD_ENDIANITY_DW_SWAP |
4021 #endif
4022                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4023                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4024
4025         if (bp->port.port_stx) {
4026
4027                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4028                 if (bp->func_stx)
4029                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4030                 else
4031                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4032                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4033                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4034                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4035                 dmae->dst_addr_hi = 0;
4036                 dmae->len = sizeof(struct host_port_stats) >> 2;
4037                 if (bp->func_stx) {
4038                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4039                         dmae->comp_addr_hi = 0;
4040                         dmae->comp_val = 1;
4041                 } else {
4042                         dmae->comp_addr_lo =
4043                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4044                         dmae->comp_addr_hi =
4045                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4046                         dmae->comp_val = DMAE_COMP_VAL;
4047
4048                         *stats_comp = 0;
4049                 }
4050         }
4051
4052         if (bp->func_stx) {
4053
4054                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4055                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4056                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4057                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4058                 dmae->dst_addr_lo = bp->func_stx >> 2;
4059                 dmae->dst_addr_hi = 0;
4060                 dmae->len = sizeof(struct host_func_stats) >> 2;
4061                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4062                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4063                 dmae->comp_val = DMAE_COMP_VAL;
4064
4065                 *stats_comp = 0;
4066         }
4067 }
4068
4069 static void bnx2x_stats_stop(struct bnx2x *bp)
4070 {
4071         int update = 0;
4072
4073         bnx2x_stats_comp(bp);
4074
4075         if (bp->port.pmf)
4076                 update = (bnx2x_hw_stats_update(bp) == 0);
4077
4078         update |= (bnx2x_storm_stats_update(bp) == 0);
4079
4080         if (update) {
4081                 bnx2x_net_stats_update(bp);
4082
4083                 if (bp->port.pmf)
4084                         bnx2x_port_stats_stop(bp);
4085
4086                 bnx2x_hw_stats_post(bp);
4087                 bnx2x_stats_comp(bp);
4088         }
4089 }
4090
4091 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4092 {
4093 }
4094
4095 static const struct {
4096         void (*action)(struct bnx2x *bp);
4097         enum bnx2x_stats_state next_state;
4098 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4099 /* state        event   */
4100 {
4101 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4102 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4103 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4104 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4105 },
4106 {
4107 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4108 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4109 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4110 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4111 }
4112 };
4113
4114 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4115 {
4116         enum bnx2x_stats_state state = bp->stats_state;
4117
4118         bnx2x_stats_stm[state][event].action(bp);
4119         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4120
4121         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4122                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4123                    state, event, bp->stats_state);
4124 }
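
/*
 * For illustration only -- not driver code: the dispatch above is a
 * table-driven state machine, where each (state, event) cell names an
 * action callback and the next state, so no switch statement is needed.
 * A minimal sketch of the same pattern, with made-up states and events:
 *
 *      enum state { ST_OFF, ST_ON, ST_MAX };
 *      enum event { EV_START, EV_STOP, EV_MAX };
 *
 *      struct transition {
 *              void (*action)(void);
 *              enum state next;
 *      };
 *
 *      static void do_nothing(void) { }
 *      static void power_up(void) { }
 *
 *      static const struct transition stm[ST_MAX][EV_MAX] = {
 *              [ST_OFF] = { [EV_START] = { power_up,   ST_ON  },
 *                           [EV_STOP]  = { do_nothing, ST_OFF } },
 *              [ST_ON]  = { [EV_START] = { do_nothing, ST_ON  },
 *                           [EV_STOP]  = { do_nothing, ST_OFF } },
 *      };
 *
 *      static enum state cur = ST_OFF;
 *
 *      static void handle(enum event ev)
 *      {
 *              stm[cur][ev].action();
 *              cur = stm[cur][ev].next;
 *      }
 */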
4125
4126 static void bnx2x_timer(unsigned long data)
4127 {
4128         struct bnx2x *bp = (struct bnx2x *) data;
4129
4130         if (!netif_running(bp->dev))
4131                 return;
4132
4133         if (atomic_read(&bp->intr_sem) != 0)
4134                 goto timer_restart;
4135
4136         if (poll) {
4137                 struct bnx2x_fastpath *fp = &bp->fp[0];
4138                 int rc;
4139
4140                 bnx2x_tx_int(fp, 1000);
4141                 rc = bnx2x_rx_int(fp, 1000);
4142         }
4143
4144         if (!BP_NOMCP(bp)) {
4145                 int func = BP_FUNC(bp);
4146                 u32 drv_pulse;
4147                 u32 mcp_pulse;
4148
4149                 ++bp->fw_drv_pulse_wr_seq;
4150                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4151                 /* TBD - add SYSTEM_TIME */
4152                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4153                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4154
4155                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4156                              MCP_PULSE_SEQ_MASK);
4157                 /* The delta between driver pulse and mcp response
4158                  * should be 1 (before mcp response) or 0 (after mcp response)
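         * e.g. with drv_pulse == 0x025, an mcp_pulse of 0x025 (after the
         * response) or 0x024 (before it) is healthy; any other value
         * means a missed heartbeat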
4159                  */
4160                 if ((drv_pulse != mcp_pulse) &&
4161                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4162                         /* someone lost a heartbeat... */
4163                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4164                                   drv_pulse, mcp_pulse);
4165                 }
4166         }
4167
4168         if ((bp->state == BNX2X_STATE_OPEN) ||
4169             (bp->state == BNX2X_STATE_DISABLED))
4170                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4171
4172 timer_restart:
4173         mod_timer(&bp->timer, jiffies + bp->current_interval);
4174 }
4175
4176 /* end of Statistics */
4177
4178 /* nic init */
4179
4180 /*
4181  * nic init service functions
4182  */
4183
4184 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4185 {
4186         int port = BP_PORT(bp);
4187
4188         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4189                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4190                         sizeof(struct ustorm_status_block)/4);
4191         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4192                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4193                         sizeof(struct cstorm_status_block)/4);
4194 }
4195
4196 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4197                           dma_addr_t mapping, int sb_id)
4198 {
4199         int port = BP_PORT(bp);
4200         int func = BP_FUNC(bp);
4201         int index;
4202         u64 section;
4203
4204         /* USTORM */
4205         section = ((u64)mapping) + offsetof(struct host_status_block,
4206                                             u_status_block);
4207         sb->u_status_block.status_block_id = sb_id;
4208
4209         REG_WR(bp, BAR_USTRORM_INTMEM +
4210                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4211         REG_WR(bp, BAR_USTRORM_INTMEM +
4212                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4213                U64_HI(section));
4214         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4215                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4216
4217         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4218                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4219                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4220
4221         /* CSTORM */
4222         section = ((u64)mapping) + offsetof(struct host_status_block,
4223                                             c_status_block);
4224         sb->c_status_block.status_block_id = sb_id;
4225
4226         REG_WR(bp, BAR_CSTRORM_INTMEM +
4227                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4228         REG_WR(bp, BAR_CSTRORM_INTMEM +
4229                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4230                U64_HI(section));
4231         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4232                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4233
4234         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4235                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4236                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4237
4238         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4239 }
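
/*
 * Note: the storm register files are 32 bits wide, so every 64-bit host
 * status block address above is programmed as a U64_LO()/U64_HI() pair
 * at offset and offset + 4.  The trailing loops write 1 into each
 * HC_DISABLE slot, leaving host coalescing off for every index until
 * bnx2x_update_coalesce() explicitly enables the ones it uses.
 */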
4240
4241 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4242 {
4243         int func = BP_FUNC(bp);
4244
4245         bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4246                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4247                         sizeof(struct ustorm_def_status_block)/4);
4248         bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4249                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4250                         sizeof(struct cstorm_def_status_block)/4);
4251         bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4252                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4253                         sizeof(struct xstorm_def_status_block)/4);
4254         bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4255                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4256                         sizeof(struct tstorm_def_status_block)/4);
4257 }
4258
4259 static void bnx2x_init_def_sb(struct bnx2x *bp,
4260                               struct host_def_status_block *def_sb,
4261                               dma_addr_t mapping, int sb_id)
4262 {
4263         int port = BP_PORT(bp);
4264         int func = BP_FUNC(bp);
4265         int index, val, reg_offset;
4266         u64 section;
4267
4268         /* ATTN */
4269         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4270                                             atten_status_block);
4271         def_sb->atten_status_block.status_block_id = sb_id;
4272
4273         bp->attn_state = 0;
4274
4275         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4276                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4277
4278         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4279                 bp->attn_group[index].sig[0] = REG_RD(bp,
4280                                                      reg_offset + 0x10*index);
4281                 bp->attn_group[index].sig[1] = REG_RD(bp,
4282                                                reg_offset + 0x4 + 0x10*index);
4283                 bp->attn_group[index].sig[2] = REG_RD(bp,
4284                                                reg_offset + 0x8 + 0x10*index);
4285                 bp->attn_group[index].sig[3] = REG_RD(bp,
4286                                                reg_offset + 0xc + 0x10*index);
4287         }
4288
4289         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4290                              HC_REG_ATTN_MSG0_ADDR_L);
4291
4292         REG_WR(bp, reg_offset, U64_LO(section));
4293         REG_WR(bp, reg_offset + 4, U64_HI(section));
4294
4295         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4296
4297         val = REG_RD(bp, reg_offset);
4298         val |= sb_id;
4299         REG_WR(bp, reg_offset, val);
4300
4301         /* USTORM */
4302         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4303                                             u_def_status_block);
4304         def_sb->u_def_status_block.status_block_id = sb_id;
4305
4306         REG_WR(bp, BAR_USTRORM_INTMEM +
4307                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4308         REG_WR(bp, BAR_USTRORM_INTMEM +
4309                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4310                U64_HI(section));
4311         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4312                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4313
4314         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4315                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4316                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4317
4318         /* CSTORM */
4319         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4320                                             c_def_status_block);
4321         def_sb->c_def_status_block.status_block_id = sb_id;
4322
4323         REG_WR(bp, BAR_CSTRORM_INTMEM +
4324                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4325         REG_WR(bp, BAR_CSTRORM_INTMEM +
4326                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4327                U64_HI(section));
4328         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4329                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4330
4331         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4332                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4333                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4334
4335         /* TSTORM */
4336         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4337                                             t_def_status_block);
4338         def_sb->t_def_status_block.status_block_id = sb_id;
4339
4340         REG_WR(bp, BAR_TSTRORM_INTMEM +
4341                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4342         REG_WR(bp, BAR_TSTRORM_INTMEM +
4343                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4344                U64_HI(section));
4345         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4346                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4347
4348         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4349                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4350                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4351
4352         /* XSTORM */
4353         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4354                                             x_def_status_block);
4355         def_sb->x_def_status_block.status_block_id = sb_id;
4356
4357         REG_WR(bp, BAR_XSTRORM_INTMEM +
4358                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4359         REG_WR(bp, BAR_XSTRORM_INTMEM +
4360                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4361                U64_HI(section));
4362         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4363                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4364
4365         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4366                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4367                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4368
4369         bp->stats_pending = 0;
4370         bp->set_mac_pending = 0;
4371
4372         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4373 }
4374
4375 static void bnx2x_update_coalesce(struct bnx2x *bp)
4376 {
4377         int port = BP_PORT(bp);
4378         int i;
4379
4380         for_each_queue(bp, i) {
4381                 int sb_id = bp->fp[i].sb_id;
4382
4383                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4384                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4385                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4386                                                     U_SB_ETH_RX_CQ_INDEX),
4387                         bp->rx_ticks/12);
4388                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4389                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4390                                                      U_SB_ETH_RX_CQ_INDEX),
4391                          bp->rx_ticks ? 0 : 1);
4392
4393                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4394                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4395                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4396                                                     C_SB_ETH_TX_CQ_INDEX),
4397                         bp->tx_ticks/12);
4398                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4399                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4400                                                      C_SB_ETH_TX_CQ_INDEX),
4401                          bp->tx_ticks ? 0 : 1);
4402         }
4403 }
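
/*
 * A worked example of the arithmetic above, assuming the status block
 * timeout registers tick in 12us units (an inference from the divide by
 * 12, not from documentation): rx_ticks = 25 programs 25 / 12 = 2 ticks,
 * i.e. roughly 24us of Rx interrupt coalescing, while rx_ticks = 0 sets
 * the HC_DISABLE flag instead so that index never waits on the timer.
 */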
4404
4405 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4406                                        struct bnx2x_fastpath *fp, int last)
4407 {
4408         int i;
4409
4410         for (i = 0; i < last; i++) {
4411                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4412                 struct sk_buff *skb = rx_buf->skb;
4413
4414                 if (skb == NULL) {
4415                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4416                         continue;
4417                 }
4418
4419                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4420                         pci_unmap_single(bp->pdev,
4421                                          pci_unmap_addr(rx_buf, mapping),
4422                                          bp->rx_buf_size,
4423                                          PCI_DMA_FROMDEVICE);
4424
4425                 dev_kfree_skb(skb);
4426                 rx_buf->skb = NULL;
4427         }
4428 }
4429
4430 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4431 {
4432         int func = BP_FUNC(bp);
4433         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4434                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4435         u16 ring_prod, cqe_ring_prod;
4436         int i, j;
4437
4438         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4439         DP(NETIF_MSG_IFUP,
4440            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4441
4442         if (bp->flags & TPA_ENABLE_FLAG) {
4443
4444                 for_each_rx_queue(bp, j) {
4445                         struct bnx2x_fastpath *fp = &bp->fp[j];
4446
4447                         for (i = 0; i < max_agg_queues; i++) {
4448                                 fp->tpa_pool[i].skb =
4449                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4450                                 if (!fp->tpa_pool[i].skb) {
4451                                         BNX2X_ERR("Failed to allocate TPA "
4452                                                   "skb pool for queue[%d] - "
4453                                                   "disabling TPA on this "
4454                                                   "queue!\n", j);
4455                                         bnx2x_free_tpa_pool(bp, fp, i);
4456                                         fp->disable_tpa = 1;
4457                                         break;
4458                                 }
4459                                 pci_unmap_addr_set((struct sw_rx_bd *)
4460                                                         &fp->tpa_pool[i],
4461                                                    mapping, 0);
4462                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4463                         }
4464                 }
4465         }
4466
4467         for_each_rx_queue(bp, j) {
4468                 struct bnx2x_fastpath *fp = &bp->fp[j];
4469
4470                 fp->rx_bd_cons = 0;
4471                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4472                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4473
4474                 /* "next page" elements initialization */
4475                 /* SGE ring */
4476                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4477                         struct eth_rx_sge *sge;
4478
4479                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4480                         sge->addr_hi =
4481                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4482                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4483                         sge->addr_lo =
4484                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4485                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4486                 }
4487
4488                 bnx2x_init_sge_ring_bit_mask(fp);
4489
4490                 /* RX BD ring */
4491                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4492                         struct eth_rx_bd *rx_bd;
4493
4494                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4495                         rx_bd->addr_hi =
4496                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4497                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4498                         rx_bd->addr_lo =
4499                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4500                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4501                 }
4502
4503                 /* CQ ring */
4504                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4505                         struct eth_rx_cqe_next_page *nextpg;
4506
4507                         nextpg = (struct eth_rx_cqe_next_page *)
4508                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4509                         nextpg->addr_hi =
4510                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4511                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4512                         nextpg->addr_lo =
4513                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4514                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4515                 }
4516
4517                 /* Allocate SGEs and initialize the ring elements */
4518                 for (i = 0, ring_prod = 0;
4519                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4520
4521                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4522                                 BNX2X_ERR("was only able to allocate "
4523                                           "%d rx sges\n", i);
4524                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4525                                 /* Cleanup already allocated elements */
4526                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4527                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4528                                 fp->disable_tpa = 1;
4529                                 ring_prod = 0;
4530                                 break;
4531                         }
4532                         ring_prod = NEXT_SGE_IDX(ring_prod);
4533                 }
4534                 fp->rx_sge_prod = ring_prod;
4535
4536                 /* Allocate BDs and initialize BD ring */
4537                 fp->rx_comp_cons = 0;
4538                 cqe_ring_prod = ring_prod = 0;
4539                 for (i = 0; i < bp->rx_ring_size; i++) {
4540                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4541                                 BNX2X_ERR("was only able to allocate "
4542                                           "%d rx skbs on queue[%d]\n", i, j);
4543                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4544                                 break;
4545                         }
4546                         ring_prod = NEXT_RX_IDX(ring_prod);
4547                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4548                         WARN_ON(ring_prod <= i);
4549                 }
4550
4551                 fp->rx_bd_prod = ring_prod;
4552                 /* must not have more available CQEs than BDs */
4553                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4554                                        cqe_ring_prod);
4555                 fp->rx_pkt = fp->rx_calls = 0;
4556
4557                 /* Warning!
4558                  * This will generate an interrupt (to the TSTORM);
4559                  * it must only be done after the chip is initialized.
4560                  */
4561                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4562                                      fp->rx_sge_prod);
4563                 if (j != 0)
4564                         continue;
4565
4566                 REG_WR(bp, BAR_USTRORM_INTMEM +
4567                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4568                        U64_LO(fp->rx_comp_mapping));
4569                 REG_WR(bp, BAR_USTRORM_INTMEM +
4570                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4571                        U64_HI(fp->rx_comp_mapping));
4572         }
4573 }
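
/*
 * Note: each ring initialized above is a chain of pages.  The loops that
 * run from i = 1 plant a "next page" element near the end of page i - 1,
 * and the (i % NUM_..._RINGS) arithmetic points the last page back at
 * the first; e.g. with NUM_RX_RINGS = 4 the BD chain reads
 * page0 -> page1 -> page2 -> page3 -> page0, forming a circular ring.
 */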
4574
4575 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4576 {
4577         int i, j;
4578
4579         for_each_tx_queue(bp, j) {
4580                 struct bnx2x_fastpath *fp = &bp->fp[j];
4581
4582                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4583                         struct eth_tx_bd *tx_bd =
4584                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4585
4586                         tx_bd->addr_hi =
4587                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4588                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4589                         tx_bd->addr_lo =
4590                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4591                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4592                 }
4593
4594                 fp->tx_pkt_prod = 0;
4595                 fp->tx_pkt_cons = 0;
4596                 fp->tx_bd_prod = 0;
4597                 fp->tx_bd_cons = 0;
4598                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4599                 fp->tx_pkt = 0;
4600         }
4601 }
4602
4603 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4604 {
4605         int func = BP_FUNC(bp);
4606
4607         spin_lock_init(&bp->spq_lock);
4608
4609         bp->spq_left = MAX_SPQ_PENDING;
4610         bp->spq_prod_idx = 0;
4611         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4612         bp->spq_prod_bd = bp->spq;
4613         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4614
4615         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4616                U64_LO(bp->spq_mapping));
4617         REG_WR(bp,
4618                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4619                U64_HI(bp->spq_mapping));
4620
4621         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4622                bp->spq_prod_idx);
4623 }
4624
4625 static void bnx2x_init_context(struct bnx2x *bp)
4626 {
4627         int i;
4628
4629         for_each_queue(bp, i) {
4630                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4631                 struct bnx2x_fastpath *fp = &bp->fp[i];
4632                 u8 cl_id = fp->cl_id;
4633                 u8 sb_id = FP_SB_ID(fp);
4634
4635                 context->ustorm_st_context.common.sb_index_numbers =
4636                                                 BNX2X_RX_SB_INDEX_NUM;
4637                 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4638                 context->ustorm_st_context.common.status_block_id = sb_id;
4639                 context->ustorm_st_context.common.flags =
4640                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4641                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4642                 context->ustorm_st_context.common.statistics_counter_id =
4643                                                 cl_id;
4644                 context->ustorm_st_context.common.mc_alignment_log_size =
4645                                                 BNX2X_RX_ALIGN_SHIFT;
4646                 context->ustorm_st_context.common.bd_buff_size =
4647                                                 bp->rx_buf_size;
4648                 context->ustorm_st_context.common.bd_page_base_hi =
4649                                                 U64_HI(fp->rx_desc_mapping);
4650                 context->ustorm_st_context.common.bd_page_base_lo =
4651                                                 U64_LO(fp->rx_desc_mapping);
4652                 if (!fp->disable_tpa) {
4653                         context->ustorm_st_context.common.flags |=
4654                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4655                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4656                         context->ustorm_st_context.common.sge_buff_size =
4657                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4658                                          (u32)0xffff);
4659                         context->ustorm_st_context.common.sge_page_base_hi =
4660                                                 U64_HI(fp->rx_sge_mapping);
4661                         context->ustorm_st_context.common.sge_page_base_lo =
4662                                                 U64_LO(fp->rx_sge_mapping);
4663                 }
4664
4665                 context->ustorm_ag_context.cdu_usage =
4666                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4667                                                CDU_REGION_NUMBER_UCM_AG,
4668                                                ETH_CONNECTION_TYPE);
4669
4670                 context->xstorm_st_context.tx_bd_page_base_hi =
4671                                                 U64_HI(fp->tx_desc_mapping);
4672                 context->xstorm_st_context.tx_bd_page_base_lo =
4673                                                 U64_LO(fp->tx_desc_mapping);
4674                 context->xstorm_st_context.db_data_addr_hi =
4675                                                 U64_HI(fp->tx_prods_mapping);
4676                 context->xstorm_st_context.db_data_addr_lo =
4677                                                 U64_LO(fp->tx_prods_mapping);
4678                 context->xstorm_st_context.statistics_data = (fp->cl_id |
4679                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4680                 context->cstorm_st_context.sb_index_number =
4681                                                 C_SB_ETH_TX_CQ_INDEX;
4682                 context->cstorm_st_context.status_block_id = sb_id;
4683
4684                 context->xstorm_ag_context.cdu_reserved =
4685                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4686                                                CDU_REGION_NUMBER_XCM_AG,
4687                                                ETH_CONNECTION_TYPE);
4688         }
4689 }
4690
4691 static void bnx2x_init_ind_table(struct bnx2x *bp)
4692 {
4693         int func = BP_FUNC(bp);
4694         int i;
4695
4696         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4697                 return;
4698
4699         DP(NETIF_MSG_IFUP,
4700            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4701         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4702                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4703                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4704                         BP_CL_ID(bp) + (i % bp->num_rx_queues));
4705 }
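
/*
 * A worked example of the round-robin fill above: with BP_CL_ID(bp) == 0
 * and num_rx_queues == 4, the indirection table entries are written as
 * 0, 1, 2, 3, 0, 1, 2, 3, ... so RSS hash results spread incoming flows
 * evenly across the four Rx queues.
 */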
4706
4707 static void bnx2x_set_client_config(struct bnx2x *bp)
4708 {
4709         struct tstorm_eth_client_config tstorm_client = {0};
4710         int port = BP_PORT(bp);
4711         int i;
4712
4713         tstorm_client.mtu = bp->dev->mtu;
4714         tstorm_client.config_flags =
4715                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4716                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4717 #ifdef BCM_VLAN
4718         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4719                 tstorm_client.config_flags |=
4720                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4721                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4722         }
4723 #endif
4724
4725         if (bp->flags & TPA_ENABLE_FLAG) {
4726                 tstorm_client.max_sges_for_packet =
4727                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4728                 tstorm_client.max_sges_for_packet =
4729                         ((tstorm_client.max_sges_for_packet +
4730                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4731                         PAGES_PER_SGE_SHIFT;
4732
4733                 tstorm_client.config_flags |=
4734                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4735         }
4736
4737         for_each_queue(bp, i) {
4738                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4739
4740                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4741                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4742                        ((u32 *)&tstorm_client)[0]);
4743                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4744                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4745                        ((u32 *)&tstorm_client)[1]);
4746         }
4747
4748         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4749            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4750 }
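
/*
 * A worked example of the max_sges_for_packet math above, assuming
 * SGE_PAGE_SIZE == 4096 and PAGES_PER_SGE == 2 (illustrative values
 * only): for mtu = 9000, SGE_PAGE_ALIGN(9000) >> SGE_PAGE_SHIFT =
 * 12288 >> 12 = 3 pages; rounding up to a PAGES_PER_SGE multiple gives
 * (3 + 1) & ~1 = 4, and 4 >> PAGES_PER_SGE_SHIFT = 2 SGEs per frame.
 */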
4751
4752 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4753 {
4754         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4755         int mode = bp->rx_mode;
4756         int mask = (1 << BP_L_ID(bp));
4757         int func = BP_FUNC(bp);
4758         int i;
4759
4760         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4761
4762         switch (mode) {
4763         case BNX2X_RX_MODE_NONE: /* no Rx */
4764                 tstorm_mac_filter.ucast_drop_all = mask;
4765                 tstorm_mac_filter.mcast_drop_all = mask;
4766                 tstorm_mac_filter.bcast_drop_all = mask;
4767                 break;
4768         case BNX2X_RX_MODE_NORMAL:
4769                 tstorm_mac_filter.bcast_accept_all = mask;
4770                 break;
4771         case BNX2X_RX_MODE_ALLMULTI:
4772                 tstorm_mac_filter.mcast_accept_all = mask;
4773                 tstorm_mac_filter.bcast_accept_all = mask;
4774                 break;
4775         case BNX2X_RX_MODE_PROMISC:
4776                 tstorm_mac_filter.ucast_accept_all = mask;
4777                 tstorm_mac_filter.mcast_accept_all = mask;
4778                 tstorm_mac_filter.bcast_accept_all = mask;
4779                 break;
4780         default:
4781                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4782                 break;
4783         }
4784
4785         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4786                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4787                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4788                        ((u32 *)&tstorm_mac_filter)[i]);
4789
4790 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4791                    ((u32 *)&tstorm_mac_filter)[i]); */
4792         }
4793
4794         if (mode != BNX2X_RX_MODE_NONE)
4795                 bnx2x_set_client_config(bp);
4796 }
4797
4798 static void bnx2x_init_internal_common(struct bnx2x *bp)
4799 {
4800         int i;
4801
4802         if (bp->flags & TPA_ENABLE_FLAG) {
4803                 struct tstorm_eth_tpa_exist tpa = {0};
4804
4805                 tpa.tpa_exist = 1;
4806
4807                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4808                        ((u32 *)&tpa)[0]);
4809                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4810                        ((u32 *)&tpa)[1]);
4811         }
4812
4813         /* Zero this manually as its initialization is
4814            currently missing in the initTool */
4815         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4816                 REG_WR(bp, BAR_USTRORM_INTMEM +
4817                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4818 }
4819
4820 static void bnx2x_init_internal_port(struct bnx2x *bp)
4821 {
4822         int port = BP_PORT(bp);
4823
4824         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4825         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4826         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4827         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4828 }
4829
4830 /* Calculates the sum of vn_min_rates.
4831    It's needed for further normalizing of the min_rates.
4832    Returns:
4833      sum of vn_min_rates.
4834        or
4835      0 - if all the min_rates are 0.
4836      In the latter case the fairness algorithm should be deactivated.
4837      If not all min_rates are zero then those that are zeroes will be set to 1.
4838  */
4839 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4840 {
4841         int all_zero = 1;
4842         int port = BP_PORT(bp);
4843         int vn;
4844
4845         bp->vn_weight_sum = 0;
4846         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4847                 int func = 2*vn + port;
4848                 u32 vn_cfg =
4849                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4850                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4851                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4852
4853                 /* Skip hidden vns */
4854                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4855                         continue;
4856
4857                 /* If min rate is zero - set it to 1 */
4858                 if (!vn_min_rate)
4859                         vn_min_rate = DEF_MIN_RATE;
4860                 else
4861                         all_zero = 0;
4862
4863                 bp->vn_weight_sum += vn_min_rate;
4864         }
4865
4866         /* ... only if all min rates are zeros - disable fairness */
4867         if (all_zero)
4868                 bp->vn_weight_sum = 0;
4869 }
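
/*
 * A worked example, assuming vn_min_rate ends up in hundredths of a
 * percent: if the four VNs yield 0, 2500, 0 and 6000, not all rates are
 * zero, so the two zero entries are bumped to DEF_MIN_RATE and
 * vn_weight_sum becomes 2500 + 6000 + 2 * DEF_MIN_RATE.  Had all four
 * been zero, vn_weight_sum would be forced back to 0 and the fairness
 * algorithm disabled.
 */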
4870
4871 static void bnx2x_init_internal_func(struct bnx2x *bp)
4872 {
4873         struct tstorm_eth_function_common_config tstorm_config = {0};
4874         struct stats_indication_flags stats_flags = {0};
4875         int port = BP_PORT(bp);
4876         int func = BP_FUNC(bp);
4877         int i, j;
4878         u32 offset;
4879         u16 max_agg_size;
4880
4881         if (is_multi(bp)) {
4882                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4883                 tstorm_config.rss_result_mask = MULTI_MASK;
4884         }
4885         if (IS_E1HMF(bp))
4886                 tstorm_config.config_flags |=
4887                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4888
4889         tstorm_config.leading_client_id = BP_L_ID(bp);
4890
4891         REG_WR(bp, BAR_TSTRORM_INTMEM +
4892                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4893                (*(u32 *)&tstorm_config));
4894
4895         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4896         bnx2x_set_storm_rx_mode(bp);
4897
4898         for_each_queue(bp, i) {
4899                 u8 cl_id = bp->fp[i].cl_id;
4900
4901                 /* reset xstorm per client statistics */
4902                 offset = BAR_XSTRORM_INTMEM +
4903                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4904                 for (j = 0;
4905                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4906                         REG_WR(bp, offset + j*4, 0);
4907
4908                 /* reset tstorm per client statistics */
4909                 offset = BAR_TSTRORM_INTMEM +
4910                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4911                 for (j = 0;
4912                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4913                         REG_WR(bp, offset + j*4, 0);
4914
4915                 /* reset ustorm per client statistics */
4916                 offset = BAR_USTRORM_INTMEM +
4917                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4918                 for (j = 0;
4919                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4920                         REG_WR(bp, offset + j*4, 0);
4921         }
4922
4923         /* Init statistics related context */
4924         stats_flags.collect_eth = 1;
4925
4926         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4927                ((u32 *)&stats_flags)[0]);
4928         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4929                ((u32 *)&stats_flags)[1]);
4930
4931         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4932                ((u32 *)&stats_flags)[0]);
4933         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4934                ((u32 *)&stats_flags)[1]);
4935
4936         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4937                ((u32 *)&stats_flags)[0]);
4938         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4939                ((u32 *)&stats_flags)[1]);
4940
4941         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4942                ((u32 *)&stats_flags)[0]);
4943         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4944                ((u32 *)&stats_flags)[1]);
4945
4946         REG_WR(bp, BAR_XSTRORM_INTMEM +
4947                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4948                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4949         REG_WR(bp, BAR_XSTRORM_INTMEM +
4950                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4951                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4952
4953         REG_WR(bp, BAR_TSTRORM_INTMEM +
4954                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4955                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4956         REG_WR(bp, BAR_TSTRORM_INTMEM +
4957                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4958                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4959
4960         REG_WR(bp, BAR_USTRORM_INTMEM +
4961                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4962                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4963         REG_WR(bp, BAR_USTRORM_INTMEM +
4964                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4965                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4966
4967         if (CHIP_IS_E1H(bp)) {
4968                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4969                         IS_E1HMF(bp));
4970                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4971                         IS_E1HMF(bp));
4972                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4973                         IS_E1HMF(bp));
4974                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4975                         IS_E1HMF(bp));
4976
4977                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4978                          bp->e1hov);
4979         }
4980
4981         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4982         max_agg_size =
4983                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4984                           SGE_PAGE_SIZE * PAGES_PER_SGE),
4985                     (u32)0xffff);
4986         for_each_rx_queue(bp, i) {
4987                 struct bnx2x_fastpath *fp = &bp->fp[i];
4988
4989                 REG_WR(bp, BAR_USTRORM_INTMEM +
4990                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4991                        U64_LO(fp->rx_comp_mapping));
4992                 REG_WR(bp, BAR_USTRORM_INTMEM +
4993                        USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4994                        U64_HI(fp->rx_comp_mapping));
4995
4996                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4997                          USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4998                          max_agg_size);
4999         }
5000
5001         /* dropless flow control */
5002         if (CHIP_IS_E1H(bp)) {
5003                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5004
5005                 rx_pause.bd_thr_low = 250;
5006                 rx_pause.cqe_thr_low = 250;
5007                 rx_pause.cos = 1;
5008                 rx_pause.sge_thr_low = 0;
5009                 rx_pause.bd_thr_high = 350;
5010                 rx_pause.cqe_thr_high = 350;
5011                 rx_pause.sge_thr_high = 0;
5012
5013                 for_each_rx_queue(bp, i) {
5014                         struct bnx2x_fastpath *fp = &bp->fp[i];
5015
5016                         if (!fp->disable_tpa) {
5017                                 rx_pause.sge_thr_low = 150;
5018                                 rx_pause.sge_thr_high = 250;
5019                         }
5020
5021
5022                         offset = BAR_USTRORM_INTMEM +
5023                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5024                                                                    fp->cl_id);
5025                         for (j = 0;
5026                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5027                              j++)
5028                                 REG_WR(bp, offset + j*4,
5029                                        ((u32 *)&rx_pause)[j]);
5030                 }
5031         }
5032
5033         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5034
5035         /* Init rate shaping and fairness contexts */
5036         if (IS_E1HMF(bp)) {
5037                 int vn;
5038
5039                 /* During init there is no active link.
5040                    Until link is up, set the link rate to 10Gbps */
5041                 bp->link_vars.line_speed = SPEED_10000;
5042                 bnx2x_init_port_minmax(bp);
5043
5044                 bnx2x_calc_vn_weight_sum(bp);
5045
5046                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5047                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5048
5049                 /* Enable rate shaping and fairness */
5050                 bp->cmng.flags.cmng_enables =
5051                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5052                 if (bp->vn_weight_sum)
5053                         bp->cmng.flags.cmng_enables |=
5054                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5055                 else
5056                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
5057                            " fairness will be disabled\n");
5058         } else {
5059                 /* rate shaping and fairness are disabled */
5060                 DP(NETIF_MSG_IFUP,
5061                    "single function mode  minmax will be disabled\n");
5062         }
5063
5064
5065         /* Store it to internal memory */
5066         if (bp->port.pmf)
5067                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5068                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5069                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5070                                ((u32 *)(&bp->cmng))[i]);
5071 }
5072
5073 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5074 {
5075         switch (load_code) {
5076         case FW_MSG_CODE_DRV_LOAD_COMMON:
5077                 bnx2x_init_internal_common(bp);
5078                 /* no break */
5079
5080         case FW_MSG_CODE_DRV_LOAD_PORT:
5081                 bnx2x_init_internal_port(bp);
5082                 /* no break */
5083
5084         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5085                 bnx2x_init_internal_func(bp);
5086                 break;
5087
5088         default:
5089                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5090                 break;
5091         }
5092 }
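
/*
 * Note: the missing breaks above are deliberate.  The load codes form a
 * hierarchy, so the switch cascades: a COMMON load runs the common, port
 * and function init; a PORT load runs the port and function init; a
 * FUNCTION load runs the function init only.
 */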
5093
5094 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5095 {
5096         int i;
5097
5098         for_each_queue(bp, i) {
5099                 struct bnx2x_fastpath *fp = &bp->fp[i];
5100
5101                 fp->bp = bp;
5102                 fp->state = BNX2X_FP_STATE_CLOSED;
5103                 fp->index = i;
5104                 fp->cl_id = BP_L_ID(bp) + i;
5105                 fp->sb_id = fp->cl_id;
5106                 DP(NETIF_MSG_IFUP,
5107                    "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
5108                    bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5109                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5110                               FP_SB_ID(fp));
5111                 bnx2x_update_fpsb_idx(fp);
5112         }
5113
5114         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5115                           DEF_SB_ID);
5116         bnx2x_update_dsb_idx(bp);
5117         bnx2x_update_coalesce(bp);
5118         bnx2x_init_rx_rings(bp);
5119         bnx2x_init_tx_ring(bp);
5120         bnx2x_init_sp_ring(bp);
5121         bnx2x_init_context(bp);
5122         bnx2x_init_internal(bp, load_code);
5123         bnx2x_init_ind_table(bp);
5124         bnx2x_stats_init(bp);
5125
5126         /* At this point, we are ready for interrupts */
5127         atomic_set(&bp->intr_sem, 0);
5128
5129         /* flush all before enabling interrupts */
5130         mb();
5131         mmiowb();
5132
5133         bnx2x_int_enable(bp);
5134 }
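
/*
 * Note: the mb()/mmiowb() pair above orders every ring and status block
 * write performed during init ahead of bnx2x_int_enable(), so the first
 * interrupt cannot observe a half-initialized device.
 */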
5135
5136 /* end of nic init */
5137
5138 /*
5139  * gzip service functions
5140  */
5141
5142 static int bnx2x_gunzip_init(struct bnx2x *bp)
5143 {
5144         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5145                                               &bp->gunzip_mapping);
5146         if (bp->gunzip_buf  == NULL)
5147                 goto gunzip_nomem1;
5148
5149         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5150         if (bp->strm  == NULL)
5151                 goto gunzip_nomem2;
5152
5153         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5154                                       GFP_KERNEL);
5155         if (bp->strm->workspace == NULL)
5156                 goto gunzip_nomem3;
5157
5158         return 0;
5159
5160 gunzip_nomem3:
5161         kfree(bp->strm);
5162         bp->strm = NULL;
5163
5164 gunzip_nomem2:
5165         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5166                             bp->gunzip_mapping);
5167         bp->gunzip_buf = NULL;
5168
5169 gunzip_nomem1:
5170         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5171                " decompression\n", bp->dev->name);
5172         return -ENOMEM;
5173 }
5174
5175 static void bnx2x_gunzip_end(struct bnx2x *bp)
5176 {
5177         kfree(bp->strm->workspace);
5178
5179         kfree(bp->strm);
5180         bp->strm = NULL;
5181
5182         if (bp->gunzip_buf) {
5183                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5184                                     bp->gunzip_mapping);
5185                 bp->gunzip_buf = NULL;
5186         }
5187 }
5188
5189 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5190 {
5191         int n, rc;
5192
5193         /* check gzip header */
5194         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5195                 return -EINVAL;
5196
5197         n = 10;
5198
5199 #define FNAME                           0x8
5200
5201         if (zbuf[3] & FNAME)
5202                 while ((zbuf[n++] != 0) && (n < len));
5203
5204         bp->strm->next_in = zbuf + n;
5205         bp->strm->avail_in = len - n;
5206         bp->strm->next_out = bp->gunzip_buf;
5207         bp->strm->avail_out = FW_BUF_SIZE;
5208
5209         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5210         if (rc != Z_OK)
5211                 return rc;
5212
5213         rc = zlib_inflate(bp->strm, Z_FINISH);
5214         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5215                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5216                        bp->dev->name, bp->strm->msg);
5217
5218         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5219         if (bp->gunzip_outlen & 0x3)
5220                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5221                                     " gunzip_outlen (%d) not aligned\n",
5222                        bp->dev->name, bp->gunzip_outlen);
5223         bp->gunzip_outlen >>= 2;
5224
5225         zlib_inflateEnd(bp->strm);
5226
5227         if (rc == Z_STREAM_END)
5228                 return 0;
5229
5230         return rc;
5231 }
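
/*
 * Note: the header checks above follow the gzip format (RFC 1952).  A
 * gzip stream starts with a 10-byte fixed header -- magic bytes 0x1f
 * 0x8b, the compression method (8 == deflate), a flag byte and six more
 * bytes of mtime/XFL/OS -- optionally followed by a NUL-terminated
 * original file name when the FNAME flag (0x8) is set; that is why n
 * starts at 10 and is advanced past the name.  Passing -MAX_WBITS to
 * zlib_inflateInit2() selects raw deflate, since the gzip wrapper has
 * already been skipped by hand.
 */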
5232
5233 /* nic load/unload */
5234
5235 /*
5236  * General service functions
5237  */
5238
5239 /* send a NIG loopback debug packet */
5240 static void bnx2x_lb_pckt(struct bnx2x *bp)
5241 {
5242         u32 wb_write[3];
5243
5244         /* Ethernet source and destination addresses */
5245         wb_write[0] = 0x55555555;
5246         wb_write[1] = 0x55555555;
5247         wb_write[2] = 0x20;             /* SOP */
5248         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5249
5250         /* NON-IP protocol */
5251         wb_write[0] = 0x09000000;
5252         wb_write[1] = 0x55555555;
5253         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5254         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5255 }
5256
5257 /* Some of the internal memories
5258  * are not directly readable from the driver;
5259  * to test them we send debug packets.
5260  */
5261 static int bnx2x_int_mem_test(struct bnx2x *bp)
5262 {
5263         int factor;
5264         int count, i;
5265         u32 val = 0;
5266
5267         if (CHIP_REV_IS_FPGA(bp))
5268                 factor = 120;
5269         else if (CHIP_REV_IS_EMUL(bp))
5270                 factor = 200;
5271         else
5272                 factor = 1;
5273
5274         DP(NETIF_MSG_HW, "start part1\n");
5275
5276         /* Disable inputs of parser neighbor blocks */
5277         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5278         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5279         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5280         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5281
5282         /*  Write 0 to parser credits for CFC search request */
5283         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5284
5285         /* send Ethernet packet */
5286         bnx2x_lb_pckt(bp);
5287
5288         /* TODO: should the NIG statistics be reset here? */
5289         /* Wait until NIG register shows 1 packet of size 0x10 */
5290         count = 1000 * factor;
5291         while (count) {
5292
5293                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5294                 val = *bnx2x_sp(bp, wb_data[0]);
5295                 if (val == 0x10)
5296                         break;
5297
5298                 msleep(10);
5299                 count--;
5300         }
5301         if (val != 0x10) {
5302                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5303                 return -1;
5304         }
5305
5306         /* Wait until PRS register shows 1 packet */
5307         count = 1000 * factor;
5308         while (count) {
5309                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5310                 if (val == 1)
5311                         break;
5312
5313                 msleep(10);
5314                 count--;
5315         }
5316         if (val != 0x1) {
5317                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5318                 return -2;
5319         }
5320
5321         /* Reset and init BRB, PRS */
5322         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5323         msleep(50);
5324         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5325         msleep(50);
5326         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5327         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5328
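        /* part2: after resetting BRB and PRS (but not the NIG statistic),
         * 10 more packets are sent; the NIG octet counter should reach
         * 11 * 0x10 = 0xb0, and PRS should count 2 packets plus a third
         * once the CFC search credit is restored.
         */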
5329         DP(NETIF_MSG_HW, "part2\n");
5330
5331         /* Disable inputs of parser neighbor blocks */
5332         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5333         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5334         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5335         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5336
5337         /* Write 0 to parser credits for CFC search request */
5338         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5339
5340         /* send 10 Ethernet packets */
5341         for (i = 0; i < 10; i++)
5342                 bnx2x_lb_pckt(bp);
5343
5344         /* Wait until the NIG register shows 10 + 1 packets, i.e. a
5345            total of 11*0x10 = 0xb0 octets */
5346         count = 1000 * factor;
5347         while (count) {
5348
5349                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5350                 val = *bnx2x_sp(bp, wb_data[0]);
5351                 if (val == 0xb0)
5352                         break;
5353
5354                 msleep(10);
5355                 count--;
5356         }
5357         if (val != 0xb0) {
5358                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5359                 return -3;
5360         }
5361
5362         /* Check that the PRS register shows 2 packets */
5363         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5364         if (val != 2)
5365                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5366
5367         /* Write 1 to parser credits for CFC search request */
5368         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5369
5370         /* Wait, then check that the PRS register shows 3 packets */
5371         msleep(10 * factor);
5373         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5374         if (val != 3)
5375                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5376
5377         /* clear NIG EOP FIFO */
5378         for (i = 0; i < 11; i++)
5379                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5380         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5381         if (val != 1) {
5382                 BNX2X_ERR("clear of NIG failed\n");
5383                 return -4;
5384         }
5385
5386         /* Reset and init BRB, PRS, NIG */
5387         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5388         msleep(50);
5389         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5390         msleep(50);
5391         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5392         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5393 #ifndef BCM_ISCSI
5394         /* set NIC mode */
5395         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5396 #endif
5397
5398         /* Enable inputs of parser neighbor blocks */
5399         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5400         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5401         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5402         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5403
5404         DP(NETIF_MSG_HW, "done\n");
5405
5406         return 0; /* OK */
5407 }
5408
5409 static void enable_blocks_attention(struct bnx2x *bp)
5410 {
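        /* writing 0 to a block's INT_MASK register unmasks all of that
           block's attention interrupts */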
5411         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5412         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5413         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5414         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5415         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5416         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5417         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5418         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5419         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5420 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5421 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5422         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5423         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5424         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5425 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5426 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5427         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5428         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5429         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5430         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5431 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5432 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5433         if (CHIP_REV_IS_FPGA(bp))
5434                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5435         else
5436                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5437         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5438         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5439         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5440 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5441 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5442         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5443         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5444 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5445         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5446 }
5447
5448
5449 static void bnx2x_reset_common(struct bnx2x *bp)
5450 {
5451         /* reset_common */
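        /* writing 1s to a _CLEAR register puts the corresponding blocks
           into reset; the matching _SET register takes them out again */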
5452         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5453                0xd3ffff7f);
5454         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5455 }
5456
5457 static int bnx2x_init_common(struct bnx2x *bp)
5458 {
5459         u32 val, i;
5460
5461         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5462
5463         bnx2x_reset_common(bp);
5464         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5465         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5466
5467         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5468         if (CHIP_IS_E1H(bp))
5469                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5470
5471         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5472         msleep(30);
5473         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5474
5475         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5476         if (CHIP_IS_E1(bp)) {
5477                 /* enable HW interrupt from PXP on USDM overflow
5478                    bit 16 on INT_MASK_0 */
5479                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5480         }
5481
5482         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5483         bnx2x_init_pxp(bp);
5484
5485 #ifdef __BIG_ENDIAN
5486         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5487         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5488         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5489         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5490         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5491         /* make sure this value is 0 */
5492         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5493
5494 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5495         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5496         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5497         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5498         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5499 #endif
5500
5501         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5502 #ifdef BCM_ISCSI
5503         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5504         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5505         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5506 #endif
5507
5508         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5509                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5510
5511         /* let the HW do its magic ... */
5512         msleep(100);
5513         /* finish PXP init */
5514         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5515         if (val != 1) {
5516                 BNX2X_ERR("PXP2 CFG failed\n");
5517                 return -EBUSY;
5518         }
5519         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5520         if (val != 1) {
5521                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5522                 return -EBUSY;
5523         }
5524
5525         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5526         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5527
5528         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5529
5530         /* clean the DMAE memory */
5531         bp->dmae_ready = 1;
5532         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5533
5534         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5535         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5536         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5537         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5538
5539         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5540         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5541         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5542         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5543
5544         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5545         /* soft reset pulse */
5546         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5547         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5548
5549 #ifdef BCM_ISCSI
5550         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5551 #endif
5552
5553         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5554         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5555         if (!CHIP_REV_IS_SLOW(bp)) {
5556                 /* enable hw interrupt from doorbell Q */
5557                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5558         }
5559
5560         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5561         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5562         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5563         /* set NIC mode */
5564         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5565         if (CHIP_IS_E1H(bp))
5566                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5567
5568         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5569         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5570         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5571         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5572
5573         if (CHIP_IS_E1H(bp)) {
5574                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5575                                 STORM_INTMEM_SIZE_E1H/2);
5576                 bnx2x_init_fill(bp,
5577                                 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5578                                 0, STORM_INTMEM_SIZE_E1H/2);
5579                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
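        /* the CAM stores the MAC address as three 16-bit words with the
         * first octet of each pair in the high byte, hence the swab16()
         * on every 16-bit chunk of dev_addr below (little-endian host
         * assumed)
         */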
5580                                 STORM_INTMEM_SIZE_E1H/2);
5581                 bnx2x_init_fill(bp,
5582                                 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5583                                 0, STORM_INTMEM_SIZE_E1H/2);
5584                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5585                                 STORM_INTMEM_SIZE_E1H/2);
5586                 bnx2x_init_fill(bp,
5587                                 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5588                                 0, STORM_INTMEM_SIZE_E1H/2);
5589                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5590                                 STORM_INTMEM_SIZE_E1H/2);
5591                 bnx2x_init_fill(bp,
5592                                 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5593                                 0, STORM_INTMEM_SIZE_E1H/2);
5594         } else { /* E1 */
5595                 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5596                                 STORM_INTMEM_SIZE_E1);
5597                 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5598                                 STORM_INTMEM_SIZE_E1);
5599                 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5600                                 STORM_INTMEM_SIZE_E1);
5601                 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5602                                 STORM_INTMEM_SIZE_E1);
5603         }
5604
5605         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5606         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5607         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5608         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5609
5610         /* sync semi rtc */
5611         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5612                0x80000000);
5613         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5614                0x80000000);
5615
5616         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5617         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5618         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5619
5620         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5621         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5622                 REG_WR(bp, i, 0xc0cac01a);
5623                 /* TODO: replace with something meaningful */
5624         }
5625         bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5626         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5627
5628         if (sizeof(union cdu_context) != 1024)
5629                 /* we currently assume that a context is 1024 bytes */
5630                 printk(KERN_ALERT PFX "please adjust the size of"
5631                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5632
5633         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5634         val = (4 << 24) + (0 << 12) + 1024;
5635         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5636         if (CHIP_IS_E1(bp)) {
5637                 /* !!! fix PXP client credit until excel update */
5638                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5639                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5640         }
5641
5642         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5643         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5644         /* enable context validation interrupt from CFC */
5645         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5646
5647         /* set the thresholds to prevent CFC/CDU race */
5648         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5649
5650         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5651         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5652
5653         /* PXPCS COMMON comes here */
5654         /* Reset PCIE errors for debug */
5655         REG_WR(bp, 0x2814, 0xffffffff);
5656         REG_WR(bp, 0x3820, 0xffffffff);
5657
5658         /* EMAC0 COMMON comes here */
5659         /* EMAC1 COMMON comes here */
5660         /* DBU COMMON comes here */
5661         /* DBG COMMON comes here */
5662
5663         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5664         if (CHIP_IS_E1H(bp)) {
5665                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5666                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5667         }
5668
5669         if (CHIP_REV_IS_SLOW(bp))
5670                 msleep(200);
5671
5672         /* finish CFC init */
5673         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5674         if (val != 1) {
5675                 BNX2X_ERR("CFC LL_INIT failed\n");
5676                 return -EBUSY;
5677         }
5678         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5679         if (val != 1) {
5680                 BNX2X_ERR("CFC AC_INIT failed\n");
5681                 return -EBUSY;
5682         }
5683         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5684         if (val != 1) {
5685                 BNX2X_ERR("CFC CAM_INIT failed\n");
5686                 return -EBUSY;
5687         }
5688         REG_WR(bp, CFC_REG_DEBUG0, 0);
5689
5690         /* read a NIG statistic to see if this is the first load since
5691            power-up */
5692         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5693         val = *bnx2x_sp(bp, wb_data[0]);
5694
5695         /* do internal memory self test */
5696         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5697                 BNX2X_ERR("internal mem self test failed\n");
5698                 return -EBUSY;
5699         }
5700
5701         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5702         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5703         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5704         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5705                 bp->port.need_hw_lock = 1;
5706                 break;
5707
5708         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5709                 /* Fan failure is indicated by SPIO 5 */
5710                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5711                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5712
5713                 /* set to active low mode */
5714                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5715                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5716                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5717                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5718
5719                 /* enable interrupt to signal the IGU */
5720                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5721                 val |= (1 << MISC_REGISTERS_SPIO_5);
5722                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5723                 break;
5724
5725         default:
5726                 break;
5727         }
5728
5729         /* clear PXP2 attentions */
5730         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5731
5732         enable_blocks_attention(bp);
5733
5734         if (!BP_NOMCP(bp)) {
5735                 bnx2x_acquire_phy_lock(bp);
5736                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5737                 bnx2x_release_phy_lock(bp);
5738         } else
5739                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5740
5741         return 0;
5742 }
5743
5744 static int bnx2x_init_port(struct bnx2x *bp)
5745 {
5746         int port = BP_PORT(bp);
5747         u32 low, high;
5748         u32 val;
#ifdef BCM_ISCSI
        /* assumption: the BCM_ISCSI-only blocks below reference i, func
         * and wb_write without declaring them; per their "Port0  1 /
         * Port1  385" comments the first ILT line is taken as 0 for
         * port 0 and 384 for port 1, and func tracks the port (note:
         * ONCHIP_ADDR1/2 and PXP_ONE_ILT are only defined further down
         * in this file) */
        int i = port ? 384 : 0;
        int func = port;
        u32 wb_write[2];
#endif
5749
5750         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5751
5752         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5753
5754         /* Port PXP comes here */
5755         /* Port PXP2 comes here */
5756 #ifdef BCM_ISCSI
5757         /* Port0  1
5758          * Port1  385 */
5759         i++;
5760         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5761         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5762         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5763         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5764
5765         /* Port0  2
5766          * Port1  386 */
5767         i++;
5768         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5769         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5770         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5771         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5772
5773         /* Port0  3
5774          * Port1  387 */
5775         i++;
5776         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5777         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5778         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5779         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5780 #endif
5781         /* Port CMs come here */
5782         bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5783                              (port ? XCM_PORT1_END : XCM_PORT0_END));
5784
5785         /* Port QM comes here */
5786 #ifdef BCM_ISCSI
5787         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5788         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5789
5790         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5791                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5792 #endif
5793         /* Port DQ comes here */
5794
5795         bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5796                              (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5797         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5798                 /* no pause for emulation and FPGA */
5799                 low = 0;
5800                 high = 513;
5801         } else {
5802                 if (IS_E1HMF(bp))
5803                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5804                 else if (bp->dev->mtu > 4096) {
5805                         if (bp->flags & ONE_PORT_FLAG)
5806                                 low = 160;
5807                         else {
5808                                 val = bp->dev->mtu;
5809                                 /* (24*1024 + val*4)/256 */
5810                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5811                         }
5812                 } else
5813                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5814                 high = low + 56;        /* 14*1024/256 */
5815         }
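        /* the thresholds are in 256-byte BRB blocks; e.g. with
         * ONE_PORT_FLAG clear and an MTU of 9000,
         * low = 96 + DIV_ROUND_UP(9000, 64) = 237 and
         * high = 237 + 56 = 293 */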
5816         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5817         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5818
5819
5820         /* Port PRS comes here */
5821         /* Port TSDM comes here */
5822         /* Port CSDM comes here */
5823         /* Port USDM comes here */
5824         /* Port XSDM comes here */
5825         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5826                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5827         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5828                              port ? USEM_PORT1_END : USEM_PORT0_END);
5829         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5830                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5831         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5832                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5833         /* Port UPB comes here */
5834         /* Port XPB comes here */
5835
5836         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5837                              port ? PBF_PORT1_END : PBF_PORT0_END);
5838
5839         /* configure PBF to work without PAUSE for an MTU of 9000 */
5840         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5841
5842         /* update threshold */
5843         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5844         /* update init credit */
5845         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5846
5847         /* probe changes */
5848         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5849         msleep(5);
5850         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5851
5852 #ifdef BCM_ISCSI
5853         /* tell the searcher where the T2 table is */
5854         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5855
5856         wb_write[0] = U64_LO(bp->t2_mapping);
5857         wb_write[1] = U64_HI(bp->t2_mapping);
5858         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5859         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5860         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5861         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5862
5863         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5864         /* Port SRCH comes here */
5865 #endif
5866         /* Port CDU comes here */
5867         /* Port CFC comes here */
5868
5869         if (CHIP_IS_E1(bp)) {
5870                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5871                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5872         }
5873         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5874                              port ? HC_PORT1_END : HC_PORT0_END);
5875
5876         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5877                                     MISC_AEU_PORT0_START,
5878                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5879         /* init aeu_mask_attn_func_0/1:
5880          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5881          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5882          *             bits 4-7 are used for "per vn group attention" */
5883         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5884                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5885
5886         /* Port PXPCS comes here */
5887         /* Port EMAC0 comes here */
5888         /* Port EMAC1 comes here */
5889         /* Port DBU comes here */
5890         /* Port DBG comes here */
5891         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5892                              port ? NIG_PORT1_END : NIG_PORT0_END);
5893
5894         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5895
5896         if (CHIP_IS_E1H(bp)) {
5897                 /* 0x2 disable e1hov, 0x1 enable */
5898                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5899                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5900
5901                 /* support pause requests from USDM, TSDM and BRB */
5902                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5903
5904                 {
5905                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5906                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5907                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5908                 }
5909         }
5910
5911         /* Port MCP comes here */
5912         /* Port DMAE comes here */
5913
5914         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5915         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5916                 {
5917                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5918
5919                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5920                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5921
5922                 /* The GPIO should be swapped if the swap register is
5923                    set and active */
5924                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5925                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5926
5927                 /* Select function upon port-swap configuration */
5928                 if (port == 0) {
5929                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5930                         aeu_gpio_mask = (swap_val && swap_override) ?
5931                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5932                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5933                 } else {
5934                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5935                         aeu_gpio_mask = (swap_val && swap_override) ?
5936                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5937                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5938                 }
5939                 val = REG_RD(bp, offset);
5940                 /* add GPIO3 to group */
5941                 val |= aeu_gpio_mask;
5942                 REG_WR(bp, offset, val);
5943                 }
5944                 break;
5945
5946         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5947                 /* add SPIO 5 to group 0 */
5948                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5949                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5950                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5951                 break;
5952
5953         default:
5954                 break;
5955         }
5956
5957         bnx2x__link_reset(bp);
5958
5959         return 0;
5960 }
5961
5962 #define ILT_PER_FUNC            (768/2)
5963 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
5964 /* the phys address is shifted right 12 bits and a 1=valid bit is
5965    added at bit 52 (the 53rd bit); since this is a wide register
5966    we split it into two 32-bit writes
5967  */
5969 #define ONCHIP_ADDR1(x)         ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
5970 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)(x) >> 44)))
5971 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
5972 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
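/* PXP_ILT_RANGE() packs a client's first ILT line into the low 10 bits
 * of its L2P register and the last line into the bits above them;
 * PXP_ONE_ILT() is the degenerate single-line range.
 */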
5973
5974 #define CNIC_ILT_LINES          0
5975
5976 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5977 {
5978         int reg;
5979
5980         if (CHIP_IS_E1H(bp))
5981                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5982         else /* E1 */
5983                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5984
5985         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5986 }
5987
5988 static int bnx2x_init_func(struct bnx2x *bp)
5989 {
5990         int port = BP_PORT(bp);
5991         int func = BP_FUNC(bp);
5992         u32 addr, val;
5993         int i;
5994
5995         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
5996
5997         /* set MSI reconfigure capability */
5998         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5999         val = REG_RD(bp, addr);
6000         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6001         REG_WR(bp, addr, val);
6002
6003         i = FUNC_ILT_BASE(func);
6004
6005         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6006         if (CHIP_IS_E1H(bp)) {
6007                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6008                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6009         } else /* E1 */
6010                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6011                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6012
6013
6014         if (CHIP_IS_E1H(bp)) {
6015                 for (i = 0; i < 9; i++)
6016                         bnx2x_init_block(bp,
6017                                          cm_start[func][i], cm_end[func][i]);
6018
6019                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6020                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6021         }
6022
6023         /* HC init per function */
6024         if (CHIP_IS_E1H(bp)) {
6025                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6026
6027                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6028                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6029         }
6030         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6031
6032         /* Reset PCIE errors for debug */
6033         REG_WR(bp, 0x2114, 0xffffffff);
6034         REG_WR(bp, 0x2120, 0xffffffff);
6035
6036         return 0;
6037 }
6038
6039 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6040 {
6041         int i, rc = 0;
6042
6043         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6044            BP_FUNC(bp), load_code);
6045
6046         bp->dmae_ready = 0;
6047         mutex_init(&bp->dmae_mutex);
6048         bnx2x_gunzip_init(bp);
6049
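        /* the MCP load codes are cumulative: the first driver instance
         * gets COMMON and falls through to PORT and FUNCTION init, the
         * first instance on a port gets PORT and falls through to
         * FUNCTION, and every other instance does FUNCTION init only
         */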
6050         switch (load_code) {
6051         case FW_MSG_CODE_DRV_LOAD_COMMON:
6052                 rc = bnx2x_init_common(bp);
6053                 if (rc)
6054                         goto init_hw_err;
6055                 /* no break */
6056
6057         case FW_MSG_CODE_DRV_LOAD_PORT:
6058                 bp->dmae_ready = 1;
6059                 rc = bnx2x_init_port(bp);
6060                 if (rc)
6061                         goto init_hw_err;
6062                 /* no break */
6063
6064         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6065                 bp->dmae_ready = 1;
6066                 rc = bnx2x_init_func(bp);
6067                 if (rc)
6068                         goto init_hw_err;
6069                 break;
6070
6071         default:
6072                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6073                 break;
6074         }
6075
6076         if (!BP_NOMCP(bp)) {
6077                 int func = BP_FUNC(bp);
6078
6079                 bp->fw_drv_pulse_wr_seq =
6080                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6081                                  DRV_PULSE_SEQ_MASK);
6082                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6083                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6084                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6085         } else
6086                 bp->func_stx = 0;
6087
6088         /* this needs to be done before gunzip end */
6089         bnx2x_zero_def_sb(bp);
6090         for_each_queue(bp, i)
6091                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6092
6093 init_hw_err:
6094         bnx2x_gunzip_end(bp);
6095
6096         return rc;
6097 }
6098
6099 /* send the MCP a request, block until there is a reply */
6100 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6101 {
6102         int func = BP_FUNC(bp);
6103         u32 seq = ++bp->fw_seq;
6104         u32 rc = 0;
6105         u32 cnt = 1;
6106         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
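        /* the sequence number sent along with the command is echoed back
           by the MCP in the FW mailbox header; a matching
           FW_MSG_SEQ_NUMBER field marks the reply to this command */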
6107
6108         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6109         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6110
6111         do {
6112                 /* let the FW do its magic ... */
6113                 msleep(delay);
6114
6115                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6116
6117                 /* Give the FW up to 2 seconds (200 * 10ms; 200 * 100ms on slow chips) */
6118         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6119
6120         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6121            cnt*delay, rc, seq);
6122
6123         /* is this a reply to our command? */
6124         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6125                 rc &= FW_MSG_CODE_MASK;
6126
6127         } else {
6128                 /* FW BUG! */
6129                 BNX2X_ERR("FW failed to respond!\n");
6130                 bnx2x_fw_dump(bp);
6131                 rc = 0;
6132         }
6133
6134         return rc;
6135 }
6136
6137 static void bnx2x_free_mem(struct bnx2x *bp)
6138 {
6139
6140 #define BNX2X_PCI_FREE(x, y, size) \
6141         do { \
6142                 if (x) { \
6143                         pci_free_consistent(bp->pdev, size, x, y); \
6144                         x = NULL; \
6145                         y = 0; \
6146                 } \
6147         } while (0)
6148
6149 #define BNX2X_FREE(x) \
6150         do { \
6151                 if (x) { \
6152                         vfree(x); \
6153                         x = NULL; \
6154                 } \
6155         } while (0)
6156
6157         int i;
6158
6159         /* fastpath */
6160         /* Common */
6161         for_each_queue(bp, i) {
6162
6163                 /* status blocks */
6164                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6165                                bnx2x_fp(bp, i, status_blk_mapping),
6166                                sizeof(struct host_status_block) +
6167                                sizeof(struct eth_tx_db_data));
6168         }
6169         /* Rx */
6170         for_each_rx_queue(bp, i) {
6171
6172                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6173                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6174                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6175                                bnx2x_fp(bp, i, rx_desc_mapping),
6176                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6177
6178                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6179                                bnx2x_fp(bp, i, rx_comp_mapping),
6180                                sizeof(struct eth_fast_path_rx_cqe) *
6181                                NUM_RCQ_BD);
6182
6183                 /* SGE ring */
6184                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6185                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6186                                bnx2x_fp(bp, i, rx_sge_mapping),
6187                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6188         }
6189         /* Tx */
6190         for_each_tx_queue(bp, i) {
6191
6192                 /* fastpath tx rings: tx_buf tx_desc */
6193                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6194                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6195                                bnx2x_fp(bp, i, tx_desc_mapping),
6196                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
6197         }
6198         /* end of fastpath */
6199
6200         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6201                        sizeof(struct host_def_status_block));
6202
6203         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6204                        sizeof(struct bnx2x_slowpath));
6205
6206 #ifdef BCM_ISCSI
6207         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6208         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6209         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6210         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6211 #endif
6212         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6213
6214 #undef BNX2X_PCI_FREE
6215 #undef BNX2X_FREE
6216 }
6217
6218 static int bnx2x_alloc_mem(struct bnx2x *bp)
6219 {
6220
6221 #define BNX2X_PCI_ALLOC(x, y, size) \
6222         do { \
6223                 x = pci_alloc_consistent(bp->pdev, size, y); \
6224                 if (x == NULL) \
6225                         goto alloc_mem_err; \
6226                 memset(x, 0, size); \
6227         } while (0)
6228
6229 #define BNX2X_ALLOC(x, size) \
6230         do { \
6231                 x = vmalloc(size); \
6232                 if (x == NULL) \
6233                         goto alloc_mem_err; \
6234                 memset(x, 0, size); \
6235         } while (0)
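        /* BNX2X_PCI_ALLOC returns zeroed, DMA-coherent memory for the
         * rings the chip accesses; BNX2X_ALLOC is plain zeroed vmalloc()
         * memory for the driver-only shadow rings
         */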
6236
6237         int i;
6238
6239         /* fastpath */
6240         /* Common */
6241         for_each_queue(bp, i) {
6242                 bnx2x_fp(bp, i, bp) = bp;
6243
6244                 /* status blocks */
6245                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6246                                 &bnx2x_fp(bp, i, status_blk_mapping),
6247                                 sizeof(struct host_status_block) +
6248                                 sizeof(struct eth_tx_db_data));
6249         }
6250         /* Rx */
6251         for_each_rx_queue(bp, i) {
6252
6253                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6254                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6255                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6256                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6257                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6258                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6259
6260                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6261                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6262                                 sizeof(struct eth_fast_path_rx_cqe) *
6263                                 NUM_RCQ_BD);
6264
6265                 /* SGE ring */
6266                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6267                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6268                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6269                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6270                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6271         }
6272         /* Tx */
6273         for_each_tx_queue(bp, i) {
6274
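                /* the Tx doorbell data (hw_tx_prods) was allocated as
                 * part of the status block above (note the extra
                 * sizeof(struct eth_tx_db_data)), so it sits right behind
                 * the status block and reuses its DMA mapping
                 */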
6275                 bnx2x_fp(bp, i, hw_tx_prods) =
6276                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6277
6278                 bnx2x_fp(bp, i, tx_prods_mapping) =
6279                                 bnx2x_fp(bp, i, status_blk_mapping) +
6280                                 sizeof(struct host_status_block);
6281
6282                 /* fastpath tx rings: tx_buf tx_desc */
6283                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6284                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6285                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6286                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6287                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6288         }
6289         /* end of fastpath */
6290
6291         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6292                         sizeof(struct host_def_status_block));
6293
6294         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6295                         sizeof(struct bnx2x_slowpath));
6296
6297 #ifdef BCM_ISCSI
6298         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6299
6300         /* Initialize T1 */
6301         for (i = 0; i < 64*1024; i += 64) {
6302                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6303                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6304         }
6305
6306         /* allocate searcher T2 table;
6307            we allocate 1/4 of the allocation for T2
6308            (which is not entered into the ILT) */
6309         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6310
6311         /* Initialize T2 */
6312         for (i = 0; i < 16*1024; i += 64)
6313                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6314
6315         /* now fixup the last line in the block to point to the next block */
6316         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6317
6318         /* Timer block array (MAX_CONN*8 bytes), physically uncached; 1024 conns for now */
6319         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6320
6321         /* QM queues (128*MAX_CONN) */
6322         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6323 #endif
6324
6325         /* Slow path ring */
6326         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6327
6328         return 0;
6329
6330 alloc_mem_err:
6331         bnx2x_free_mem(bp);
6332         return -ENOMEM;
6333
6334 #undef BNX2X_PCI_ALLOC
6335 #undef BNX2X_ALLOC
6336 }
6337
6338 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6339 {
6340         int i;
6341
6342         for_each_tx_queue(bp, i) {
6343                 struct bnx2x_fastpath *fp = &bp->fp[i];
6344
6345                 u16 bd_cons = fp->tx_bd_cons;
6346                 u16 sw_prod = fp->tx_pkt_prod;
6347                 u16 sw_cons = fp->tx_pkt_cons;
6348
6349                 while (sw_cons != sw_prod) {
6350                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6351                         sw_cons++;
6352                 }
6353         }
6354 }
6355
6356 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6357 {
6358         int i, j;
6359
6360         for_each_rx_queue(bp, j) {
6361                 struct bnx2x_fastpath *fp = &bp->fp[j];
6362
6363                 for (i = 0; i < NUM_RX_BD; i++) {
6364                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6365                         struct sk_buff *skb = rx_buf->skb;
6366
6367                         if (skb == NULL)
6368                                 continue;
6369
6370                         pci_unmap_single(bp->pdev,
6371                                          pci_unmap_addr(rx_buf, mapping),
6372                                          bp->rx_buf_size,
6373                                          PCI_DMA_FROMDEVICE);
6374
6375                         rx_buf->skb = NULL;
6376                         dev_kfree_skb(skb);
6377                 }
6378                 if (!fp->disable_tpa)
6379                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6380                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6381                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6382         }
6383 }
6384
6385 static void bnx2x_free_skbs(struct bnx2x *bp)
6386 {
6387         bnx2x_free_tx_skbs(bp);
6388         bnx2x_free_rx_skbs(bp);
6389 }
6390
6391 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6392 {
6393         int i, offset = 1;
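        /* vector 0 is the slowpath interrupt; fastpath vectors start at
           offset 1 in the MSI-X table */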
6394
6395         free_irq(bp->msix_table[0].vector, bp->dev);
6396         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6397            bp->msix_table[0].vector);
6398
6399         for_each_queue(bp, i) {
6400                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6401                    "state %x\n", i, bp->msix_table[i + offset].vector,
6402                    bnx2x_fp(bp, i, state));
6403
6404                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6405         }
6406 }
6407
6408 static void bnx2x_free_irq(struct bnx2x *bp)
6409 {
6410         if (bp->flags & USING_MSIX_FLAG) {
6411                 bnx2x_free_msix_irqs(bp);
6412                 pci_disable_msix(bp->pdev);
6413                 bp->flags &= ~USING_MSIX_FLAG;
6414
6415         } else if (bp->flags & USING_MSI_FLAG) {
6416                 free_irq(bp->pdev->irq, bp->dev);
6417                 pci_disable_msi(bp->pdev);
6418                 bp->flags &= ~USING_MSI_FLAG;
6419
6420         } else
6421                 free_irq(bp->pdev->irq, bp->dev);
6422 }
6423
6424 static int bnx2x_enable_msix(struct bnx2x *bp)
6425 {
6426         int i, rc, offset = 1;
6427         int igu_vec = 0;
6428
6429         bp->msix_table[0].entry = igu_vec;
6430         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6431
6432         for_each_queue(bp, i) {
6433                 igu_vec = BP_L_ID(bp) + offset + i;
6434                 bp->msix_table[i + offset].entry = igu_vec;
6435                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6436                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6437         }
6438
6439         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6440                              BNX2X_NUM_QUEUES(bp) + offset);
6441         if (rc) {
6442                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6443                 return rc;
6444         }
6445
6446         bp->flags |= USING_MSIX_FLAG;
6447
6448         return 0;
6449 }
6450
6451 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6452 {
6453         int i, rc, offset = 1;
6454
6455         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6456                          bp->dev->name, bp->dev);
6457         if (rc) {
6458                 BNX2X_ERR("request sp irq failed\n");
6459                 return -EBUSY;
6460         }
6461
6462         for_each_queue(bp, i) {
6463                 struct bnx2x_fastpath *fp = &bp->fp[i];
6464
6465                 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6466                 rc = request_irq(bp->msix_table[i + offset].vector,
6467                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6468                 if (rc) {
6469                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6470                         bnx2x_free_msix_irqs(bp);
6471                         return -EBUSY;
6472                 }
6473
6474                 fp->state = BNX2X_FP_STATE_IRQ;
6475         }
6476
6477         i = BNX2X_NUM_QUEUES(bp);
6478         if (is_multi(bp))
6479                 printk(KERN_INFO PFX
6480                        "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6481                        bp->dev->name, bp->msix_table[0].vector,
6482                        bp->msix_table[offset].vector,
6483                        bp->msix_table[offset + i - 1].vector);
6484         else
6485                 printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6486                        bp->dev->name, bp->msix_table[0].vector,
6487                        bp->msix_table[offset + i - 1].vector);
6488
6489         return 0;
6490 }
6491
6492 static int bnx2x_enable_msi(struct bnx2x *bp)
6493 {
6494         int rc;
6495
6496         rc = pci_enable_msi(bp->pdev);
6497         if (rc) {
6498                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6499                 return -1;
6500         }
6501         bp->flags |= USING_MSI_FLAG;
6502
6503         return 0;
6504 }
6505
6506 static int bnx2x_req_irq(struct bnx2x *bp)
6507 {
6508         unsigned long flags;
6509         int rc;
6510
6511         if (bp->flags & USING_MSI_FLAG)
6512                 flags = 0;
6513         else
6514                 flags = IRQF_SHARED;
6515
6516         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6517                          bp->dev->name, bp->dev);
6518         if (!rc)
6519                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6520
6521         return rc;
6522 }
6523
6524 static void bnx2x_napi_enable(struct bnx2x *bp)
6525 {
6526         int i;
6527
6528         for_each_rx_queue(bp, i)
6529                 napi_enable(&bnx2x_fp(bp, i, napi));
6530 }
6531
6532 static void bnx2x_napi_disable(struct bnx2x *bp)
6533 {
6534         int i;
6535
6536         for_each_rx_queue(bp, i)
6537                 napi_disable(&bnx2x_fp(bp, i, napi));
6538 }
6539
6540 static void bnx2x_netif_start(struct bnx2x *bp)
6541 {
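        /* intr_sem is a disable count: only the call that brings it back
           to zero may re-enable interrupts and wake the Tx queues */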
6542         if (atomic_dec_and_test(&bp->intr_sem)) {
6543                 if (netif_running(bp->dev)) {
6544                         bnx2x_napi_enable(bp);
6545                         bnx2x_int_enable(bp);
6546                         if (bp->state == BNX2X_STATE_OPEN)
6547                                 netif_tx_wake_all_queues(bp->dev);
6548                 }
6549         }
6550 }
6551
6552 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6553 {
6554         bnx2x_int_disable_sync(bp, disable_hw);
6555         bnx2x_napi_disable(bp);
6556         if (netif_running(bp->dev)) {
6557                 netif_tx_disable(bp->dev);
6558                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6559         }
6560 }
6561
6562 /*
6563  * Init service functions
6564  */
6565
6566 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6567 {
6568         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6569         int port = BP_PORT(bp);
6570
6571         /* CAM allocation
6572          * unicasts 0-31:port0 32-63:port1
6573          * multicast 64-127:port0 128-191:port1
6574          */
6575         config->hdr.length = 2;
6576         config->hdr.offset = port ? 32 : 0;
6577         config->hdr.client_id = BP_CL_ID(bp);
6578         config->hdr.reserved1 = 0;
6579
6580         /* primary MAC */
6581         config->config_table[0].cam_entry.msb_mac_addr =
6582                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6583         config->config_table[0].cam_entry.middle_mac_addr =
6584                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6585         config->config_table[0].cam_entry.lsb_mac_addr =
6586                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6587         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6588         if (set)
6589                 config->config_table[0].target_table_entry.flags = 0;
6590         else
6591                 CAM_INVALIDATE(config->config_table[0]);
6592         config->config_table[0].target_table_entry.client_id = 0;
6593         config->config_table[0].target_table_entry.vlan_id = 0;
6594
6595         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6596            (set ? "setting" : "clearing"),
6597            config->config_table[0].cam_entry.msb_mac_addr,
6598            config->config_table[0].cam_entry.middle_mac_addr,
6599            config->config_table[0].cam_entry.lsb_mac_addr);
6600
6601         /* broadcast */
6602         config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6603         config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6604         config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6605         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6606         if (set)
6607                 config->config_table[1].target_table_entry.flags =
6608                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6609         else
6610                 CAM_INVALIDATE(config->config_table[1]);
6611         config->config_table[1].target_table_entry.client_id = 0;
6612         config->config_table[1].target_table_entry.vlan_id = 0;
6613
6614         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6615                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6616                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6617 }
6618
6619 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6620 {
6621         struct mac_configuration_cmd_e1h *config =
6622                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6623
6624         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6625                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6626                 return;
6627         }
6628
6629         /* CAM allocation for E1H
6630          * unicasts: by func number
6631          * multicast: 20+FUNC*20, 20 each
6632          */
6633         config->hdr.length = 1;
6634         config->hdr.offset = BP_FUNC(bp);
6635         config->hdr.client_id = BP_CL_ID(bp);
6636         config->hdr.reserved1 = 0;
6637
6638         /* primary MAC */
6639         config->config_table[0].msb_mac_addr =
6640                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6641         config->config_table[0].middle_mac_addr =
6642                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6643         config->config_table[0].lsb_mac_addr =
6644                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6645         config->config_table[0].client_id = BP_L_ID(bp);
6646         config->config_table[0].vlan_id = 0;
6647         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6648         if (set)
6649                 config->config_table[0].flags = BP_PORT(bp);
6650         else
6651                 config->config_table[0].flags =
6652                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6653
6654         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6655            (set ? "setting" : "clearing"),
6656            config->config_table[0].msb_mac_addr,
6657            config->config_table[0].middle_mac_addr,
6658            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6659
6660         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6661                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6662                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6663 }
6664
6665 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6666                              int *state_p, int poll)
6667 {
6668         /* can take a while if any port is running */
6669         int cnt = 500;
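        /* ramrod completions arrive over the RCQ, so in poll mode the
           loop below reaps them itself via bnx2x_rx_int(); otherwise the
           interrupt path updates *state_p from bnx2x_sp_event() */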
6670
6671         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6672            poll ? "polling" : "waiting", state, idx);
6673
6674         might_sleep();
6675         while (cnt--) {
6676                 if (poll) {
6677                         bnx2x_rx_int(bp->fp, 10);
6678                         /* if the index is not 0, the reply to some
6679                          * commands arrives on a non-default queue
6680                          */
6682                         if (idx)
6683                                 bnx2x_rx_int(&bp->fp[idx], 10);
6684                 }
6685
6686                 mb(); /* state is changed by bnx2x_sp_event() */
6687                 if (*state_p == state)
6688                         return 0;
6689
6690                 msleep(1);
6691         }
6692
6693         /* timeout! */
6694         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6695                   poll ? "polling" : "waiting", state, idx);
6696 #ifdef BNX2X_STOP_ON_ERROR
6697         bnx2x_panic();
6698 #endif
6699
6700         return -EBUSY;
6701 }
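
/* Note on bnx2x_wait_ramrod(): ramrod completions arrive as fastpath events
 * and *state_p is updated from bnx2x_sp_event().  With poll set the loop
 * drives RX completion processing itself via bnx2x_rx_int() (used on paths
 * where interrupts cannot be relied on, e.g. during unload after the IRQs
 * have been freed); otherwise it simply rechecks the state, giving up after
 * roughly 500 x 1 ms.
 */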
6702
6703 static int bnx2x_setup_leading(struct bnx2x *bp)
6704 {
6705         int rc;
6706
6707         /* reset IGU state */
6708         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6709
6710         /* SETUP ramrod */
6711         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6712
6713         /* Wait for completion */
6714         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6715
6716         return rc;
6717 }
6718
6719 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6720 {
6721         struct bnx2x_fastpath *fp = &bp->fp[index];
6722
6723         /* reset IGU state */
6724         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6725
6726         /* SETUP ramrod */
6727         fp->state = BNX2X_FP_STATE_OPENING;
6728         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6729                       fp->cl_id, 0);
6730
6731         /* Wait for completion */
6732         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6733                                  &(fp->state), 0);
6734 }
6735
6736 static int bnx2x_poll(struct napi_struct *napi, int budget);
6737
6738 static void bnx2x_set_int_mode(struct bnx2x *bp)
6739 {
6740         int num_queues;
6741
6742         switch (int_mode) {
6743         case INT_MODE_INTx:
6744         case INT_MODE_MSI:
6745                 num_queues = 1;
6746                 bp->num_rx_queues = num_queues;
6747                 bp->num_tx_queues = num_queues;
6748                 DP(NETIF_MSG_IFUP,
6749                    "set number of queues to %d\n", num_queues);
6750                 break;
6751
6752         case INT_MODE_MSIX:
6753         default:
6754                 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6755                         num_queues = min_t(u32, num_online_cpus(),
6756                                            BNX2X_MAX_QUEUES(bp));
6757                 else
6758                         num_queues = 1;
6759                 bp->num_rx_queues = num_queues;
6760                 bp->num_tx_queues = num_queues;
6761                 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6762                    "  number of tx queues to %d\n",
6763                    bp->num_rx_queues, bp->num_tx_queues);
6764                 /* if we can't use MSI-X we only need one fp,
6765                  * so try to enable MSI-X with the requested number of fp's
6766                  * and fall back to MSI or legacy INTx with one fp
6767                  */
6768                 if (bnx2x_enable_msix(bp)) {
6769                         /* failed to enable MSI-X */
6770                         num_queues = 1;
6771                         bp->num_rx_queues = num_queues;
6772                         bp->num_tx_queues = num_queues;
6773                         if (bp->multi_mode)
6774                                 BNX2X_ERR("Multi requested but failed to "
6775                                           "enable MSI-X  set number of "
6776                                           "queues to %d\n", num_queues);
6777                 }
6778                 break;
6779         }
6780         bp->dev->real_num_tx_queues = bp->num_tx_queues;
6781 }
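
/* Queue-count selection above, by way of example: with multi_mode set to
 * ETH_RSS_MODE_REGULAR on, say, an 8-CPU system the driver requests
 * min(8, BNX2X_MAX_QUEUES(bp)) queues and tries to get that many MSI-X
 * vectors; if bnx2x_enable_msix() fails it drops back to a single queue
 * (and later to MSI or INTx).  Forcing int_mode to 1 (INTx) or 2 (MSI)
 * always yields a single queue.  (The 8-CPU figure is only an illustration.)
 */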
6782
6783 static void bnx2x_set_rx_mode(struct net_device *dev);
6784
6785 /* must be called with rtnl_lock */
6786 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6787 {
6788         u32 load_code;
6789         int i, rc = 0;
6790 #ifdef BNX2X_STOP_ON_ERROR
6791         DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6792         if (unlikely(bp->panic))
6793                 return -EPERM;
6794 #endif
6795
6796         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6797
6798         bnx2x_set_int_mode(bp);
6799
6800         if (bnx2x_alloc_mem(bp))
6801                 return -ENOMEM;
6802
6803         for_each_rx_queue(bp, i)
6804                 bnx2x_fp(bp, i, disable_tpa) =
6805                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6806
6807         for_each_rx_queue(bp, i)
6808                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6809                                bnx2x_poll, 128);
6810
6811 #ifdef BNX2X_STOP_ON_ERROR
6812         for_each_rx_queue(bp, i) {
6813                 struct bnx2x_fastpath *fp = &bp->fp[i];
6814
6815                 fp->poll_no_work = 0;
6816                 fp->poll_calls = 0;
6817                 fp->poll_max_calls = 0;
6818                 fp->poll_complete = 0;
6819                 fp->poll_exit = 0;
6820         }
6821 #endif
6822         bnx2x_napi_enable(bp);
6823
6824         if (bp->flags & USING_MSIX_FLAG) {
6825                 rc = bnx2x_req_msix_irqs(bp);
6826                 if (rc) {
6827                         pci_disable_msix(bp->pdev);
6828                         goto load_error1;
6829                 }
6830         } else {
6831                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6832                         bnx2x_enable_msi(bp);
6833                 bnx2x_ack_int(bp);
6834                 rc = bnx2x_req_irq(bp);
6835                 if (rc) {
6836                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6837                         if (bp->flags & USING_MSI_FLAG)
6838                                 pci_disable_msi(bp->pdev);
6839                         goto load_error1;
6840                 }
6841                 if (bp->flags & USING_MSI_FLAG) {
6842                         bp->dev->irq = bp->pdev->irq;
6843                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6844                                bp->dev->name, bp->pdev->irq);
6845                 }
6846         }
6847
6848         /* Send LOAD_REQUEST command to the MCP.
6849            Returns the type of LOAD command:
6850            if this is the first port to be initialized,
6851            common blocks should be initialized, otherwise not
6852         */
6853         if (!BP_NOMCP(bp)) {
6854                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6855                 if (!load_code) {
6856                         BNX2X_ERR("MCP response failure, aborting\n");
6857                         rc = -EBUSY;
6858                         goto load_error2;
6859                 }
6860                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6861                         rc = -EBUSY; /* other port in diagnostic mode */
6862                         goto load_error2;
6863                 }
6864
6865         } else {
6866                 int port = BP_PORT(bp);
6867
6868                 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6869                    load_count[0], load_count[1], load_count[2]);
6870                 load_count[0]++;
6871                 load_count[1 + port]++;
6872                 DP(NETIF_MSG_IFUP, "NO MCP new load counts       %d, %d, %d\n",
6873                    load_count[0], load_count[1], load_count[2]);
6874                 if (load_count[0] == 1)
6875                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6876                 else if (load_count[1 + port] == 1)
6877                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6878                 else
6879                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6880         }
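
        /* Example of the no-MCP bookkeeping above: load_count[0] counts all
         * loads on the device and load_count[1 + port] counts loads on this
         * port, so the first function to load anywhere gets LOAD_COMMON, the
         * first on its port gets LOAD_PORT, and any later one LOAD_FUNCTION,
         * mirroring the answer the MCP would otherwise have given.
         */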
6881
6882         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6883             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6884                 bp->port.pmf = 1;
6885         else
6886                 bp->port.pmf = 0;
6887         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6888
6889         /* Initialize HW */
6890         rc = bnx2x_init_hw(bp, load_code);
6891         if (rc) {
6892                 BNX2X_ERR("HW init failed, aborting\n");
6893                 goto load_error2;
6894         }
6895
6896         /* Setup NIC internals and enable interrupts */
6897         bnx2x_nic_init(bp, load_code);
6898
6899         /* Send LOAD_DONE command to MCP */
6900         if (!BP_NOMCP(bp)) {
6901                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6902                 if (!load_code) {
6903                         BNX2X_ERR("MCP response failure, aborting\n");
6904                         rc = -EBUSY;
6905                         goto load_error3;
6906                 }
6907         }
6908
6909         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6910
6911         rc = bnx2x_setup_leading(bp);
6912         if (rc) {
6913                 BNX2X_ERR("Setup leading failed!\n");
6914                 goto load_error3;
6915         }
6916
6917         if (CHIP_IS_E1H(bp))
6918                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6919                         BNX2X_ERR("!!!  mf_cfg function disabled\n");
6920                         bp->state = BNX2X_STATE_DISABLED;
6921                 }
6922
6923         if (bp->state == BNX2X_STATE_OPEN)
6924                 for_each_nondefault_queue(bp, i) {
6925                         rc = bnx2x_setup_multi(bp, i);
6926                         if (rc)
6927                                 goto load_error3;
6928                 }
6929
6930         if (CHIP_IS_E1(bp))
6931                 bnx2x_set_mac_addr_e1(bp, 1);
6932         else
6933                 bnx2x_set_mac_addr_e1h(bp, 1);
6934
6935         if (bp->port.pmf)
6936                 bnx2x_initial_phy_init(bp);
6937
6938         /* Start fast path */
6939         switch (load_mode) {
6940         case LOAD_NORMAL:
6941                 /* Tx queues should only be re-enabled */
6942                 netif_tx_wake_all_queues(bp->dev);
6943                 /* Initialize the receive filter. */
6944                 bnx2x_set_rx_mode(bp->dev);
6945                 break;
6946
6947         case LOAD_OPEN:
6948                 netif_tx_start_all_queues(bp->dev);
6949                 /* Initialize the receive filter. */
6950                 bnx2x_set_rx_mode(bp->dev);
6951                 break;
6952
6953         case LOAD_DIAG:
6954                 /* Initialize the receive filter. */
6955                 bnx2x_set_rx_mode(bp->dev);
6956                 bp->state = BNX2X_STATE_DIAG;
6957                 break;
6958
6959         default:
6960                 break;
6961         }
6962
6963         if (!bp->port.pmf)
6964                 bnx2x__link_status_update(bp);
6965
6966         /* start the timer */
6967         mod_timer(&bp->timer, jiffies + bp->current_interval);
6968
6969
6970         return 0;
6971
6972 load_error3:
6973         bnx2x_int_disable_sync(bp, 1);
6974         if (!BP_NOMCP(bp)) {
6975                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6976                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6977         }
6978         bp->port.pmf = 0;
6979         /* Free SKBs, SGEs, TPA pool and driver internals */
6980         bnx2x_free_skbs(bp);
6981         for_each_rx_queue(bp, i)
6982                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6983 load_error2:
6984         /* Release IRQs */
6985         bnx2x_free_irq(bp);
6986 load_error1:
6987         bnx2x_napi_disable(bp);
6988         for_each_rx_queue(bp, i)
6989                 netif_napi_del(&bnx2x_fp(bp, i, napi));
6990         bnx2x_free_mem(bp);
6991
6992         /* TBD we really need to reset the chip
6993            if we want to recover from this */
6994         return rc;
6995 }
6996
6997 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6998 {
6999         struct bnx2x_fastpath *fp = &bp->fp[index];
7000         int rc;
7001
7002         /* halt the connection */
7003         fp->state = BNX2X_FP_STATE_HALTING;
7004         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7005
7006         /* Wait for completion */
7007         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7008                                &(fp->state), 1);
7009         if (rc) /* timeout */
7010                 return rc;
7011
7012         /* delete cfc entry */
7013         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7014
7015         /* Wait for completion */
7016         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7017                                &(fp->state), 1);
7018         return rc;
7019 }
7020
7021 static int bnx2x_stop_leading(struct bnx2x *bp)
7022 {
7023         u16 dsb_sp_prod_idx;
7024         /* if the other port is handling traffic,
7025            this can take a lot of time */
7026         int cnt = 500;
7027         int rc;
7028
7029         might_sleep();
7030
7031         /* Send HALT ramrod */
7032         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7033         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
7034
7035         /* Wait for completion */
7036         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7037                                &(bp->fp[0].state), 1);
7038         if (rc) /* timeout */
7039                 return rc;
7040
7041         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7042
7043         /* Send PORT_DELETE ramrod */
7044         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7045
7046         /* Wait for the completion to arrive on the default status block.
7047            We are going to reset the chip anyway,
7048            so there is not much to do if this times out
7049          */
7050         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7051                 if (!cnt) {
7052                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7053                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7054                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7055 #ifdef BNX2X_STOP_ON_ERROR
7056                         bnx2x_panic();
7057 #else
7058                         rc = -EBUSY;
7059 #endif
7060                         break;
7061                 }
7062                 cnt--;
7063                 msleep(1);
7064                 rmb(); /* Refresh the dsb_sp_prod */
7065         }
7066         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7067         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7068
7069         return rc;
7070 }
7071
7072 static void bnx2x_reset_func(struct bnx2x *bp)
7073 {
7074         int port = BP_PORT(bp);
7075         int func = BP_FUNC(bp);
7076         int base, i;
7077
7078         /* Configure IGU */
7079         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7080         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7081
7082         /* Clear ILT */
7083         base = FUNC_ILT_BASE(func);
7084         for (i = base; i < base + ILT_PER_FUNC; i++)
7085                 bnx2x_ilt_wr(bp, i, 0);
7086 }
7087
7088 static void bnx2x_reset_port(struct bnx2x *bp)
7089 {
7090         int port = BP_PORT(bp);
7091         u32 val;
7092
7093         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7094
7095         /* Do not rcv packets to BRB */
7096         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7097         /* Do not direct rcv packets that are not for MCP to the BRB */
7098         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7099                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7100
7101         /* Configure AEU */
7102         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7103
7104         msleep(100);
7105         /* Check for BRB port occupancy */
7106         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7107         if (val)
7108                 DP(NETIF_MSG_IFDOWN,
7109                    "BRB1 is not empty  %d blocks are occupied\n", val);
7110
7111         /* TODO: Close Doorbell port? */
7112 }
7113
7114 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7115 {
7116         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7117            BP_FUNC(bp), reset_code);
7118
7119         switch (reset_code) {
7120         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7121                 bnx2x_reset_port(bp);
7122                 bnx2x_reset_func(bp);
7123                 bnx2x_reset_common(bp);
7124                 break;
7125
7126         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7127                 bnx2x_reset_port(bp);
7128                 bnx2x_reset_func(bp);
7129                 break;
7130
7131         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7132                 bnx2x_reset_func(bp);
7133                 break;
7134
7135         default:
7136                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7137                 break;
7138         }
7139 }
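
/* The unload reset codes above nest in scope: UNLOAD_COMMON resets the port,
 * the function and the common blocks; UNLOAD_PORT the port and the function;
 * UNLOAD_FUNCTION only the per-function state (HC leading/trailing edges
 * and the function's ILT range).
 */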
7140
7141 /* must be called with rtnl_lock */
7142 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7143 {
7144         int port = BP_PORT(bp);
7145         u32 reset_code = 0;
7146         int i, cnt, rc;
7147
7148         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7149
7150         bp->rx_mode = BNX2X_RX_MODE_NONE;
7151         bnx2x_set_storm_rx_mode(bp);
7152
7153         bnx2x_netif_stop(bp, 1);
7154
7155         del_timer_sync(&bp->timer);
7156         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7157                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7158         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7159
7160         /* Release IRQs */
7161         bnx2x_free_irq(bp);
7162
7163         /* Wait until tx fastpath tasks complete */
7164         for_each_tx_queue(bp, i) {
7165                 struct bnx2x_fastpath *fp = &bp->fp[i];
7166
7167                 cnt = 1000;
7168                 smp_rmb();
7169                 while (bnx2x_has_tx_work_unload(fp)) {
7170
7171                         bnx2x_tx_int(fp, 1000);
7172                         if (!cnt) {
7173                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7174                                           i);
7175 #ifdef BNX2X_STOP_ON_ERROR
7176                                 bnx2x_panic();
7177                                 return -EBUSY;
7178 #else
7179                                 break;
7180 #endif
7181                         }
7182                         cnt--;
7183                         msleep(1);
7184                         smp_rmb();
7185                 }
7186         }
7187         /* Give HW time to discard old tx messages */
7188         msleep(1);
7189
7190         if (CHIP_IS_E1(bp)) {
7191                 struct mac_configuration_cmd *config =
7192                                                 bnx2x_sp(bp, mcast_config);
7193
7194                 bnx2x_set_mac_addr_e1(bp, 0);
7195
7196                 for (i = 0; i < config->hdr.length; i++)
7197                         CAM_INVALIDATE(config->config_table[i]);
7198
7199                 config->hdr.length = i;
7200                 if (CHIP_REV_IS_SLOW(bp))
7201                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7202                 else
7203                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7204                 config->hdr.client_id = BP_CL_ID(bp);
7205                 config->hdr.reserved1 = 0;
7206
7207                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7208                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7209                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7210
7211         } else { /* E1H */
7212                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7213
7214                 bnx2x_set_mac_addr_e1h(bp, 0);
7215
7216                 for (i = 0; i < MC_HASH_SIZE; i++)
7217                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7218         }
7219
7220         if (unload_mode == UNLOAD_NORMAL)
7221                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7222
7223         else if (bp->flags & NO_WOL_FLAG) {
7224                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7225                 if (CHIP_IS_E1H(bp))
7226                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7227
7228         } else if (bp->wol) {
7229                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7230                 u8 *mac_addr = bp->dev->dev_addr;
7231                 u32 val;
7232                 /* The MAC address is written to entries 1-4 to
7233                    preserve entry 0, which is used by the PMF */
7234                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7235
7236                 val = (mac_addr[0] << 8) | mac_addr[1];
7237                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7238
7239                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7240                       (mac_addr[4] << 8) | mac_addr[5];
7241                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7242
7243                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7244
7245         } else
7246                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
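
        /* WOL CAM arithmetic above: each EMAC MAC_MATCH entry is two 32-bit
         * registers (8 bytes), so entry = (BP_E1HVN(bp) + 1)*8 lands vn 0-3
         * in match entries 1-4 (byte offsets 8, 16, 24, 32), leaving entry 0
         * for the PMF as the comment notes.
         */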
7247
7248         /* Close multi and leading connections.
7249            Completions for ramrods are collected in a synchronous way */
7250         for_each_nondefault_queue(bp, i)
7251                 if (bnx2x_stop_multi(bp, i))
7252                         goto unload_error;
7253
7254         rc = bnx2x_stop_leading(bp);
7255         if (rc) {
7256                 BNX2X_ERR("Stop leading failed!\n");
7257 #ifdef BNX2X_STOP_ON_ERROR
7258                 return -EBUSY;
7259 #else
7260                 goto unload_error;
7261 #endif
7262         }
7263
7264 unload_error:
7265         if (!BP_NOMCP(bp))
7266                 reset_code = bnx2x_fw_command(bp, reset_code);
7267         else {
7268                 DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
7269                    load_count[0], load_count[1], load_count[2]);
7270                 load_count[0]--;
7271                 load_count[1 + port]--;
7272                 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
7273                    load_count[0], load_count[1], load_count[2]);
7274                 if (load_count[0] == 0)
7275                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7276                 else if (load_count[1 + port] == 0)
7277                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7278                 else
7279                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7280         }
7281
7282         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7283             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7284                 bnx2x__link_reset(bp);
7285
7286         /* Reset the chip */
7287         bnx2x_reset_chip(bp, reset_code);
7288
7289         /* Report UNLOAD_DONE to MCP */
7290         if (!BP_NOMCP(bp))
7291                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7292         bp->port.pmf = 0;
7293
7294         /* Free SKBs, SGEs, TPA pool and driver internals */
7295         bnx2x_free_skbs(bp);
7296         for_each_rx_queue(bp, i)
7297                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7298         for_each_rx_queue(bp, i)
7299                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7300         bnx2x_free_mem(bp);
7301
7302         bp->state = BNX2X_STATE_CLOSED;
7303
7304         netif_carrier_off(bp->dev);
7305
7306         return 0;
7307 }
7308
7309 static void bnx2x_reset_task(struct work_struct *work)
7310 {
7311         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7312
7313 #ifdef BNX2X_STOP_ON_ERROR
7314         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7315                   " so reset not done to allow debug dump,\n"
7316          KERN_ERR " you will need to reboot when done\n");
7317         return;
7318 #endif
7319
7320         rtnl_lock();
7321
7322         if (!netif_running(bp->dev))
7323                 goto reset_task_exit;
7324
7325         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7326         bnx2x_nic_load(bp, LOAD_NORMAL);
7327
7328 reset_task_exit:
7329         rtnl_unlock();
7330 }
7331
7332 /* end of nic load/unload */
7333
7334 /* ethtool_ops */
7335
7336 /*
7337  * Init service functions
7338  */
7339
7340 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7341 {
7342         switch (func) {
7343         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7344         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7345         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7346         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7347         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7348         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7349         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7350         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7351         default:
7352                 BNX2X_ERR("Unsupported function index: %d\n", func);
7353                 return (u32)(-1);
7354         }
7355 }
7356
7357 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7358 {
7359         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7360
7361         /* Flush all outstanding writes */
7362         mmiowb();
7363
7364         /* Pretend to be function 0 */
7365         REG_WR(bp, reg, 0);
7366         /* Flush the GRC transaction (in the chip) */
7367         new_val = REG_RD(bp, reg);
7368         if (new_val != 0) {
7369                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7370                           new_val);
7371                 BUG();
7372         }
7373
7374         /* From now we are in the "like-E1" mode */
7375         bnx2x_int_disable(bp);
7376
7377         /* Flush all outstanding writes */
7378         mmiowb();
7379
7380         /* Restore the original function settings */
7381         REG_WR(bp, reg, orig_func);
7382         new_val = REG_RD(bp, reg);
7383         if (new_val != orig_func) {
7384                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7385                           orig_func, new_val);
7386                 BUG();
7387         }
7388 }
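
/* bnx2x_undi_int_disable_e1h() relies on the PXP2 "pretend" mechanism:
 * writing a function number into this PCI function's pretend register makes
 * subsequent GRC accesses appear to come from that function, so the E1H
 * device can briefly be driven "like E1" (as function 0) just long enough
 * to disable its interrupts.  The read-back after each write both flushes
 * the GRC transaction and verifies that the register took effect.
 */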
7389
7390 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7391 {
7392         if (CHIP_IS_E1H(bp))
7393                 bnx2x_undi_int_disable_e1h(bp, func);
7394         else
7395                 bnx2x_int_disable(bp);
7396 }
7397
7398 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7399 {
7400         u32 val;
7401
7402         /* Check if there is any driver already loaded */
7403         val = REG_RD(bp, MISC_REG_UNPREPARED);
7404         if (val == 0x1) {
7405                 /* Check if it is the UNDI driver
7406                  * UNDI driver initializes CID offset for normal bell to 0x7
7407                  */
7408                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7409                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7410                 if (val == 0x7) {
7411                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7412                         /* save our func */
7413                         int func = BP_FUNC(bp);
7414                         u32 swap_en;
7415                         u32 swap_val;
7416
7417                         /* clear the UNDI indication */
7418                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7419
7420                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7421
7422                         /* try to unload UNDI on port 0 */
7423                         bp->func = 0;
7424                         bp->fw_seq =
7425                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7426                                 DRV_MSG_SEQ_NUMBER_MASK);
7427                         reset_code = bnx2x_fw_command(bp, reset_code);
7428
7429                         /* if UNDI is loaded on the other port */
7430                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7431
7432                                 /* send "DONE" for previous unload */
7433                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7434
7435                                 /* unload UNDI on port 1 */
7436                                 bp->func = 1;
7437                                 bp->fw_seq =
7438                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7439                                         DRV_MSG_SEQ_NUMBER_MASK);
7440                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7441
7442                                 bnx2x_fw_command(bp, reset_code);
7443                         }
7444
7445                         /* now it's safe to release the lock */
7446                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7447
7448                         bnx2x_undi_int_disable(bp, func);
7449
7450                         /* close input traffic and wait for it to stop */
7451                         /* Do not rcv packets to BRB */
7452                         REG_WR(bp,
7453                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7454                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7455                         /* Do not direct rcv packets that are not for MCP to
7456                          * the BRB */
7457                         REG_WR(bp,
7458                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7459                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7460                         /* clear AEU */
7461                         REG_WR(bp,
7462                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7463                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7464                         msleep(10);
7465
7466                         /* save NIG port swap info */
7467                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7468                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7469                         /* reset device */
7470                         REG_WR(bp,
7471                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7472                                0xd3ffffff);
7473                         REG_WR(bp,
7474                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7475                                0x1403);
7476                         /* take the NIG out of reset and restore swap values */
7477                         REG_WR(bp,
7478                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7479                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7480                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7481                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7482
7483                         /* send unload done to the MCP */
7484                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7485
7486                         /* restore our func and fw_seq */
7487                         bp->func = func;
7488                         bp->fw_seq =
7489                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7490                                 DRV_MSG_SEQ_NUMBER_MASK);
7491
7492                 } else
7493                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7494         }
7495 }
7496
7497 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7498 {
7499         u32 val, val2, val3, val4, id;
7500         u16 pmc;
7501
7502         /* Get the chip revision id and number. */
7503         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7504         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7505         id = ((val & 0xffff) << 16);
7506         val = REG_RD(bp, MISC_REG_CHIP_REV);
7507         id |= ((val & 0xf) << 12);
7508         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7509         id |= ((val & 0xff) << 4);
7510         val = REG_RD(bp, MISC_REG_BOND_ID);
7511         id |= (val & 0xf);
7512         bp->common.chip_id = id;
7513         bp->link_params.chip_id = bp->common.chip_id;
7514         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
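
        /* chip_id layout, e.g. (hypothetical register values): chip num
         * 0x164e, rev 0, metal 0, bond 0 compose to id = 0x164e0000, per
         * the "num:16-31, rev:12-15, metal:4-11, bond_id:0-3" layout above.
         */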
7515
7516         val = (REG_RD(bp, 0x2874) & 0x55);
7517         if ((bp->common.chip_id & 0x1) ||
7518             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7519                 bp->flags |= ONE_PORT_FLAG;
7520                 BNX2X_DEV_INFO("single port device\n");
7521         }
7522
7523         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7524         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7525                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7526         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7527                        bp->common.flash_size, bp->common.flash_size);
7528
7529         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7530         bp->link_params.shmem_base = bp->common.shmem_base;
7531         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7532
7533         if (!bp->common.shmem_base ||
7534             (bp->common.shmem_base < 0xA0000) ||
7535             (bp->common.shmem_base >= 0xC0000)) {
7536                 BNX2X_DEV_INFO("MCP not active\n");
7537                 bp->flags |= NO_MCP_FLAG;
7538                 return;
7539         }
7540
7541         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7542         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7543                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7544                 BNX2X_ERR("BAD MCP validity signature\n");
7545
7546         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7547         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7548
7549         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7550                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7551                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7552
7553         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7554         bp->common.bc_ver = val;
7555         BNX2X_DEV_INFO("bc_ver %X\n", val);
7556         if (val < BNX2X_BC_VER) {
7557                 /* for now only warn;
7558                  * later we might need to enforce this */
7559                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7560                           " please upgrade BC\n", BNX2X_BC_VER, val);
7561         }
7562
7563         if (BP_E1HVN(bp) == 0) {
7564                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7565                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7566         } else {
7567                 /* no WOL capability for E1HVN != 0 */
7568                 bp->flags |= NO_WOL_FLAG;
7569         }
7570         BNX2X_DEV_INFO("%sWoL capable\n",
7571                        (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7572
7573         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7574         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7575         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7576         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7577
7578         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7579                val, val2, val3, val4);
7580 }
7581
7582 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7583                                                     u32 switch_cfg)
7584 {
7585         int port = BP_PORT(bp);
7586         u32 ext_phy_type;
7587
7588         switch (switch_cfg) {
7589         case SWITCH_CFG_1G:
7590                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7591
7592                 ext_phy_type =
7593                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7594                 switch (ext_phy_type) {
7595                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7596                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7597                                        ext_phy_type);
7598
7599                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7600                                                SUPPORTED_10baseT_Full |
7601                                                SUPPORTED_100baseT_Half |
7602                                                SUPPORTED_100baseT_Full |
7603                                                SUPPORTED_1000baseT_Full |
7604                                                SUPPORTED_2500baseX_Full |
7605                                                SUPPORTED_TP |
7606                                                SUPPORTED_FIBRE |
7607                                                SUPPORTED_Autoneg |
7608                                                SUPPORTED_Pause |
7609                                                SUPPORTED_Asym_Pause);
7610                         break;
7611
7612                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7613                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7614                                        ext_phy_type);
7615
7616                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7617                                                SUPPORTED_10baseT_Full |
7618                                                SUPPORTED_100baseT_Half |
7619                                                SUPPORTED_100baseT_Full |
7620                                                SUPPORTED_1000baseT_Full |
7621                                                SUPPORTED_TP |
7622                                                SUPPORTED_FIBRE |
7623                                                SUPPORTED_Autoneg |
7624                                                SUPPORTED_Pause |
7625                                                SUPPORTED_Asym_Pause);
7626                         break;
7627
7628                 default:
7629                         BNX2X_ERR("NVRAM config error. "
7630                                   "BAD SerDes ext_phy_config 0x%x\n",
7631                                   bp->link_params.ext_phy_config);
7632                         return;
7633                 }
7634
7635                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7636                                            port*0x10);
7637                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7638                 break;
7639
7640         case SWITCH_CFG_10G:
7641                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7642
7643                 ext_phy_type =
7644                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7645                 switch (ext_phy_type) {
7646                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7647                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7648                                        ext_phy_type);
7649
7650                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7651                                                SUPPORTED_10baseT_Full |
7652                                                SUPPORTED_100baseT_Half |
7653                                                SUPPORTED_100baseT_Full |
7654                                                SUPPORTED_1000baseT_Full |
7655                                                SUPPORTED_2500baseX_Full |
7656                                                SUPPORTED_10000baseT_Full |
7657                                                SUPPORTED_TP |
7658                                                SUPPORTED_FIBRE |
7659                                                SUPPORTED_Autoneg |
7660                                                SUPPORTED_Pause |
7661                                                SUPPORTED_Asym_Pause);
7662                         break;
7663
7664                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7665                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7666                                        ext_phy_type);
7667
7668                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7669                                                SUPPORTED_1000baseT_Full |
7670                                                SUPPORTED_FIBRE |
7671                                                SUPPORTED_Autoneg |
7672                                                SUPPORTED_Pause |
7673                                                SUPPORTED_Asym_Pause);
7674                         break;
7675
7676                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7677                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7678                                        ext_phy_type);
7679
7680                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7681                                                SUPPORTED_2500baseX_Full |
7682                                                SUPPORTED_1000baseT_Full |
7683                                                SUPPORTED_FIBRE |
7684                                                SUPPORTED_Autoneg |
7685                                                SUPPORTED_Pause |
7686                                                SUPPORTED_Asym_Pause);
7687                         break;
7688
7689                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7690                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7691                                        ext_phy_type);
7692
7693                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7694                                                SUPPORTED_FIBRE |
7695                                                SUPPORTED_Pause |
7696                                                SUPPORTED_Asym_Pause);
7697                         break;
7698
7699                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7700                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7701                                        ext_phy_type);
7702
7703                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7704                                                SUPPORTED_1000baseT_Full |
7705                                                SUPPORTED_FIBRE |
7706                                                SUPPORTED_Pause |
7707                                                SUPPORTED_Asym_Pause);
7708                         break;
7709
7710                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7711                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7712                                        ext_phy_type);
7713
7714                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7715                                                SUPPORTED_1000baseT_Full |
7716                                                SUPPORTED_Autoneg |
7717                                                SUPPORTED_FIBRE |
7718                                                SUPPORTED_Pause |
7719                                                SUPPORTED_Asym_Pause);
7720                         break;
7721
7722                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7723                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7724                                        ext_phy_type);
7725
7726                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7727                                                SUPPORTED_TP |
7728                                                SUPPORTED_Autoneg |
7729                                                SUPPORTED_Pause |
7730                                                SUPPORTED_Asym_Pause);
7731                         break;
7732
7733                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7734                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7735                                   bp->link_params.ext_phy_config);
7736                         break;
7737
7738                 default:
7739                         BNX2X_ERR("NVRAM config error. "
7740                                   "BAD XGXS ext_phy_config 0x%x\n",
7741                                   bp->link_params.ext_phy_config);
7742                         return;
7743                 }
7744
7745                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7746                                            port*0x18);
7747                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7748
7749                 break;
7750
7751         default:
7752                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7753                           bp->port.link_config);
7754                 return;
7755         }
7756         bp->link_params.phy_addr = bp->port.phy_addr;
7757
7758         /* mask what we support according to speed_cap_mask */
7759         if (!(bp->link_params.speed_cap_mask &
7760                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7761                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7762
7763         if (!(bp->link_params.speed_cap_mask &
7764                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7765                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7766
7767         if (!(bp->link_params.speed_cap_mask &
7768                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7769                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7770
7771         if (!(bp->link_params.speed_cap_mask &
7772                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7773                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7774
7775         if (!(bp->link_params.speed_cap_mask &
7776                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7777                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7778                                         SUPPORTED_1000baseT_Full);
7779
7780         if (!(bp->link_params.speed_cap_mask &
7781                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7782                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7783
7784         if (!(bp->link_params.speed_cap_mask &
7785                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7786                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7787
7788         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7789 }
7790
7791 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7792 {
7793         bp->link_params.req_duplex = DUPLEX_FULL;
7794
7795         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7796         case PORT_FEATURE_LINK_SPEED_AUTO:
7797                 if (bp->port.supported & SUPPORTED_Autoneg) {
7798                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7799                         bp->port.advertising = bp->port.supported;
7800                 } else {
7801                         u32 ext_phy_type =
7802                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7803
7804                         if ((ext_phy_type ==
7805                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7806                             (ext_phy_type ==
7807                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7808                                 /* force 10G, no AN */
7809                                 bp->link_params.req_line_speed = SPEED_10000;
7810                                 bp->port.advertising =
7811                                                 (ADVERTISED_10000baseT_Full |
7812                                                  ADVERTISED_FIBRE);
7813                                 break;
7814                         }
7815                         BNX2X_ERR("NVRAM config error. "
7816                                   "Invalid link_config 0x%x"
7817                                   "  Autoneg not supported\n",
7818                                   bp->port.link_config);
7819                         return;
7820                 }
7821                 break;
7822
7823         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7824                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7825                         bp->link_params.req_line_speed = SPEED_10;
7826                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7827                                                 ADVERTISED_TP);
7828                 } else {
7829                         BNX2X_ERR("NVRAM config error. "
7830                                   "Invalid link_config 0x%x"
7831                                   "  speed_cap_mask 0x%x\n",
7832                                   bp->port.link_config,
7833                                   bp->link_params.speed_cap_mask);
7834                         return;
7835                 }
7836                 break;
7837
7838         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7839                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7840                         bp->link_params.req_line_speed = SPEED_10;
7841                         bp->link_params.req_duplex = DUPLEX_HALF;
7842                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7843                                                 ADVERTISED_TP);
7844                 } else {
7845                         BNX2X_ERR("NVRAM config error. "
7846                                   "Invalid link_config 0x%x"
7847                                   "  speed_cap_mask 0x%x\n",
7848                                   bp->port.link_config,
7849                                   bp->link_params.speed_cap_mask);
7850                         return;
7851                 }
7852                 break;
7853
7854         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7855                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7856                         bp->link_params.req_line_speed = SPEED_100;
7857                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7858                                                 ADVERTISED_TP);
7859                 } else {
7860                         BNX2X_ERR("NVRAM config error. "
7861                                   "Invalid link_config 0x%x"
7862                                   "  speed_cap_mask 0x%x\n",
7863                                   bp->port.link_config,
7864                                   bp->link_params.speed_cap_mask);
7865                         return;
7866                 }
7867                 break;
7868
7869         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7870                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7871                         bp->link_params.req_line_speed = SPEED_100;
7872                         bp->link_params.req_duplex = DUPLEX_HALF;
7873                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7874                                                 ADVERTISED_TP);
7875                 } else {
7876                         BNX2X_ERR("NVRAM config error. "
7877                                   "Invalid link_config 0x%x"
7878                                   "  speed_cap_mask 0x%x\n",
7879                                   bp->port.link_config,
7880                                   bp->link_params.speed_cap_mask);
7881                         return;
7882                 }
7883                 break;
7884
7885         case PORT_FEATURE_LINK_SPEED_1G:
7886                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7887                         bp->link_params.req_line_speed = SPEED_1000;
7888                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7889                                                 ADVERTISED_TP);
7890                 } else {
7891                         BNX2X_ERR("NVRAM config error. "
7892                                   "Invalid link_config 0x%x"
7893                                   "  speed_cap_mask 0x%x\n",
7894                                   bp->port.link_config,
7895                                   bp->link_params.speed_cap_mask);
7896                         return;
7897                 }
7898                 break;
7899
7900         case PORT_FEATURE_LINK_SPEED_2_5G:
7901                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7902                         bp->link_params.req_line_speed = SPEED_2500;
7903                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7904                                                 ADVERTISED_TP);
7905                 } else {
7906                         BNX2X_ERR("NVRAM config error. "
7907                                   "Invalid link_config 0x%x"
7908                                   "  speed_cap_mask 0x%x\n",
7909                                   bp->port.link_config,
7910                                   bp->link_params.speed_cap_mask);
7911                         return;
7912                 }
7913                 break;
7914
7915         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7916         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7917         case PORT_FEATURE_LINK_SPEED_10G_KR:
7918                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7919                         bp->link_params.req_line_speed = SPEED_10000;
7920                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7921                                                 ADVERTISED_FIBRE);
7922                 } else {
7923                         BNX2X_ERR("NVRAM config error. "
7924                                   "Invalid link_config 0x%x"
7925                                   "  speed_cap_mask 0x%x\n",
7926                                   bp->port.link_config,
7927                                   bp->link_params.speed_cap_mask);
7928                         return;
7929                 }
7930                 break;
7931
7932         default:
7933                 BNX2X_ERR("NVRAM config error. "
7934                           "BAD link speed link_config 0x%x\n",
7935                           bp->port.link_config);
7936                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7937                 bp->port.advertising = bp->port.supported;
7938                 break;
7939         }
7940
7941         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7942                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7943         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7944             !(bp->port.supported & SUPPORTED_Autoneg))
7945                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7946
7947         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7948                        "  advertising 0x%x\n",
7949                        bp->link_params.req_line_speed,
7950                        bp->link_params.req_duplex,
7951                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7952 }
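
/* Flow-control note: BNX2X_FLOW_CTRL_AUTO is only meaningful when the port
 * can autonegotiate, so when SUPPORTED_Autoneg is absent the requested flow
 * control is forced to BNX2X_FLOW_CTRL_NONE above rather than left waiting
 * for an autoneg result that will never arrive.
 */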
7953
7954 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7955 {
7956         int port = BP_PORT(bp);
7957         u32 val, val2;
7958         u32 config;
7959
7960         bp->link_params.bp = bp;
7961         bp->link_params.port = port;
7962
7963         bp->link_params.serdes_config =
7964                 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7965         bp->link_params.lane_config =
7966                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7967         bp->link_params.ext_phy_config =
7968                 SHMEM_RD(bp,
7969                          dev_info.port_hw_config[port].external_phy_config);
7970         bp->link_params.speed_cap_mask =
7971                 SHMEM_RD(bp,
7972                          dev_info.port_hw_config[port].speed_capability_mask);
7973
7974         bp->port.link_config =
7975                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7976
7977         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
7978         if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
7979                 bp->link_params.feature_config_flags |=
7980                                 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
7981         else
7982                 bp->link_params.feature_config_flags &=
7983                                 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
7984
7985         BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
7986              KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
7987                        "  link_config 0x%08x\n",
7988                        bp->link_params.serdes_config,
7989                        bp->link_params.lane_config,
7990                        bp->link_params.ext_phy_config,
7991                        bp->link_params.speed_cap_mask, bp->port.link_config);
7992
7993         bp->link_params.switch_cfg = (bp->port.link_config &
7994                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
7995         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7996
7997         bnx2x_link_settings_requested(bp);
7998
7999         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8000         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8001         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8002         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8003         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8004         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8005         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8006         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8007         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8008         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8009 }
8010
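/* Note: the MAC address above is unpacked from two shmem words:
 * mac_upper carries bytes 0-1 in its low 16 bits, mac_lower carries
 * bytes 2-5.  An illustrative sketch of the shifts used above:
 *
 *	addr[0] = (u8)(mac_upper >> 8);   addr[1] = (u8)mac_upper;
 *	addr[2] = (u8)(mac_lower >> 24);  addr[3] = (u8)(mac_lower >> 16);
 *	addr[4] = (u8)(mac_lower >> 8);   addr[5] = (u8)mac_lower;
 *
 * e.g. mac_upper = 0x0010, mac_lower = 0x18000000 would yield
 * 00:10:18:00:00:00 (a Broadcom OUI). */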
8011 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8012 {
8013         int func = BP_FUNC(bp);
8014         u32 val, val2;
8015         int rc = 0;
8016
8017         bnx2x_get_common_hwinfo(bp);
8018
8019         bp->e1hov = 0;
8020         bp->e1hmf = 0;
8021         if (CHIP_IS_E1H(bp)) {
8022                 bp->mf_config =
8023                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8024
8025                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8026                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8027                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8028
8029                         bp->e1hov = val;
8030                         bp->e1hmf = 1;
8031                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
8032                                        "(0x%04x)\n",
8033                                        func, bp->e1hov, bp->e1hov);
8034                 } else {
8035                         BNX2X_DEV_INFO("Single function mode\n");
8036                         if (BP_E1HVN(bp)) {
8037                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8038                                           "  aborting\n", func);
8039                                 rc = -EPERM;
8040                         }
8041                 }
8042         }
8043
8044         if (!BP_NOMCP(bp)) {
8045                 bnx2x_get_port_hwinfo(bp);
8046
8047                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8048                               DRV_MSG_SEQ_NUMBER_MASK);
8049                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8050         }
8051
8052         if (IS_E1HMF(bp)) {
8053                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8054                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8055                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8056                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8057                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8058                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8059                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8060                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8061                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8062                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8063                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8064                                ETH_ALEN);
8065                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8066                                ETH_ALEN);
8067                 }
8068
8069                 return rc;
8070         }
8071
8072         if (BP_NOMCP(bp)) {
8073                 /* only supposed to happen on emulation/FPGA */
8074                 BNX2X_ERR("warning: random MAC workaround active\n");
8075                 random_ether_addr(bp->dev->dev_addr);
8076                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8077         }
8078
8079         return rc;
8080 }
8081
8082 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8083 {
8084         int func = BP_FUNC(bp);
8085         int timer_interval;
8086         int rc;
8087
8088         /* Disable interrupt handling until HW is initialized */
8089         atomic_set(&bp->intr_sem, 1);
8090
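        /* phy_mutex serializes accesses to the (external) PHY; the
         * ethtool and self-test paths below take it via
         * bnx2x_acquire_phy_lock()/bnx2x_release_phy_lock() */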
8091         mutex_init(&bp->port.phy_mutex);
8092
8093         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8094         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8095
8096         rc = bnx2x_get_hwinfo(bp);
8097
8098         /* need to reset the chip if UNDI was active */
8099         if (!BP_NOMCP(bp))
8100                 bnx2x_undi_unload(bp);
8101
8102         if (CHIP_REV_IS_FPGA(bp))
8103                 printk(KERN_ERR PFX "FPGA detected\n");
8104
8105         if (BP_NOMCP(bp) && (func == 0))
8106                 printk(KERN_ERR PFX
8107                        "MCP disabled, must load devices in order!\n");
8108
8109         /* Set multi queue mode */
8110         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8111             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8112                 printk(KERN_ERR PFX
8113                       "Multi disabled since the requested int_mode is not MSI-X\n");
8114                 multi_mode = ETH_RSS_MODE_DISABLED;
8115         }
8116         bp->multi_mode = multi_mode;
8117
8118
8119         /* Set TPA flags */
8120         if (disable_tpa) {
8121                 bp->flags &= ~TPA_ENABLE_FLAG;
8122                 bp->dev->features &= ~NETIF_F_LRO;
8123         } else {
8124                 bp->flags |= TPA_ENABLE_FLAG;
8125                 bp->dev->features |= NETIF_F_LRO;
8126         }
8127
8128
8129         bp->tx_ring_size = MAX_TX_AVAIL;
8130         bp->rx_ring_size = MAX_RX_AVAIL;
8131
8132         bp->rx_csum = 1;
8133
8134         bp->tx_ticks = 50;
8135         bp->rx_ticks = 25;
8136
8137         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8138         bp->current_interval = (poll ? poll : timer_interval);
8139
8140         init_timer(&bp->timer);
8141         bp->timer.expires = jiffies + bp->current_interval;
8142         bp->timer.data = (unsigned long) bp;
8143         bp->timer.function = bnx2x_timer;
8144
8145         return rc;
8146 }
8147
8148 /*
8149  * ethtool service functions
8150  */
8151
8152 /* All ethtool functions called with rtnl_lock */
8153
8154 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8155 {
8156         struct bnx2x *bp = netdev_priv(dev);
8157
8158         cmd->supported = bp->port.supported;
8159         cmd->advertising = bp->port.advertising;
8160
8161         if (netif_carrier_ok(dev)) {
8162                 cmd->speed = bp->link_vars.line_speed;
8163                 cmd->duplex = bp->link_vars.duplex;
8164         } else {
8165                 cmd->speed = bp->link_params.req_line_speed;
8166                 cmd->duplex = bp->link_params.req_duplex;
8167         }
8168         if (IS_E1HMF(bp)) {
8169                 u16 vn_max_rate;
8170
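                /* MAX_BW is in units of 100 Mbps, so e.g. a field
                 * value of 25 caps the reported speed at 2500
                 * (2.5 Gbps) */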
8171                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8172                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8173                 if (vn_max_rate < cmd->speed)
8174                         cmd->speed = vn_max_rate;
8175         }
8176
8177         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8178                 u32 ext_phy_type =
8179                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8180
8181                 switch (ext_phy_type) {
8182                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8183                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8184                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8185                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8186                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8187                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8188                         cmd->port = PORT_FIBRE;
8189                         break;
8190
8191                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8192                         cmd->port = PORT_TP;
8193                         break;
8194
8195                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8196                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8197                                   bp->link_params.ext_phy_config);
8198                         break;
8199
8200                 default:
8201                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8202                            bp->link_params.ext_phy_config);
8203                         break;
8204                 }
8205         } else
8206                 cmd->port = PORT_TP;
8207
8208         cmd->phy_address = bp->port.phy_addr;
8209         cmd->transceiver = XCVR_INTERNAL;
8210
8211         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8212                 cmd->autoneg = AUTONEG_ENABLE;
8213         else
8214                 cmd->autoneg = AUTONEG_DISABLE;
8215
8216         cmd->maxtxpkt = 0;
8217         cmd->maxrxpkt = 0;
8218
8219         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8220            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8221            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8222            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8223            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8224            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8225            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8226
8227         return 0;
8228 }
8229
8230 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8231 {
8232         struct bnx2x *bp = netdev_priv(dev);
8233         u32 advertising;
8234
8235         if (IS_E1HMF(bp))
8236                 return 0;
8237
8238         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8239            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8240            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8241            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8242            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8243            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8244            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8245
8246         if (cmd->autoneg == AUTONEG_ENABLE) {
8247                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8248                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8249                         return -EINVAL;
8250                 }
8251
8252                 /* mask the requested advertising down to the supported modes */
8253                 cmd->advertising &= bp->port.supported;
8254
8255                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8256                 bp->link_params.req_duplex = DUPLEX_FULL;
8257                 bp->port.advertising |= (ADVERTISED_Autoneg |
8258                                          cmd->advertising);
8259
8260         } else { /* forced speed */
8261                 /* advertise the requested speed and duplex if supported */
8262                 switch (cmd->speed) {
8263                 case SPEED_10:
8264                         if (cmd->duplex == DUPLEX_FULL) {
8265                                 if (!(bp->port.supported &
8266                                       SUPPORTED_10baseT_Full)) {
8267                                         DP(NETIF_MSG_LINK,
8268                                            "10M full not supported\n");
8269                                         return -EINVAL;
8270                                 }
8271
8272                                 advertising = (ADVERTISED_10baseT_Full |
8273                                                ADVERTISED_TP);
8274                         } else {
8275                                 if (!(bp->port.supported &
8276                                       SUPPORTED_10baseT_Half)) {
8277                                         DP(NETIF_MSG_LINK,
8278                                            "10M half not supported\n");
8279                                         return -EINVAL;
8280                                 }
8281
8282                                 advertising = (ADVERTISED_10baseT_Half |
8283                                                ADVERTISED_TP);
8284                         }
8285                         break;
8286
8287                 case SPEED_100:
8288                         if (cmd->duplex == DUPLEX_FULL) {
8289                                 if (!(bp->port.supported &
8290                                                 SUPPORTED_100baseT_Full)) {
8291                                         DP(NETIF_MSG_LINK,
8292                                            "100M full not supported\n");
8293                                         return -EINVAL;
8294                                 }
8295
8296                                 advertising = (ADVERTISED_100baseT_Full |
8297                                                ADVERTISED_TP);
8298                         } else {
8299                                 if (!(bp->port.supported &
8300                                                 SUPPORTED_100baseT_Half)) {
8301                                         DP(NETIF_MSG_LINK,
8302                                            "100M half not supported\n");
8303                                         return -EINVAL;
8304                                 }
8305
8306                                 advertising = (ADVERTISED_100baseT_Half |
8307                                                ADVERTISED_TP);
8308                         }
8309                         break;
8310
8311                 case SPEED_1000:
8312                         if (cmd->duplex != DUPLEX_FULL) {
8313                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8314                                 return -EINVAL;
8315                         }
8316
8317                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8318                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8319                                 return -EINVAL;
8320                         }
8321
8322                         advertising = (ADVERTISED_1000baseT_Full |
8323                                        ADVERTISED_TP);
8324                         break;
8325
8326                 case SPEED_2500:
8327                         if (cmd->duplex != DUPLEX_FULL) {
8328                                 DP(NETIF_MSG_LINK,
8329                                    "2.5G half not supported\n");
8330                                 return -EINVAL;
8331                         }
8332
8333                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8334                                 DP(NETIF_MSG_LINK,
8335                                    "2.5G full not supported\n");
8336                                 return -EINVAL;
8337                         }
8338
8339                         advertising = (ADVERTISED_2500baseX_Full |
8340                                        ADVERTISED_TP);
8341                         break;
8342
8343                 case SPEED_10000:
8344                         if (cmd->duplex != DUPLEX_FULL) {
8345                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8346                                 return -EINVAL;
8347                         }
8348
8349                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8350                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8351                                 return -EINVAL;
8352                         }
8353
8354                         advertising = (ADVERTISED_10000baseT_Full |
8355                                        ADVERTISED_FIBRE);
8356                         break;
8357
8358                 default:
8359                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8360                         return -EINVAL;
8361                 }
8362
8363                 bp->link_params.req_line_speed = cmd->speed;
8364                 bp->link_params.req_duplex = cmd->duplex;
8365                 bp->port.advertising = advertising;
8366         }
8367
8368         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8369            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8370            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8371            bp->port.advertising);
8372
8373         if (netif_running(dev)) {
8374                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8375                 bnx2x_link_set(bp);
8376         }
8377
8378         return 0;
8379 }
8380
8381 #define PHY_FW_VER_LEN                  10
8382
8383 static void bnx2x_get_drvinfo(struct net_device *dev,
8384                               struct ethtool_drvinfo *info)
8385 {
8386         struct bnx2x *bp = netdev_priv(dev);
8387         u8 phy_fw_ver[PHY_FW_VER_LEN];
8388
8389         strcpy(info->driver, DRV_MODULE_NAME);
8390         strcpy(info->version, DRV_MODULE_VERSION);
8391
8392         phy_fw_ver[0] = '\0';
8393         if (bp->port.pmf) {
8394                 bnx2x_acquire_phy_lock(bp);
8395                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8396                                              (bp->state != BNX2X_STATE_CLOSED),
8397                                              phy_fw_ver, PHY_FW_VER_LEN);
8398                 bnx2x_release_phy_lock(bp);
8399         }
8400
8401         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8402                  (bp->common.bc_ver & 0xff0000) >> 16,
8403                  (bp->common.bc_ver & 0xff00) >> 8,
8404                  (bp->common.bc_ver & 0xff),
8405                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8406         strcpy(info->bus_info, pci_name(bp->pdev));
8407         info->n_stats = BNX2X_NUM_STATS;
8408         info->testinfo_len = BNX2X_NUM_TESTS;
8409         info->eedump_len = bp->common.flash_size;
8410         info->regdump_len = 0;
8411 }
8412
8413 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8414 {
8415         struct bnx2x *bp = netdev_priv(dev);
8416
8417         if (bp->flags & NO_WOL_FLAG) {
8418                 wol->supported = 0;
8419                 wol->wolopts = 0;
8420         } else {
8421                 wol->supported = WAKE_MAGIC;
8422                 if (bp->wol)
8423                         wol->wolopts = WAKE_MAGIC;
8424                 else
8425                         wol->wolopts = 0;
8426         }
8427         memset(&wol->sopass, 0, sizeof(wol->sopass));
8428 }
8429
8430 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8431 {
8432         struct bnx2x *bp = netdev_priv(dev);
8433
8434         if (wol->wolopts & ~WAKE_MAGIC)
8435                 return -EINVAL;
8436
8437         if (wol->wolopts & WAKE_MAGIC) {
8438                 if (bp->flags & NO_WOL_FLAG)
8439                         return -EINVAL;
8440
8441                 bp->wol = 1;
8442         } else
8443                 bp->wol = 0;
8444
8445         return 0;
8446 }
8447
8448 static u32 bnx2x_get_msglevel(struct net_device *dev)
8449 {
8450         struct bnx2x *bp = netdev_priv(dev);
8451
8452         return bp->msglevel;
8453 }
8454
8455 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8456 {
8457         struct bnx2x *bp = netdev_priv(dev);
8458
8459         if (capable(CAP_NET_ADMIN))
8460                 bp->msglevel = level;
8461 }
8462
8463 static int bnx2x_nway_reset(struct net_device *dev)
8464 {
8465         struct bnx2x *bp = netdev_priv(dev);
8466
8467         if (!bp->port.pmf)
8468                 return 0;
8469
8470         if (netif_running(dev)) {
8471                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8472                 bnx2x_link_set(bp);
8473         }
8474
8475         return 0;
8476 }
8477
8478 static int bnx2x_get_eeprom_len(struct net_device *dev)
8479 {
8480         struct bnx2x *bp = netdev_priv(dev);
8481
8482         return bp->common.flash_size;
8483 }
8484
8485 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8486 {
8487         int port = BP_PORT(bp);
8488         int count, i;
8489         u32 val = 0;
8490
8491         /* adjust timeout for emulation/FPGA */
8492         count = NVRAM_TIMEOUT_COUNT;
8493         if (CHIP_REV_IS_SLOW(bp))
8494                 count *= 100;
8495
8496         /* request access to nvram interface */
8497         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8498                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8499
8500         for (i = 0; i < count*10; i++) {
8501                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8502                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8503                         break;
8504
8505                 udelay(5);
8506         }
8507
8508         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8509                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8510                 return -EBUSY;
8511         }
8512
8513         return 0;
8514 }
8515
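/* NVRAM software arbitration, as used above and below: each port sets
 * its request bit (ARB_REQ_SET) and polls its grant bit (ARB_ARB);
 * release writes ARB_REQ_CLR and waits for the grant to drop.  The
 * wait is bounded by NVRAM_TIMEOUT_COUNT * 10 polls of 5us, scaled
 * 100x on emulation/FPGA where everything runs slower. */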
8516 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8517 {
8518         int port = BP_PORT(bp);
8519         int count, i;
8520         u32 val = 0;
8521
8522         /* adjust timeout for emulation/FPGA */
8523         count = NVRAM_TIMEOUT_COUNT;
8524         if (CHIP_REV_IS_SLOW(bp))
8525                 count *= 100;
8526
8527         /* relinquish nvram interface */
8528         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8529                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8530
8531         for (i = 0; i < count*10; i++) {
8532                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8533                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8534                         break;
8535
8536                 udelay(5);
8537         }
8538
8539         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8540                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8541                 return -EBUSY;
8542         }
8543
8544         return 0;
8545 }
8546
8547 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8548 {
8549         u32 val;
8550
8551         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8552
8553         /* enable both bits, even on read */
8554         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8555                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8556                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8557 }
8558
8559 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8560 {
8561         u32 val;
8562
8563         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8564
8565         /* disable both bits, even after read */
8566         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8567                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8568                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8569 }
8570
8571 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8572                                   u32 cmd_flags)
8573 {
8574         int count, i, rc;
8575         u32 val;
8576
8577         /* build the command word */
8578         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8579
8580         /* need to clear DONE bit separately */
8581         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8582
8583         /* address of the NVRAM to read from */
8584         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8585                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8586
8587         /* issue a read command */
8588         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8589
8590         /* adjust timeout for emulation/FPGA */
8591         count = NVRAM_TIMEOUT_COUNT;
8592         if (CHIP_REV_IS_SLOW(bp))
8593                 count *= 100;
8594
8595         /* wait for completion */
8596         *ret_val = 0;
8597         rc = -EBUSY;
8598         for (i = 0; i < count; i++) {
8599                 udelay(5);
8600                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8601
8602                 if (val & MCPR_NVM_COMMAND_DONE) {
8603                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8604                         /* we read NVRAM data in CPU order,
8605                          * but ethtool sees it as an array of bytes;
8606                          * converting to big-endian does the work */
8607                         val = cpu_to_be32(val);
8608                         *ret_val = val;
8609                         rc = 0;
8610                         break;
8611                 }
8612         }
8613
8614         return rc;
8615 }
8616
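/* The dword read above follows the MCP NVRAM handshake: clear DONE,
 * program the address, set DOIT (plus FIRST/LAST framing on the
 * boundary dwords of a burst), then poll DONE and fetch the data
 * register.  A minimal usage sketch (illustrative only):
 *
 *	u32 v;
 *	rc = bnx2x_nvram_read_dword(bp, 0, &v,
 *				    MCPR_NVM_COMMAND_FIRST |
 *				    MCPR_NVM_COMMAND_LAST);
 *
 * On success v holds the first NVRAM dword, already converted to
 * big-endian byte order for the ethtool buffer. */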
8617 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8618                             int buf_size)
8619 {
8620         int rc;
8621         u32 cmd_flags;
8622         u32 val;
8623
8624         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8625                 DP(BNX2X_MSG_NVM,
8626                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8627                    offset, buf_size);
8628                 return -EINVAL;
8629         }
8630
8631         if (offset + buf_size > bp->common.flash_size) {
8632                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8633                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8634                    offset, buf_size, bp->common.flash_size);
8635                 return -EINVAL;
8636         }
8637
8638         /* request access to nvram interface */
8639         rc = bnx2x_acquire_nvram_lock(bp);
8640         if (rc)
8641                 return rc;
8642
8643         /* enable access to nvram interface */
8644         bnx2x_enable_nvram_access(bp);
8645
8646         /* read the first word(s) */
8647         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8648         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8649                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8650                 memcpy(ret_buf, &val, 4);
8651
8652                 /* advance to the next dword */
8653                 offset += sizeof(u32);
8654                 ret_buf += sizeof(u32);
8655                 buf_size -= sizeof(u32);
8656                 cmd_flags = 0;
8657         }
8658
8659         if (rc == 0) {
8660                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8661                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8662                 memcpy(ret_buf, &val, 4);
8663         }
8664
8665         /* disable access to nvram interface */
8666         bnx2x_disable_nvram_access(bp);
8667         bnx2x_release_nvram_lock(bp);
8668
8669         return rc;
8670 }
8671
8672 static int bnx2x_get_eeprom(struct net_device *dev,
8673                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8674 {
8675         struct bnx2x *bp = netdev_priv(dev);
8676         int rc;
8677
8678         if (!netif_running(dev))
8679                 return -EAGAIN;
8680
8681         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8682            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8683            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8684            eeprom->len, eeprom->len);
8685
8686         /* parameters already validated in ethtool_get_eeprom */
8687
8688         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8689
8690         return rc;
8691 }
8692
8693 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8694                                    u32 cmd_flags)
8695 {
8696         int count, i, rc;
8697
8698         /* build the command word */
8699         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8700
8701         /* need to clear DONE bit separately */
8702         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8703
8704         /* write the data */
8705         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8706
8707         /* address of the NVRAM to write to */
8708         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8709                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8710
8711         /* issue the write command */
8712         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8713
8714         /* adjust timeout for emulation/FPGA */
8715         count = NVRAM_TIMEOUT_COUNT;
8716         if (CHIP_REV_IS_SLOW(bp))
8717                 count *= 100;
8718
8719         /* wait for completion */
8720         rc = -EBUSY;
8721         for (i = 0; i < count; i++) {
8722                 udelay(5);
8723                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8724                 if (val & MCPR_NVM_COMMAND_DONE) {
8725                         rc = 0;
8726                         break;
8727                 }
8728         }
8729
8730         return rc;
8731 }
8732
8733 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8734
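/* BYTE_OFFSET() yields the bit position of a byte within its aligned
 * dword, e.g. offset 0x102: (0x102 & 0x03) == 2, so the shift is 16.
 * bnx2x_nvram_write1() below uses it to splice a single byte into the
 * containing dword via read-modify-write. */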
8735 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8736                               int buf_size)
8737 {
8738         int rc;
8739         u32 cmd_flags;
8740         u32 align_offset;
8741         u32 val;
8742
8743         if (offset + buf_size > bp->common.flash_size) {
8744                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8745                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8746                    offset, buf_size, bp->common.flash_size);
8747                 return -EINVAL;
8748         }
8749
8750         /* request access to nvram interface */
8751         rc = bnx2x_acquire_nvram_lock(bp);
8752         if (rc)
8753                 return rc;
8754
8755         /* enable access to nvram interface */
8756         bnx2x_enable_nvram_access(bp);
8757
8758         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8759         align_offset = (offset & ~0x03);
8760         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8761
8762         if (rc == 0) {
8763                 val &= ~(0xff << BYTE_OFFSET(offset));
8764                 val |= (*data_buf << BYTE_OFFSET(offset));
8765
8766                 /* NVRAM data is returned as an array of bytes;
8767                  * convert it back to CPU order */
8768                 val = be32_to_cpu(val);
8769
8770                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8771                                              cmd_flags);
8772         }
8773
8774         /* disable access to nvram interface */
8775         bnx2x_disable_nvram_access(bp);
8776         bnx2x_release_nvram_lock(bp);
8777
8778         return rc;
8779 }
8780
8781 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8782                              int buf_size)
8783 {
8784         int rc;
8785         u32 cmd_flags;
8786         u32 val;
8787         u32 written_so_far;
8788
8789         if (buf_size == 1)      /* ethtool */
8790                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8791
8792         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8793                 DP(BNX2X_MSG_NVM,
8794                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8795                    offset, buf_size);
8796                 return -EINVAL;
8797         }
8798
8799         if (offset + buf_size > bp->common.flash_size) {
8800                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8801                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8802                    offset, buf_size, bp->common.flash_size);
8803                 return -EINVAL;
8804         }
8805
8806         /* request access to nvram interface */
8807         rc = bnx2x_acquire_nvram_lock(bp);
8808         if (rc)
8809                 return rc;
8810
8811         /* enable access to nvram interface */
8812         bnx2x_enable_nvram_access(bp);
8813
8814         written_so_far = 0;
8815         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8816         while ((written_so_far < buf_size) && (rc == 0)) {
8817                 if (written_so_far == (buf_size - sizeof(u32)))
8818                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8819                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8820                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8821                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8822                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8823
8824                 memcpy(&val, data_buf, 4);
8825
8826                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8827
8828                 /* advance to the next dword */
8829                 offset += sizeof(u32);
8830                 data_buf += sizeof(u32);
8831                 written_so_far += sizeof(u32);
8832                 cmd_flags = 0;
8833         }
8834
8835         /* disable access to nvram interface */
8836         bnx2x_disable_nvram_access(bp);
8837         bnx2x_release_nvram_lock(bp);
8838
8839         return rc;
8840 }
8841
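/* In the multi-dword write above the FIRST/LAST flags re-frame the
 * burst on every NVRAM_PAGE_SIZE boundary: the dword that ends a page
 * carries LAST and the dword that starts the next page carries FIRST,
 * so a program burst never crosses a flash page. */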
8842 static int bnx2x_set_eeprom(struct net_device *dev,
8843                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8844 {
8845         struct bnx2x *bp = netdev_priv(dev);
8846         int rc;
8847
8848         if (!netif_running(dev))
8849                 return -EAGAIN;
8850
8851         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8852            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8853            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8854            eeprom->len, eeprom->len);
8855
8856         /* parameters already validated in ethtool_set_eeprom */
8857
8858         /* If the magic number is "PHY" in ASCII (0x00504859), upgrade the PHY FW */
8859         if (eeprom->magic == 0x00504859)
8860                 if (bp->port.pmf) {
8861
8862                         bnx2x_acquire_phy_lock(bp);
8863                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8864                                              bp->link_params.ext_phy_config,
8865                                              (bp->state != BNX2X_STATE_CLOSED),
8866                                              eebuf, eeprom->len);
8867                         if ((bp->state == BNX2X_STATE_OPEN) ||
8868                             (bp->state == BNX2X_STATE_DISABLED)) {
8869                                 rc |= bnx2x_link_reset(&bp->link_params,
8870                                                        &bp->link_vars, 1);
8871                                 rc |= bnx2x_phy_init(&bp->link_params,
8872                                                      &bp->link_vars);
8873                         }
8874                         bnx2x_release_phy_lock(bp);
8875
8876                 } else /* Only the PMF can access the PHY */
8877                         return -EINVAL;
8878         else
8879                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8880
8881         return rc;
8882 }
8883
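/* The PHY firmware upgrade path above runs entirely under the PHY
 * lock: the image is flashed with bnx2x_flash_download(), and if the
 * port is up (OPEN/DISABLED) the link is bounced via
 * bnx2x_link_reset() followed by bnx2x_phy_init() so the PHY restarts
 * on the new firmware. */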
8884 static int bnx2x_get_coalesce(struct net_device *dev,
8885                               struct ethtool_coalesce *coal)
8886 {
8887         struct bnx2x *bp = netdev_priv(dev);
8888
8889         memset(coal, 0, sizeof(struct ethtool_coalesce));
8890
8891         coal->rx_coalesce_usecs = bp->rx_ticks;
8892         coal->tx_coalesce_usecs = bp->tx_ticks;
8893
8894         return 0;
8895 }
8896
8897 static int bnx2x_set_coalesce(struct net_device *dev,
8898                               struct ethtool_coalesce *coal)
8899 {
8900         struct bnx2x *bp = netdev_priv(dev);
8901
8902         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8903         if (bp->rx_ticks > 3000)
8904                 bp->rx_ticks = 3000;
8905
8906         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8907         if (bp->tx_ticks > 3000)
8908                 bp->tx_ticks = 3000;
8909
8910         if (netif_running(dev))
8911                 bnx2x_update_coalesce(bp);
8912
8913         return 0;
8914 }
8915
8916 static void bnx2x_get_ringparam(struct net_device *dev,
8917                                 struct ethtool_ringparam *ering)
8918 {
8919         struct bnx2x *bp = netdev_priv(dev);
8920
8921         ering->rx_max_pending = MAX_RX_AVAIL;
8922         ering->rx_mini_max_pending = 0;
8923         ering->rx_jumbo_max_pending = 0;
8924
8925         ering->rx_pending = bp->rx_ring_size;
8926         ering->rx_mini_pending = 0;
8927         ering->rx_jumbo_pending = 0;
8928
8929         ering->tx_max_pending = MAX_TX_AVAIL;
8930         ering->tx_pending = bp->tx_ring_size;
8931 }
8932
8933 static int bnx2x_set_ringparam(struct net_device *dev,
8934                                struct ethtool_ringparam *ering)
8935 {
8936         struct bnx2x *bp = netdev_priv(dev);
8937         int rc = 0;
8938
8939         if ((ering->rx_pending > MAX_RX_AVAIL) ||
8940             (ering->tx_pending > MAX_TX_AVAIL) ||
8941             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8942                 return -EINVAL;
8943
8944         bp->rx_ring_size = ering->rx_pending;
8945         bp->tx_ring_size = ering->tx_pending;
8946
8947         if (netif_running(dev)) {
8948                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8949                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8950         }
8951
8952         return rc;
8953 }
8954
8955 static void bnx2x_get_pauseparam(struct net_device *dev,
8956                                  struct ethtool_pauseparam *epause)
8957 {
8958         struct bnx2x *bp = netdev_priv(dev);
8959
8960         epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8961                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8962
8963         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8964                             BNX2X_FLOW_CTRL_RX);
8965         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8966                             BNX2X_FLOW_CTRL_TX);
8967
8968         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8969            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8970            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8971 }
8972
8973 static int bnx2x_set_pauseparam(struct net_device *dev,
8974                                 struct ethtool_pauseparam *epause)
8975 {
8976         struct bnx2x *bp = netdev_priv(dev);
8977
8978         if (IS_E1HMF(bp))
8979                 return 0;
8980
8981         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8982            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
8983            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8984
8985         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8986
8987         if (epause->rx_pause)
8988                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8989
8990         if (epause->tx_pause)
8991                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8992
8993         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8994                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8995
8996         if (epause->autoneg) {
8997                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8998                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
8999                         return -EINVAL;
9000                 }
9001
9002                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9003                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9004         }
9005
9006         DP(NETIF_MSG_LINK,
9007            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9008
9009         if (netif_running(dev)) {
9010                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9011                 bnx2x_link_set(bp);
9012         }
9013
9014         return 0;
9015 }
9016
9017 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9018 {
9019         struct bnx2x *bp = netdev_priv(dev);
9020         int changed = 0;
9021         int rc = 0;
9022
9023         /* TPA requires Rx CSUM offloading */
9024         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9025                 if (!(dev->features & NETIF_F_LRO)) {
9026                         dev->features |= NETIF_F_LRO;
9027                         bp->flags |= TPA_ENABLE_FLAG;
9028                         changed = 1;
9029                 }
9030
9031         } else if (dev->features & NETIF_F_LRO) {
9032                 dev->features &= ~NETIF_F_LRO;
9033                 bp->flags &= ~TPA_ENABLE_FLAG;
9034                 changed = 1;
9035         }
9036
9037         if (changed && netif_running(dev)) {
9038                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9039                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9040         }
9041
9042         return rc;
9043 }
9044
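/* Note the asymmetric coupling between LRO and Rx checksumming:
 * bnx2x_set_flags() above only enables LRO when bp->rx_csum is set,
 * while bnx2x_set_rx_csum() below force-clears LRO whenever Rx CSUM
 * is turned off. */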
9045 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9046 {
9047         struct bnx2x *bp = netdev_priv(dev);
9048
9049         return bp->rx_csum;
9050 }
9051
9052 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9053 {
9054         struct bnx2x *bp = netdev_priv(dev);
9055         int rc = 0;
9056
9057         bp->rx_csum = data;
9058
9059         /* Disable TPA when Rx CSUM is disabled; otherwise all
9060            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9061         if (!data) {
9062                 u32 flags = ethtool_op_get_flags(dev);
9063
9064                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9065         }
9066
9067         return rc;
9068 }
9069
9070 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9071 {
9072         if (data) {
9073                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9074                 dev->features |= NETIF_F_TSO6;
9075         } else {
9076                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9077                 dev->features &= ~NETIF_F_TSO6;
9078         }
9079
9080         return 0;
9081 }
9082
9083 static const struct {
9084         char string[ETH_GSTRING_LEN];
9085 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9086         { "register_test (offline)" },
9087         { "memory_test (offline)" },
9088         { "loopback_test (offline)" },
9089         { "nvram_test (online)" },
9090         { "interrupt_test (online)" },
9091         { "link_test (online)" },
9092         { "idle check (online)" }
9093 };
9094
9095 static int bnx2x_self_test_count(struct net_device *dev)
9096 {
9097         return BNX2X_NUM_TESTS;
9098 }
9099
9100 static int bnx2x_test_registers(struct bnx2x *bp)
9101 {
9102         int idx, i, rc = -ENODEV;
9103         u32 wr_val = 0;
9104         int port = BP_PORT(bp);
9105         static const struct {
9106                 u32  offset0;
9107                 u32  offset1;
9108                 u32  mask;
9109         } reg_tbl[] = {
9110 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9111                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9112                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9113                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9114                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9115                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9116                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9117                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9118                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9119                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9120 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9121                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9122                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9123                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9124                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9125                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9126                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9127                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9128                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
9129                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9130 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9131                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9132                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9133                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9134                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9135                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9136                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9137                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9138                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9139                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9140 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9141                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9142                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9143                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9144                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9145                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9146                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9147                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9148
9149                 { 0xffffffff, 0, 0x00000000 }
9150         };
9151
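        /* offset1 is the per-port stride: the port 1 copy of each
         * register lives at offset0 + port * offset1 (4 or 8 for
         * simple paired registers, larger for register arrays such
         * as NIG_REG_LLH0_DEST_MAC_0_0) */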
9152         if (!netif_running(bp->dev))
9153                 return rc;
9154
9155         /* Repeat the test twice:
9156            First by writing 0x00000000, second by writing 0xffffffff */
9157         for (idx = 0; idx < 2; idx++) {
9158
9159                 switch (idx) {
9160                 case 0:
9161                         wr_val = 0;
9162                         break;
9163                 case 1:
9164                         wr_val = 0xffffffff;
9165                         break;
9166                 }
9167
9168                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9169                         u32 offset, mask, save_val, val;
9170
9171                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9172                         mask = reg_tbl[i].mask;
9173
9174                         save_val = REG_RD(bp, offset);
9175
9176                         REG_WR(bp, offset, wr_val);
9177                         val = REG_RD(bp, offset);
9178
9179                         /* Restore the original register's value */
9180                         REG_WR(bp, offset, save_val);
9181
9182                         /* verify the value reads back as expected */
9183                         if ((val & mask) != (wr_val & mask))
9184                                 goto test_reg_exit;
9185                 }
9186         }
9187
9188         rc = 0;
9189
9190 test_reg_exit:
9191         return rc;
9192 }
9193
9194 static int bnx2x_test_memory(struct bnx2x *bp)
9195 {
9196         int i, j, rc = -ENODEV;
9197         u32 val;
9198         static const struct {
9199                 u32 offset;
9200                 int size;
9201         } mem_tbl[] = {
9202                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9203                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9204                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9205                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9206                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9207                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9208                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9209
9210                 { 0xffffffff, 0 }
9211         };
9212         static const struct {
9213                 char *name;
9214                 u32 offset;
9215                 u32 e1_mask;
9216                 u32 e1h_mask;
9217         } prty_tbl[] = {
9218                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9219                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9220                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9221                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9222                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9223                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9224
9225                 { NULL, 0xffffffff, 0, 0 }
9226         };
9227
9228         if (!netif_running(bp->dev))
9229                 return rc;
9230
9231         /* Go through all the memories */
9232         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9233                 for (j = 0; j < mem_tbl[i].size; j++)
9234                         REG_RD(bp, mem_tbl[i].offset + j*4);
9235
9236         /* Check the parity status */
9237         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9238                 val = REG_RD(bp, prty_tbl[i].offset);
9239                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9240                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9241                         DP(NETIF_MSG_HW,
9242                            "%s is 0x%x\n", prty_tbl[i].name, val);
9243                         goto test_mem_exit;
9244                 }
9245         }
9246
9247         rc = 0;
9248
9249 test_mem_exit:
9250         return rc;
9251 }
9252
9253 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9254 {
9255         int cnt = 1000;
9256
9257         if (link_up)
9258                 while (bnx2x_link_test(bp) && cnt--)
9259                         msleep(10);
9260 }
9261
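/* Single-frame loopback smoke test on fastpath 0: build a 1514-byte
 * frame addressed to ourselves with an (i & 0xff) payload pattern,
 * post one TX BD and ring the doorbell, then verify that exactly one
 * packet advanced both the TX and RX consumer indices and that the
 * received payload matches before restoring the RX producers. */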
9262 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9263 {
9264         unsigned int pkt_size, num_pkts, i;
9265         struct sk_buff *skb;
9266         unsigned char *packet;
9267         struct bnx2x_fastpath *fp = &bp->fp[0];
9268         u16 tx_start_idx, tx_idx;
9269         u16 rx_start_idx, rx_idx;
9270         u16 pkt_prod;
9271         struct sw_tx_bd *tx_buf;
9272         struct eth_tx_bd *tx_bd;
9273         dma_addr_t mapping;
9274         union eth_rx_cqe *cqe;
9275         u8 cqe_fp_flags;
9276         struct sw_rx_bd *rx_buf;
9277         u16 len;
9278         int rc = -ENODEV;
9279
9280         if (loopback_mode == BNX2X_MAC_LOOPBACK) {
9281                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9282                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9283
9284         } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
9285                 u16 cnt = 1000;
9286                 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
9287                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9288                 /* wait until link state is restored */
9289                 if (link_up)
9290                         while (cnt-- && bnx2x_test_link(&bp->link_params,
9291                                                         &bp->link_vars))
9292                                 msleep(10);
9293         } else
9294                 return -EINVAL;
9295
9296         pkt_size = 1514;
9297         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9298         if (!skb) {
9299                 rc = -ENOMEM;
9300                 goto test_loopback_exit;
9301         }
9302         packet = skb_put(skb, pkt_size);
9303         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9304         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9305         for (i = ETH_HLEN; i < pkt_size; i++)
9306                 packet[i] = (unsigned char) (i & 0xff);
9307
9308         num_pkts = 0;
9309         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9310         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9311
9312         pkt_prod = fp->tx_pkt_prod++;
9313         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9314         tx_buf->first_bd = fp->tx_bd_prod;
9315         tx_buf->skb = skb;
9316
9317         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9318         mapping = pci_map_single(bp->pdev, skb->data,
9319                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9320         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9321         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9322         tx_bd->nbd = cpu_to_le16(1);
9323         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9324         tx_bd->vlan = cpu_to_le16(pkt_prod);
9325         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9326                                        ETH_TX_BD_FLAGS_END_BD);
9327         tx_bd->general_data = ((UNICAST_ADDRESS <<
9328                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9329
9330         wmb();
9331
9332         fp->hw_tx_prods->bds_prod =
9333                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9334         mb(); /* FW restriction: must not reorder writing nbd and packets */
9335         fp->hw_tx_prods->packets_prod =
9336                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9337         DOORBELL(bp, FP_IDX(fp), 0);
9338
9339         mmiowb();
9340
9341         num_pkts++;
9342         fp->tx_bd_prod++;
9343         bp->dev->trans_start = jiffies;
9344
9345         udelay(100);
9346
9347         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9348         if (tx_idx != tx_start_idx + num_pkts)
9349                 goto test_loopback_exit;
9350
9351         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9352         if (rx_idx != rx_start_idx + num_pkts)
9353                 goto test_loopback_exit;
9354
9355         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9356         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9357         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9358                 goto test_loopback_rx_exit;
9359
9360         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9361         if (len != pkt_size)
9362                 goto test_loopback_rx_exit;
9363
9364         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9365         skb = rx_buf->skb;
9366         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9367         for (i = ETH_HLEN; i < pkt_size; i++)
9368                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9369                         goto test_loopback_rx_exit;
9370
9371         rc = 0;
9372
9373 test_loopback_rx_exit:
9374
9375         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9376         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9377         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9378         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9379
9380         /* Update producers */
9381         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9382                              fp->rx_sge_prod);
9383
9384 test_loopback_exit:
9385         bp->link_params.loopback_mode = LOOPBACK_NONE;
9386
9387         return rc;
9388 }
9389
9390 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9391 {
9392         int rc = 0;
9393
9394         if (!netif_running(bp->dev))
9395                 return BNX2X_LOOPBACK_FAILED;
9396
9397         bnx2x_netif_stop(bp, 1);
9398         bnx2x_acquire_phy_lock(bp);
9399
9400         if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9401                 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9402                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9403         }
9404
9405         if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9406                 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9407                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9408         }
9409
9410         bnx2x_release_phy_lock(bp);
9411         bnx2x_netif_start(bp);
9412
9413         return rc;
9414 }
9415
9416 #define CRC32_RESIDUAL                  0xdebb20e3
9417
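/* Each nvram_tbl region below includes its own stored CRC32, so
 * running ether_crc_le() over the complete region (payload plus CRC)
 * must produce the constant CRC32 residual whenever the data is
 * intact - the test never needs to know where the CRC is kept.
 */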
9418 static int bnx2x_test_nvram(struct bnx2x *bp)
9419 {
9420         static const struct {
9421                 int offset;
9422                 int size;
9423         } nvram_tbl[] = {
9424                 {     0,  0x14 }, /* bootstrap */
9425                 {  0x14,  0xec }, /* dir */
9426                 { 0x100, 0x350 }, /* manuf_info */
9427                 { 0x450,  0xf0 }, /* feature_info */
9428                 { 0x640,  0x64 }, /* upgrade_key_info */
9429                 { 0x6a4,  0x64 },
9430                 { 0x708,  0x70 }, /* manuf_key_info */
9431                 { 0x778,  0x70 },
9432                 {     0,     0 }
9433         };
9434         u32 buf[0x350 / 4]; /* sized for the largest nvram_tbl region */
9435         u8 *data = (u8 *)buf;
9436         int i, rc;
9437         u32 magic, csum;
9438
9439         rc = bnx2x_nvram_read(bp, 0, data, 4);
9440         if (rc) {
9441                 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9442                 goto test_nvram_exit;
9443         }
9444
9445         magic = be32_to_cpu(buf[0]);
9446         if (magic != 0x669955aa) {
9447                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9448                 rc = -ENODEV;
9449                 goto test_nvram_exit;
9450         }
9451
9452         for (i = 0; nvram_tbl[i].size; i++) {
9453
9454                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9455                                       nvram_tbl[i].size);
9456                 if (rc) {
9457                         DP(NETIF_MSG_PROBE,
9458                            "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9459                         goto test_nvram_exit;
9460                 }
9461
9462                 csum = ether_crc_le(nvram_tbl[i].size, data);
9463                 if (csum != CRC32_RESIDUAL) {
9464                         DP(NETIF_MSG_PROBE,
9465                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9466                         rc = -ENODEV;
9467                         goto test_nvram_exit;
9468                 }
9469         }
9470
9471 test_nvram_exit:
9472         return rc;
9473 }
9474
9475 static int bnx2x_test_intr(struct bnx2x *bp)
9476 {
9477         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9478         int i, rc;
9479
9480         if (!netif_running(bp->dev))
9481                 return -ENODEV;
9482
9483         config->hdr.length = 0;
9484         if (CHIP_IS_E1(bp))
9485                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9486         else
9487                 config->hdr.offset = BP_FUNC(bp);
9488         config->hdr.client_id = BP_CL_ID(bp);
9489         config->hdr.reserved1 = 0;
9490
9491         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9492                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9493                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9494         if (rc == 0) {
9495                 bp->set_mac_pending++;
9496                 for (i = 0; i < 10; i++) {
9497                         if (!bp->set_mac_pending)
9498                                 break;
9499                         msleep_interruptible(10);
9500                 }
9501                 if (i == 10)
9502                         rc = -ENODEV;
9503         }
9504
9505         return rc;
9506 }
9507
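/* buf[] layout: 0 - registers, 1 - memory, 2 - loopback (bitmask),
 * 3 - nvram, 4 - interrupt, 5 - link; non-zero means that test failed
 */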
9508 static void bnx2x_self_test(struct net_device *dev,
9509                             struct ethtool_test *etest, u64 *buf)
9510 {
9511         struct bnx2x *bp = netdev_priv(dev);
9512
9513         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9514
9515         if (!netif_running(dev))
9516                 return;
9517
9518         /* offline tests are not supported in MF mode */
9519         if (IS_E1HMF(bp))
9520                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9521
9522         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9523                 u8 link_up;
9524
9525                 link_up = bp->link_vars.link_up;
9526                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9527                 bnx2x_nic_load(bp, LOAD_DIAG);
9528                 /* wait until link state is restored */
9529                 bnx2x_wait_for_link(bp, link_up);
9530
9531                 if (bnx2x_test_registers(bp) != 0) {
9532                         buf[0] = 1;
9533                         etest->flags |= ETH_TEST_FL_FAILED;
9534                 }
9535                 if (bnx2x_test_memory(bp) != 0) {
9536                         buf[1] = 1;
9537                         etest->flags |= ETH_TEST_FL_FAILED;
9538                 }
9539                 buf[2] = bnx2x_test_loopback(bp, link_up);
9540                 if (buf[2] != 0)
9541                         etest->flags |= ETH_TEST_FL_FAILED;
9542
9543                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9544                 bnx2x_nic_load(bp, LOAD_NORMAL);
9545                 /* wait until link state is restored */
9546                 bnx2x_wait_for_link(bp, link_up);
9547         }
9548         if (bnx2x_test_nvram(bp) != 0) {
9549                 buf[3] = 1;
9550                 etest->flags |= ETH_TEST_FL_FAILED;
9551         }
9552         if (bnx2x_test_intr(bp) != 0) {
9553                 buf[4] = 1;
9554                 etest->flags |= ETH_TEST_FL_FAILED;
9555         }
9556         if (bp->port.pmf)
9557                 if (bnx2x_link_test(bp) != 0) {
9558                         buf[5] = 1;
9559                         etest->flags |= ETH_TEST_FL_FAILED;
9560                 }
9561
9562 #ifdef BNX2X_EXTRA_DEBUG
9563         bnx2x_panic_dump(bp);
9564 #endif
9565 }
9566
9567 static const struct {
9568         long offset;
9569         int size;
9570         u8 string[ETH_GSTRING_LEN];
9571 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9572 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9573         { Q_STATS_OFFSET32(error_bytes_received_hi),
9574                                                 8, "[%d]: rx_error_bytes" },
9575         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9576                                                 8, "[%d]: rx_ucast_packets" },
9577         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9578                                                 8, "[%d]: rx_mcast_packets" },
9579         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9580                                                 8, "[%d]: rx_bcast_packets" },
9581         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9582         { Q_STATS_OFFSET32(rx_err_discard_pkt),
9583                                          4, "[%d]: rx_phy_ip_err_discards"},
9584         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9585                                          4, "[%d]: rx_skb_alloc_discard" },
9586         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9587
9588 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9589         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9590                                                         8, "[%d]: tx_packets" }
9591 };
9592
9593 static const struct {
9594         long offset;
9595         int size;
9596         u32 flags;
9597 #define STATS_FLAGS_PORT                1
9598 #define STATS_FLAGS_FUNC                2
9599 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9600         u8 string[ETH_GSTRING_LEN];
9601 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9602 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9603                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9604         { STATS_OFFSET32(error_bytes_received_hi),
9605                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9606         { STATS_OFFSET32(total_unicast_packets_received_hi),
9607                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9608         { STATS_OFFSET32(total_multicast_packets_received_hi),
9609                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9610         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9611                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9612         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9613                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9614         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9615                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9616         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9617                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9618         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9619                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9620 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9621                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9622         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9623                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9624         { STATS_OFFSET32(no_buff_discard_hi),
9625                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9626         { STATS_OFFSET32(mac_filter_discard),
9627                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9628         { STATS_OFFSET32(xxoverflow_discard),
9629                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9630         { STATS_OFFSET32(brb_drop_hi),
9631                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9632         { STATS_OFFSET32(brb_truncate_hi),
9633                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9634         { STATS_OFFSET32(pause_frames_received_hi),
9635                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9636         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9637                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9638         { STATS_OFFSET32(nig_timer_max),
9639                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9640 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9641                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9642         { STATS_OFFSET32(rx_skb_alloc_failed),
9643                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9644         { STATS_OFFSET32(hw_csum_err),
9645                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9646
9647         { STATS_OFFSET32(total_bytes_transmitted_hi),
9648                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9649         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9650                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9651         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9652                                 8, STATS_FLAGS_BOTH, "tx_packets" },
9653         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9654                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9655         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9656                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9657         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9658                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9659         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9660                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9661 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9662                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9663         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9664                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9665         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9666                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9667         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9668                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9669         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9670                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9671         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9672                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9673         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9674                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9675         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9676                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9677         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9678                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9679         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9680                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9681 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9682                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9683         { STATS_OFFSET32(pause_frames_sent_hi),
9684                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9685 };
9686
9687 #define IS_PORT_STAT(i) \
9688         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9689 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9690 #define IS_E1HMF_MODE_STAT(bp) \
9691                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9692
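/* Note that a stat flagged STATS_FLAGS_BOTH is not a "port" stat:
 * IS_PORT_STAT() matches only stats that are port-wide and nothing
 * else.  In E1H multi-function mode port stats are hidden from
 * ethtool unless BNX2X_MSG_STATS is set in msglevel.
 */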
9693 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9694 {
9695         struct bnx2x *bp = netdev_priv(dev);
9696         int i, j, k;
9697
9698         switch (stringset) {
9699         case ETH_SS_STATS:
9700                 if (is_multi(bp)) {
9701                         k = 0;
9702                         for_each_queue(bp, i) {
9703                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9704                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9705                                                 bnx2x_q_stats_arr[j].string, i);
9706                                 k += BNX2X_NUM_Q_STATS;
9707                         }
9708                         if (IS_E1HMF_MODE_STAT(bp))
9709                                 break;
9710                         for (j = 0; j < BNX2X_NUM_STATS; j++)
9711                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9712                                        bnx2x_stats_arr[j].string);
9713                 } else {
9714                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9715                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9716                                         continue;
9717                                 strcpy(buf + j*ETH_GSTRING_LEN,
9718                                        bnx2x_stats_arr[i].string);
9719                                 j++;
9720                         }
9721                 }
9722                 break;
9723
9724         case ETH_SS_TEST:
9725                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9726                 break;
9727         }
9728 }
9729
9730 static int bnx2x_get_stats_count(struct net_device *dev)
9731 {
9732         struct bnx2x *bp = netdev_priv(dev);
9733         int i, num_stats;
9734
9735         if (is_multi(bp)) {
9736                 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9737                 if (!IS_E1HMF_MODE_STAT(bp))
9738                         num_stats += BNX2X_NUM_STATS;
9739         } else {
9740                 if (IS_E1HMF_MODE_STAT(bp)) {
9741                         num_stats = 0;
9742                         for (i = 0; i < BNX2X_NUM_STATS; i++)
9743                                 if (IS_FUNC_STAT(i))
9744                                         num_stats++;
9745                 } else
9746                         num_stats = BNX2X_NUM_STATS;
9747         }
9748
9749         return num_stats;
9750 }
9751
9752 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9753                                     struct ethtool_stats *stats, u64 *buf)
9754 {
9755         struct bnx2x *bp = netdev_priv(dev);
9756         u32 *hw_stats, *offset;
9757         int i, j, k;
9758
9759         if (is_multi(bp)) {
9760                 k = 0;
9761                 for_each_queue(bp, i) {
9762                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9763                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9764                                 if (bnx2x_q_stats_arr[j].size == 0) {
9765                                         /* skip this counter */
9766                                         buf[k + j] = 0;
9767                                         continue;
9768                                 }
9769                                 offset = (hw_stats +
9770                                           bnx2x_q_stats_arr[j].offset);
9771                                 if (bnx2x_q_stats_arr[j].size == 4) {
9772                                         /* 4-byte counter */
9773                                         buf[k + j] = (u64) *offset;
9774                                         continue;
9775                                 }
9776                                 /* 8-byte counter */
9777                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9778                         }
9779                         k += BNX2X_NUM_Q_STATS;
9780                 }
9781                 if (IS_E1HMF_MODE_STAT(bp))
9782                         return;
9783                 hw_stats = (u32 *)&bp->eth_stats;
9784                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9785                         if (bnx2x_stats_arr[j].size == 0) {
9786                                 /* skip this counter */
9787                                 buf[k + j] = 0;
9788                                 continue;
9789                         }
9790                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
9791                         if (bnx2x_stats_arr[j].size == 4) {
9792                                 /* 4-byte counter */
9793                                 buf[k + j] = (u64) *offset;
9794                                 continue;
9795                         }
9796                         /* 8-byte counter */
9797                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
9798                 }
9799         } else {
9800                 hw_stats = (u32 *)&bp->eth_stats;
9801                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9802                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9803                                 continue;
9804                         if (bnx2x_stats_arr[i].size == 0) {
9805                                 /* skip this counter */
9806                                 buf[j] = 0;
9807                                 j++;
9808                                 continue;
9809                         }
9810                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
9811                         if (bnx2x_stats_arr[i].size == 4) {
9812                                 /* 4-byte counter */
9813                                 buf[j] = (u64) *offset;
9814                                 j++;
9815                                 continue;
9816                         }
9817                         /* 8-byte counter */
9818                         buf[j] = HILO_U64(*offset, *(offset + 1));
9819                         j++;
9820                 }
9821         }
9822 }
9823
9824 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9825 {
9826         struct bnx2x *bp = netdev_priv(dev);
9827         int port = BP_PORT(bp);
9828         int i;
9829
9830         if (!netif_running(dev))
9831                 return 0;
9832
9833         if (!bp->port.pmf)
9834                 return 0;
9835
9836         if (data == 0)
9837                 data = 2;
9838
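        /* ethtool passes the blink duration in seconds; every iteration
         * below toggles the LED and sleeps 500 ms, so data * 2
         * iterations blink for roughly data seconds
         */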
9839         for (i = 0; i < (data * 2); i++) {
9840                 if ((i % 2) == 0)
9841                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9842                                       bp->link_params.hw_led_mode,
9843                                       bp->link_params.chip_id);
9844                 else
9845                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9846                                       bp->link_params.hw_led_mode,
9847                                       bp->link_params.chip_id);
9848
9849                 msleep_interruptible(500);
9850                 if (signal_pending(current))
9851                         break;
9852         }
9853
9854         if (bp->link_vars.link_up)
9855                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9856                               bp->link_vars.line_speed,
9857                               bp->link_params.hw_led_mode,
9858                               bp->link_params.chip_id);
9859
9860         return 0;
9861 }
9862
9863 static struct ethtool_ops bnx2x_ethtool_ops = {
9864         .get_settings           = bnx2x_get_settings,
9865         .set_settings           = bnx2x_set_settings,
9866         .get_drvinfo            = bnx2x_get_drvinfo,
9867         .get_wol                = bnx2x_get_wol,
9868         .set_wol                = bnx2x_set_wol,
9869         .get_msglevel           = bnx2x_get_msglevel,
9870         .set_msglevel           = bnx2x_set_msglevel,
9871         .nway_reset             = bnx2x_nway_reset,
9872         .get_link               = ethtool_op_get_link,
9873         .get_eeprom_len         = bnx2x_get_eeprom_len,
9874         .get_eeprom             = bnx2x_get_eeprom,
9875         .set_eeprom             = bnx2x_set_eeprom,
9876         .get_coalesce           = bnx2x_get_coalesce,
9877         .set_coalesce           = bnx2x_set_coalesce,
9878         .get_ringparam          = bnx2x_get_ringparam,
9879         .set_ringparam          = bnx2x_set_ringparam,
9880         .get_pauseparam         = bnx2x_get_pauseparam,
9881         .set_pauseparam         = bnx2x_set_pauseparam,
9882         .get_rx_csum            = bnx2x_get_rx_csum,
9883         .set_rx_csum            = bnx2x_set_rx_csum,
9884         .get_tx_csum            = ethtool_op_get_tx_csum,
9885         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9886         .set_flags              = bnx2x_set_flags,
9887         .get_flags              = ethtool_op_get_flags,
9888         .get_sg                 = ethtool_op_get_sg,
9889         .set_sg                 = ethtool_op_set_sg,
9890         .get_tso                = ethtool_op_get_tso,
9891         .set_tso                = bnx2x_set_tso,
9892         .self_test_count        = bnx2x_self_test_count,
9893         .self_test              = bnx2x_self_test,
9894         .get_strings            = bnx2x_get_strings,
9895         .phys_id                = bnx2x_phys_id,
9896         .get_stats_count        = bnx2x_get_stats_count,
9897         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9898 };
9899
9900 /* end of ethtool_ops */
9901
9902 /****************************************************************************
9903 * General service functions
9904 ****************************************************************************/
9905
9906 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9907 {
9908         u16 pmcsr;
9909
9910         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9911
9912         switch (state) {
9913         case PCI_D0:
9914                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9915                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9916                                        PCI_PM_CTRL_PME_STATUS));
9917
9918                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9919                         /* delay required during transition out of D3hot */
9920                         msleep(20);
9921                 break;
9922
9923         case PCI_D3hot:
9924                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9925                 pmcsr |= 3;     /* D3hot power state */
9926
9927                 if (bp->wol)
9928                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9929
9930                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9931                                       pmcsr);
9932
9933                 /* No more memory access after this point until
9934          * device is brought back to D0.
9935          */
9936                 break;
9937
9938         default:
9939                 return -EINVAL;
9940         }
9941         return 0;
9942 }
9943
9944 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9945 {
9946         u16 rx_cons_sb;
9947
9948         /* Tell compiler that status block fields can change */
9949         barrier();
9950         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
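        /* the last entry of each RCQ page is a "next page" pointer, not
         * a real completion - if the consumer index from the status
         * block lands on one, step over it
         */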
9951         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9952                 rx_cons_sb++;
9953         return (fp->rx_comp_cons != rx_cons_sb);
9954 }
9955
9956 /*
9957  * net_device service functions
9958  */
9959
9960 static int bnx2x_poll(struct napi_struct *napi, int budget)
9961 {
9962         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9963                                                  napi);
9964         struct bnx2x *bp = fp->bp;
9965         int work_done = 0;
9966
9967 #ifdef BNX2X_STOP_ON_ERROR
9968         if (unlikely(bp->panic))
9969                 goto poll_panic;
9970 #endif
9971
9972         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9973         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9974         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9975
9976         bnx2x_update_fpsb_idx(fp);
9977
9978         if (bnx2x_has_tx_work(fp))
9979                 bnx2x_tx_int(fp, budget);
9980
9981         if (bnx2x_has_rx_work(fp))
9982                 work_done = bnx2x_rx_int(fp, budget);
9983         rmb(); /* BNX2X_HAS_WORK() reads the status block */
9984
9985         /* must not complete if we consumed full budget */
9986         if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9987
9988 #ifdef BNX2X_STOP_ON_ERROR
9989 poll_panic:
9990 #endif
9991                 napi_complete(napi);
9992
9993                 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9994                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9995                 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9996                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9997         }
9998         return work_done;
9999 }
10000
10001
10002 /* We split the first BD into a header BD and a data BD
10003  * to ease the pain of our fellow microcode engineers;
10004  * we use one DMA mapping for both BDs.
10005  * So far this has only been observed to happen
10006  * in Other Operating Systems(TM).
10007  */
10008 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10009                                    struct bnx2x_fastpath *fp,
10010                                    struct eth_tx_bd **tx_bd, u16 hlen,
10011                                    u16 bd_prod, int nbd)
10012 {
10013         struct eth_tx_bd *h_tx_bd = *tx_bd;
10014         struct eth_tx_bd *d_tx_bd;
10015         dma_addr_t mapping;
10016         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10017
10018         /* first fix first BD */
10019         h_tx_bd->nbd = cpu_to_le16(nbd);
10020         h_tx_bd->nbytes = cpu_to_le16(hlen);
10021
10022         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10023            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10024            h_tx_bd->addr_lo, h_tx_bd->nbd);
10025
10026         /* now get a new data BD
10027          * (after the pbd) and fill it */
10028         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10029         d_tx_bd = &fp->tx_desc_ring[bd_prod];
10030
10031         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10032                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10033
10034         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10035         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10036         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10037         d_tx_bd->vlan = 0;
10038         /* this marks the BD as one that has no individual mapping
10039          * the FW ignores this flag in a BD not marked start
10040          */
10041         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10042         DP(NETIF_MSG_TX_QUEUED,
10043            "TSO split data size is %d (%x:%x)\n",
10044            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10045
10046         /* update tx_bd for marking the last BD flag */
10047         *tx_bd = d_tx_bd;
10048
10049         return bd_prod;
10050 }
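
/* Worked example (illustrative numbers): for a TSO frame whose linear
 * part starts with 54 bytes of headers, bnx2x_tx_split() shrinks the
 * first BD to nbytes == 54 and adds a data BD pointing at
 * mapping + 54 carrying the remaining old_len - 54 bytes; both BDs
 * share the single DMA mapping of the linear data.
 */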
10051
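/* Fix up a ones-complement checksum whose covered region is shifted
 * by "fix" bytes relative to the transport header: for fix > 0 the
 * contribution of the fix bytes just before the header is subtracted
 * out, for fix < 0 the first -fix bytes at the header are added in;
 * the result is folded, complemented and byte-swapped into the form
 * the parsing BD expects.
 */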
10052 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10053 {
10054         if (fix > 0)
10055                 csum = (u16) ~csum_fold(csum_sub(csum,
10056                                 csum_partial(t_header - fix, fix, 0)));
10057
10058         else if (fix < 0)
10059                 csum = (u16) ~csum_fold(csum_add(csum,
10060                                 csum_partial(t_header, -fix, 0)));
10061
10062         return swab16(csum);
10063 }
10064
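/* Classify the skb for transmission; e.g. a TSO'd IPv4/TCP segment
 * yields XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, while a packet
 * that requested no checksum offload yields XMIT_PLAIN.
 */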
10065 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10066 {
10067         u32 rc;
10068
10069         if (skb->ip_summed != CHECKSUM_PARTIAL)
10070                 rc = XMIT_PLAIN;
10071
10072         else {
10073                 if (skb->protocol == htons(ETH_P_IPV6)) {
10074                         rc = XMIT_CSUM_V6;
10075                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10076                                 rc |= XMIT_CSUM_TCP;
10077
10078                 } else {
10079                         rc = XMIT_CSUM_V4;
10080                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10081                                 rc |= XMIT_CSUM_TCP;
10082                 }
10083         }
10084
10085         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10086                 rc |= XMIT_GSO_V4;
10087
10088         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10089                 rc |= XMIT_GSO_V6;
10090
10091         return rc;
10092 }
10093
10094 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10095 /* check if packet requires linearization (packet is too fragmented) */
10096 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10097                              u32 xmit_type)
10098 {
10099         int to_copy = 0;
10100         int hlen = 0;
10101         int first_bd_sz = 0;
10102
10103         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10104         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10105
10106                 if (xmit_type & XMIT_GSO) {
10107                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10108                         /* Check if LSO packet needs to be copied:
10109                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10110                         int wnd_size = MAX_FETCH_BD - 3;
10111                         /* Number of windows to check */
10112                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10113                         int wnd_idx = 0;
10114                         int frag_idx = 0;
10115                         u32 wnd_sum = 0;
10116
10117                         /* Headers length */
10118                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10119                                 tcp_hdrlen(skb);
10120
10121                         /* Amount of data (w/o headers) on the linear part of the SKB */
10122                         first_bd_sz = skb_headlen(skb) - hlen;
10123
10124                         wnd_sum  = first_bd_sz;
10125
10126                         /* Calculate the first sum - it's special */
10127                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10128                                 wnd_sum +=
10129                                         skb_shinfo(skb)->frags[frag_idx].size;
10130
10131                         /* If there was data on linear skb data - check it */
10132                         if (first_bd_sz > 0) {
10133                                 if (unlikely(wnd_sum < lso_mss)) {
10134                                         to_copy = 1;
10135                                         goto exit_lbl;
10136                                 }
10137
10138                                 wnd_sum -= first_bd_sz;
10139                         }
10140
10141                         /* Others are easier: run through the frag list and
10142                            check all windows */
10143                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10144                                 wnd_sum +=
10145                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10146
10147                                 if (unlikely(wnd_sum < lso_mss)) {
10148                                         to_copy = 1;
10149                                         break;
10150                                 }
10151                                 wnd_sum -=
10152                                         skb_shinfo(skb)->frags[wnd_idx].size;
10153                         }
10154
10155                 } else {
10156                         /* a non-LSO packet this fragmented must
10157                            always be linearized */
10158                         to_copy = 1;
10159                 }
10160         }
10161
10162 exit_lbl:
10163         if (unlikely(to_copy))
10164                 DP(NETIF_MSG_TX_QUEUED,
10165                    "Linearization IS REQUIRED for %s packet. "
10166                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10167                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10168                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10169
10170         return to_copy;
10171 }
10172 #endif
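
/* Illustrative walk-through: with a window of wnd_size =
 * MAX_FETCH_BD - 3 BDs (say 10), the check above slides over every
 * run of 10 consecutive BDs (linear part plus frags).  If any such
 * run sums to less than the LSO MSS, a single MSS would span more
 * BDs than the FW can fetch, so the skb must be linearized first.
 */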
10173
10174 /* called with netif_tx_lock
10175  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10176  * netif_wake_queue()
10177  */
10178 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10179 {
10180         struct bnx2x *bp = netdev_priv(dev);
10181         struct bnx2x_fastpath *fp;
10182         struct netdev_queue *txq;
10183         struct sw_tx_bd *tx_buf;
10184         struct eth_tx_bd *tx_bd;
10185         struct eth_tx_parse_bd *pbd = NULL;
10186         u16 pkt_prod, bd_prod;
10187         int nbd, fp_index;
10188         dma_addr_t mapping;
10189         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10190         int vlan_off = (bp->e1hov ? 4 : 0); /* room for the E1H outer VLAN tag */
10191         int i;
10192         u8 hlen = 0;
10193
10194 #ifdef BNX2X_STOP_ON_ERROR
10195         if (unlikely(bp->panic))
10196                 return NETDEV_TX_BUSY;
10197 #endif
10198
10199         fp_index = skb_get_queue_mapping(skb);
10200         txq = netdev_get_tx_queue(dev, fp_index);
10201
10202         fp = &bp->fp[fp_index];
10203
10204         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10205                 fp->eth_q_stats.driver_xoff++;
10206                 netif_tx_stop_queue(txq);
10207                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10208                 return NETDEV_TX_BUSY;
10209         }
10210
10211         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10212            "  gso type %x  xmit_type %x\n",
10213            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10214            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10215
10216 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10217         /* First, check if we need to linearize the skb
10218            (due to FW restrictions) */
10219         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10220                 /* Statistics of linearization */
10221                 bp->lin_cnt++;
10222                 if (skb_linearize(skb) != 0) {
10223                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10224                            "silently dropping this SKB\n");
10225                         dev_kfree_skb_any(skb);
10226                         return NETDEV_TX_OK;
10227                 }
10228         }
10229 #endif
10230
10231         /*
10232         Please read carefully. First we use one BD which we mark as start,
10233         then for TSO or xsum we have a parsing info BD,
10234         and only then we have the rest of the TSO BDs.
10235         (don't forget to mark the last one as last,
10236         and to unmap only AFTER you write to the BD ...)
10237         And above all, all pbd sizes are in words - NOT DWORDS!
10238         */
10239
10240         pkt_prod = fp->tx_pkt_prod++;
10241         bd_prod = TX_BD(fp->tx_bd_prod);
10242
10243         /* get a tx_buf and first BD */
10244         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10245         tx_bd = &fp->tx_desc_ring[bd_prod];
10246
10247         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10248         tx_bd->general_data = (UNICAST_ADDRESS <<
10249                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10250         /* header nbd */
10251         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10252
10253         /* remember the first BD of the packet */
10254         tx_buf->first_bd = fp->tx_bd_prod;
10255         tx_buf->skb = skb;
10256
10257         DP(NETIF_MSG_TX_QUEUED,
10258            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10259            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10260
10261 #ifdef BCM_VLAN
10262         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10263             (bp->flags & HW_VLAN_TX_FLAG)) {
10264                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10265                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10266                 vlan_off += 4;
10267         } else
10268 #endif
10269                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10270
10271         if (xmit_type) {
10272                 /* turn on parsing and get a BD */
10273                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10274                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10275
10276                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10277         }
10278
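        /* e.g. for an untagged IPv4/TCP frame with minimal headers and
         * vlan_off == 0: hlen = 14/2 = 7 words of L2 header, ip_hlen =
         * 20/2 = 10 words, plus 20/2 = 10 words of TCP header, giving
         * total_hlen = 27 words; hlen then converts back to 54 bytes
         */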
10279         if (xmit_type & XMIT_CSUM) {
10280                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10281
10282                 /* for now NS flag is not used in Linux */
10283                 pbd->global_data = (hlen |
10284                                     ((skb->protocol == htons(ETH_P_8021Q)) <<
10285                                      ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10286
10287                 pbd->ip_hlen = (skb_transport_header(skb) -
10288                                 skb_network_header(skb)) / 2;
10289
10290                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10291
10292                 pbd->total_hlen = cpu_to_le16(hlen);
10293                 hlen = hlen*2 - vlan_off;
10294
10295                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10296
10297                 if (xmit_type & XMIT_CSUM_V4)
10298                         tx_bd->bd_flags.as_bitfield |=
10299                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10300                 else
10301                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10302
10303                 if (xmit_type & XMIT_CSUM_TCP) {
10304                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10305
10306                 } else {
10307                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10308
10309                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10310                         pbd->cs_offset = fix / 2;
10311
10312                         DP(NETIF_MSG_TX_QUEUED,
10313                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
10314                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10315                            SKB_CS(skb));
10316
10317                         /* HW bug: fixup the CSUM */
10318                         pbd->tcp_pseudo_csum =
10319                                 bnx2x_csum_fix(skb_transport_header(skb),
10320                                                SKB_CS(skb), fix);
10321
10322                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10323                            pbd->tcp_pseudo_csum);
10324                 }
10325         }
10326
10327         mapping = pci_map_single(bp->pdev, skb->data,
10328                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10329
10330         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10331         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10332         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10333         tx_bd->nbd = cpu_to_le16(nbd);
10334         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10335
10336         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10337            "  nbytes %d  flags %x  vlan %x\n",
10338            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10339            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10340            le16_to_cpu(tx_bd->vlan));
10341
10342         if (xmit_type & XMIT_GSO) {
10343
10344                 DP(NETIF_MSG_TX_QUEUED,
10345                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10346                    skb->len, hlen, skb_headlen(skb),
10347                    skb_shinfo(skb)->gso_size);
10348
10349                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10350
10351                 if (unlikely(skb_headlen(skb) > hlen))
10352                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10353                                                  bd_prod, ++nbd);
10354
10355                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10356                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10357                 pbd->tcp_flags = pbd_tcp_flags(skb);
10358
10359                 if (xmit_type & XMIT_GSO_V4) {
10360                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10361                         pbd->tcp_pseudo_csum =
10362                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10363                                                           ip_hdr(skb)->daddr,
10364                                                           0, IPPROTO_TCP, 0));
10365
10366                 } else
10367                         pbd->tcp_pseudo_csum =
10368                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10369                                                         &ipv6_hdr(skb)->daddr,
10370                                                         0, IPPROTO_TCP, 0));
10371
10372                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10373         }
10374
10375         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10376                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10377
10378                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10379                 tx_bd = &fp->tx_desc_ring[bd_prod];
10380
10381                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10382                                        frag->size, PCI_DMA_TODEVICE);
10383
10384                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10385                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10386                 tx_bd->nbytes = cpu_to_le16(frag->size);
10387                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10388                 tx_bd->bd_flags.as_bitfield = 0;
10389
10390                 DP(NETIF_MSG_TX_QUEUED,
10391                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
10392                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10393                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10394         }
10395
10396         /* now at last mark the BD as the last BD */
10397         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10398
10399         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
10400            tx_bd, tx_bd->bd_flags.as_bitfield);
10401
10402         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10403
10404         /* now send a tx doorbell, counting the "next page" BD
10405          * if this packet's BDs crossed, or ended on, a page boundary
10406          */
10407         if (TX_BD_POFF(bd_prod) < nbd)
10408                 nbd++;
10409
10410         if (pbd)
10411                 DP(NETIF_MSG_TX_QUEUED,
10412                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10413                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10414                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10415                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10416                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10417
10418         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10419
10420         /*
10421          * Make sure that the BD data is updated before updating the producer
10422          * since FW might read the BD right after the producer is updated.
10423          * This is only applicable for weak-ordered memory model archs such
10424          * as IA-64. The following barrier is also mandatory since the FW
10425          * assumes packets must have BDs.
10426          */
10427         wmb();
10428
10429         fp->hw_tx_prods->bds_prod =
10430                 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
10431         mb(); /* FW restriction: must not reorder writing nbd and packets */
10432         fp->hw_tx_prods->packets_prod =
10433                 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
10434         DOORBELL(bp, FP_IDX(fp), 0);
10435
10436         mmiowb();
10437
10438         fp->tx_bd_prod += nbd;
10439         dev->trans_start = jiffies;
10440
10441         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10442                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10443                    if we put Tx into XOFF state. */
10444                 smp_mb();
10445                 netif_tx_stop_queue(txq);
10446                 fp->eth_q_stats.driver_xoff++;
10447                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10448                         netif_tx_wake_queue(txq);
10449         }
10450         fp->tx_pkt++;
10451
10452         return NETDEV_TX_OK;
10453 }
10454
10455 /* called with rtnl_lock */
10456 static int bnx2x_open(struct net_device *dev)
10457 {
10458         struct bnx2x *bp = netdev_priv(dev);
10459
10460         netif_carrier_off(dev);
10461
10462         bnx2x_set_power_state(bp, PCI_D0);
10463
10464         return bnx2x_nic_load(bp, LOAD_OPEN);
10465 }
10466
10467 /* called with rtnl_lock */
10468 static int bnx2x_close(struct net_device *dev)
10469 {
10470         struct bnx2x *bp = netdev_priv(dev);
10471
10472         /* Unload the driver, release IRQs */
10473         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10474         if (atomic_read(&bp->pdev->enable_cnt) == 1)
10475                 if (!CHIP_REV_IS_SLOW(bp))
10476                         bnx2x_set_power_state(bp, PCI_D3hot);
10477
10478         return 0;
10479 }
10480
10481 /* called with netif_tx_lock from set_multicast */
10482 static void bnx2x_set_rx_mode(struct net_device *dev)
10483 {
10484         struct bnx2x *bp = netdev_priv(dev);
10485         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10486         int port = BP_PORT(bp);
10487
10488         if (bp->state != BNX2X_STATE_OPEN) {
10489                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10490                 return;
10491         }
10492
10493         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10494
10495         if (dev->flags & IFF_PROMISC)
10496                 rx_mode = BNX2X_RX_MODE_PROMISC;
10497
10498         else if ((dev->flags & IFF_ALLMULTI) ||
10499                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10500                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10501
10502         else { /* some multicasts */
10503                 if (CHIP_IS_E1(bp)) {
10504                         int i, old, offset;
10505                         struct dev_mc_list *mclist;
10506                         struct mac_configuration_cmd *config =
10507                                                 bnx2x_sp(bp, mcast_config);
10508
10509                         for (i = 0, mclist = dev->mc_list;
10510                              mclist && (i < dev->mc_count);
10511                              i++, mclist = mclist->next) {
10512
10513                                 config->config_table[i].
10514                                         cam_entry.msb_mac_addr =
10515                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
10516                                 config->config_table[i].
10517                                         cam_entry.middle_mac_addr =
10518                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
10519                                 config->config_table[i].
10520                                         cam_entry.lsb_mac_addr =
10521                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
10522                                 config->config_table[i].cam_entry.flags =
10523                                                         cpu_to_le16(port);
10524                                 config->config_table[i].
10525                                         target_table_entry.flags = 0;
10526                                 config->config_table[i].
10527                                         target_table_entry.client_id = 0;
10528                                 config->config_table[i].
10529                                         target_table_entry.vlan_id = 0;
10530
10531                                 DP(NETIF_MSG_IFUP,
10532                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10533                                    config->config_table[i].
10534                                                 cam_entry.msb_mac_addr,
10535                                    config->config_table[i].
10536                                                 cam_entry.middle_mac_addr,
10537                                    config->config_table[i].
10538                                                 cam_entry.lsb_mac_addr);
10539                         }
10540                         old = config->hdr.length;
10541                         if (old > i) {
10542                                 for (; i < old; i++) {
10543                                         if (CAM_IS_INVALID(config->
10544                                                            config_table[i])) {
10545                                                 /* already invalidated */
10546                                                 break;
10547                                         }
10548                                         /* invalidate */
10549                                         CAM_INVALIDATE(config->
10550                                                        config_table[i]);
10551                                 }
10552                         }
10553
10554                         if (CHIP_REV_IS_SLOW(bp))
10555                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10556                         else
10557                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
10558
10559                         config->hdr.length = i;
10560                         config->hdr.offset = offset;
10561                         config->hdr.client_id = bp->fp->cl_id;
10562                         config->hdr.reserved1 = 0;
10563
10564                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10565                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10566                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10567                                       0);
10568                 } else { /* E1H */
10569                         /* Accept one or more multicasts */
10570                         struct dev_mc_list *mclist;
10571                         u32 mc_filter[MC_HASH_SIZE];
10572                         u32 crc, bit, regidx;
10573                         int i;
10574
10575                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10576
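                        /* hash each multicast MAC into a 256-bit filter:
                         * the top byte of the CRC32c picks a bit number
                         * N, regidx = N >> 5 selects one of the eight
                         * 32-bit registers and N & 0x1f the bit within
                         * it (e.g. CRC 0xa7... -> bit 167 -> reg 5,
                         * bit 7)
                         */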
10577                         for (i = 0, mclist = dev->mc_list;
10578                              mclist && (i < dev->mc_count);
10579                              i++, mclist = mclist->next) {
10580
10581                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10582                                    mclist->dmi_addr);
10583
10584                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10585                                 bit = (crc >> 24) & 0xff;
10586                                 regidx = bit >> 5;
10587                                 bit &= 0x1f;
10588                                 mc_filter[regidx] |= (1 << bit);
10589                         }
10590
10591                         for (i = 0; i < MC_HASH_SIZE; i++)
10592                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10593                                        mc_filter[i]);
10594                 }
10595         }
10596
10597         bp->rx_mode = rx_mode;
10598         bnx2x_set_storm_rx_mode(bp);
10599 }
10600
10601 /* called with rtnl_lock */
10602 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10603 {
10604         struct sockaddr *addr = p;
10605         struct bnx2x *bp = netdev_priv(dev);
10606
10607         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10608                 return -EINVAL;
10609
10610         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10611         if (netif_running(dev)) {
10612                 if (CHIP_IS_E1(bp))
10613                         bnx2x_set_mac_addr_e1(bp, 1);
10614                 else
10615                         bnx2x_set_mac_addr_e1h(bp, 1);
10616         }
10617
10618         return 0;
10619 }
10620
10621 /* called with rtnl_lock */
10622 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10623 {
10624         struct mii_ioctl_data *data = if_mii(ifr);
10625         struct bnx2x *bp = netdev_priv(dev);
10626         int port = BP_PORT(bp);
10627         int err;
10628
10629         switch (cmd) {
10630         case SIOCGMIIPHY:
10631                 data->phy_id = bp->port.phy_addr;
10632
10633                 /* fallthrough */
10634
10635         case SIOCGMIIREG: {
10636                 u16 mii_regval;
10637
10638                 if (!netif_running(dev))
10639                         return -EAGAIN;
10640
10641                 mutex_lock(&bp->port.phy_mutex);
10642                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10643                                       DEFAULT_PHY_DEV_ADDR,
10644                                       (data->reg_num & 0x1f), &mii_regval);
10645                 data->val_out = mii_regval;
10646                 mutex_unlock(&bp->port.phy_mutex);
10647                 return err;
10648         }
10649
10650         case SIOCSMIIREG:
10651                 if (!capable(CAP_NET_ADMIN))
10652                         return -EPERM;
10653
10654                 if (!netif_running(dev))
10655                         return -EAGAIN;
10656
10657                 mutex_lock(&bp->port.phy_mutex);
10658                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10659                                        DEFAULT_PHY_DEV_ADDR,
10660                                        (data->reg_num & 0x1f), data->val_in);
10661                 mutex_unlock(&bp->port.phy_mutex);
10662                 return err;
10663
10664         default:
10665                 /* do nothing */
10666                 break;
10667         }
10668
10669         return -EOPNOTSUPP;
10670 }
10671
10672 /* called with rtnl_lock */
10673 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10674 {
10675         struct bnx2x *bp = netdev_priv(dev);
10676         int rc = 0;
10677
10678         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10679             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10680                 return -EINVAL;
10681
10682         /* This does not race with packet allocation
10683          * because the actual alloc size is
10684          * only updated as part of load
10685          */
10686         dev->mtu = new_mtu;
10687
10688         if (netif_running(dev)) {
10689                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10690                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10691         }
10692
10693         return rc;
10694 }
10695
10696 static void bnx2x_tx_timeout(struct net_device *dev)
10697 {
10698         struct bnx2x *bp = netdev_priv(dev);
10699
10700 #ifdef BNX2X_STOP_ON_ERROR
10701         if (!bp->panic)
10702                 bnx2x_panic();
10703 #endif
10704         /* This allows the netif to be shut down gracefully before resetting */
10705         schedule_work(&bp->reset_task);
10706 }
10707
10708 #ifdef BCM_VLAN
10709 /* called with rtnl_lock */
10710 static void bnx2x_vlan_rx_register(struct net_device *dev,
10711                                    struct vlan_group *vlgrp)
10712 {
10713         struct bnx2x *bp = netdev_priv(dev);
10714
10715         bp->vlgrp = vlgrp;
10716
10717         /* Set flags according to the required capabilities */
10718         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10719
10720         if (dev->features & NETIF_F_HW_VLAN_TX)
10721                 bp->flags |= HW_VLAN_TX_FLAG;
10722
10723         if (dev->features & NETIF_F_HW_VLAN_RX)
10724                 bp->flags |= HW_VLAN_RX_FLAG;
10725
10726         if (netif_running(dev))
10727                 bnx2x_set_client_config(bp);
10728 }
10729
10730 #endif
10731
10732 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10733 static void poll_bnx2x(struct net_device *dev)
10734 {
10735         struct bnx2x *bp = netdev_priv(dev);
10736
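        /* netpoll (e.g. netconsole) may run when normal interrupt
         * delivery is unavailable, so call the interrupt handler
         * directly with the device's IRQ line masked.
         */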
10737         disable_irq(bp->pdev->irq);
10738         bnx2x_interrupt(bp->pdev->irq, dev);
10739         enable_irq(bp->pdev->irq);
10740 }
10741 #endif
10742
10743 static const struct net_device_ops bnx2x_netdev_ops = {
10744         .ndo_open               = bnx2x_open,
10745         .ndo_stop               = bnx2x_close,
10746         .ndo_start_xmit         = bnx2x_start_xmit,
10747         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10748         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10749         .ndo_validate_addr      = eth_validate_addr,
10750         .ndo_do_ioctl           = bnx2x_ioctl,
10751         .ndo_change_mtu         = bnx2x_change_mtu,
10752         .ndo_tx_timeout         = bnx2x_tx_timeout,
10753 #ifdef BCM_VLAN
10754         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10755 #endif
10756 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10757         .ndo_poll_controller    = poll_bnx2x,
10758 #endif
10759 };
10760
10761
10762 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10763                                     struct net_device *dev)
10764 {
10765         struct bnx2x *bp;
10766         int rc;
10767
10768         SET_NETDEV_DEV(dev, &pdev->dev);
10769         bp = netdev_priv(dev);
10770
10771         bp->dev = dev;
10772         bp->pdev = pdev;
10773         bp->flags = 0;
10774         bp->func = PCI_FUNC(pdev->devfn);
10775
10776         rc = pci_enable_device(pdev);
10777         if (rc) {
10778                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10779                 goto err_out;
10780         }
10781
10782         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10783                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10784                        " aborting\n");
10785                 rc = -ENODEV;
10786                 goto err_out_disable;
10787         }
10788
10789         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10790                 printk(KERN_ERR PFX "Cannot find second PCI device"
10791                        " base address, aborting\n");
10792                 rc = -ENODEV;
10793                 goto err_out_disable;
10794         }
10795
10796         if (atomic_read(&pdev->enable_cnt) == 1) {
10797                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10798                 if (rc) {
10799                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10800                                " aborting\n");
10801                         goto err_out_disable;
10802                 }
10803
10804                 pci_set_master(pdev);
10805                 pci_save_state(pdev);
10806         }
10807
10808         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10809         if (bp->pm_cap == 0) {
10810                 printk(KERN_ERR PFX "Cannot find power management"
10811                        " capability, aborting\n");
10812                 rc = -EIO;
10813                 goto err_out_release;
10814         }
10815
10816         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10817         if (bp->pcie_cap == 0) {
10818                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10819                        " aborting\n");
10820                 rc = -EIO;
10821                 goto err_out_release;
10822         }
10823
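        /* Prefer 64-bit DMA (USING_DAC_FLAG later gates NETIF_F_HIGHDMA);
         * fall back to a 32-bit mask before giving up.
         */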
10824         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10825                 bp->flags |= USING_DAC_FLAG;
10826                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10827                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10828                                " failed, aborting\n");
10829                         rc = -EIO;
10830                         goto err_out_release;
10831                 }
10832
10833         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10834                 printk(KERN_ERR PFX "System does not support DMA,"
10835                        " aborting\n");
10836                 rc = -EIO;
10837                 goto err_out_release;
10838         }
10839
10840         dev->mem_start = pci_resource_start(pdev, 0);
10841         dev->base_addr = dev->mem_start;
10842         dev->mem_end = pci_resource_end(pdev, 0);
10843
10844         dev->irq = pdev->irq;
10845
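        /* BAR 0 is the register window, BAR 2 the doorbell space;
         * the doorbells are mapped non-cached.
         */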
10846         bp->regview = pci_ioremap_bar(pdev, 0);
10847         if (!bp->regview) {
10848                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10849                 rc = -ENOMEM;
10850                 goto err_out_release;
10851         }
10852
10853         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10854                                         min_t(u64, BNX2X_DB_SIZE,
10855                                               pci_resource_len(pdev, 2)));
10856         if (!bp->doorbells) {
10857                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10858                 rc = -ENOMEM;
10859                 goto err_out_unmap;
10860         }
10861
10862         bnx2x_set_power_state(bp, PCI_D0);
10863
10864         /* clean indirect addresses */
10865         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10866                                PCICFG_VENDOR_ID_OFFSET);
10867         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10868         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10869         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10870         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10871
10872         dev->watchdog_timeo = TX_TIMEOUT;
10873
10874         dev->netdev_ops = &bnx2x_netdev_ops;
10875         dev->ethtool_ops = &bnx2x_ethtool_ops;
10876         dev->features |= NETIF_F_SG;
10877         dev->features |= NETIF_F_HW_CSUM;
10878         if (bp->flags & USING_DAC_FLAG)
10879                 dev->features |= NETIF_F_HIGHDMA;
10880 #ifdef BCM_VLAN
10881         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10882         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10883 #endif
10884         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10885         dev->features |= NETIF_F_TSO6;
10886
10887         return 0;
10888
10889 err_out_unmap:
10890         if (bp->regview) {
10891                 iounmap(bp->regview);
10892                 bp->regview = NULL;
10893         }
10894         if (bp->doorbells) {
10895                 iounmap(bp->doorbells);
10896                 bp->doorbells = NULL;
10897         }
10898
10899 err_out_release:
10900         if (atomic_read(&pdev->enable_cnt) == 1)
10901                 pci_release_regions(pdev);
10902
10903 err_out_disable:
10904         pci_disable_device(pdev);
10905         pci_set_drvdata(pdev, NULL);
10906
10907 err_out:
10908         return rc;
10909 }
10910
10911 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10912 {
10913         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10914
10915         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10916         return val;
10917 }
10918
10919 /* return value: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
10920 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10921 {
10922         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10923
10924         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10925         return val;
10926 }
10927
10928 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10929                                     const struct pci_device_id *ent)
10930 {
10931         static int version_printed;
10932         struct net_device *dev = NULL;
10933         struct bnx2x *bp;
10934         int rc;
10935
10936         if (version_printed++ == 0)
10937                 printk(KERN_INFO "%s", version);
10938
10939         /* dev and its private data are zeroed in alloc_etherdev_mq */
10940         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
10941         if (!dev) {
10942                 printk(KERN_ERR PFX "Cannot allocate net device\n");
10943                 return -ENOMEM;
10944         }
10945
10946         bp = netdev_priv(dev);
10947         bp->msglevel = debug;
10948
10949         rc = bnx2x_init_dev(pdev, dev);
10950         if (rc < 0) {
10951                 free_netdev(dev);
10952                 return rc;
10953         }
10954
10955         pci_set_drvdata(pdev, dev);
10956
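        /* Complete the driver state setup before register_netdev() so
         * that no ndo callback can see a half-initialized device.
         */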
10957         rc = bnx2x_init_bp(bp);
10958         if (rc)
10959                 goto init_one_exit;
10960
10961         rc = register_netdev(dev);
10962         if (rc) {
10963                 dev_err(&pdev->dev, "Cannot register net device\n");
10964                 goto init_one_exit;
10965         }
10966
10967         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10968                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
10969                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10970                bnx2x_get_pcie_width(bp),
10971                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10972                dev->base_addr, bp->pdev->irq);
10973         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10974         return 0;
10975
10976 init_one_exit:
10977         if (bp->regview)
10978                 iounmap(bp->regview);
10979
10980         if (bp->doorbells)
10981                 iounmap(bp->doorbells);
10982
10983         free_netdev(dev);
10984
10985         if (atomic_read(&pdev->enable_cnt) == 1)
10986                 pci_release_regions(pdev);
10987
10988         pci_disable_device(pdev);
10989         pci_set_drvdata(pdev, NULL);
10990
10991         return rc;
10992 }
10993
10994 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10995 {
10996         struct net_device *dev = pci_get_drvdata(pdev);
10997         struct bnx2x *bp;
10998
10999         if (!dev) {
11000                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11001                 return;
11002         }
11003         bp = netdev_priv(dev);
11004
11005         unregister_netdev(dev);
11006
11007         if (bp->regview)
11008                 iounmap(bp->regview);
11009
11010         if (bp->doorbells)
11011                 iounmap(bp->doorbells);
11012
11013         free_netdev(dev);
11014
11015         if (atomic_read(&pdev->enable_cnt) == 1)
11016                 pci_release_regions(pdev);
11017
11018         pci_disable_device(pdev);
11019         pci_set_drvdata(pdev, NULL);
11020 }
11021
11022 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11023 {
11024         struct net_device *dev = pci_get_drvdata(pdev);
11025         struct bnx2x *bp;
11026
11027         if (!dev) {
11028                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11029                 return -ENODEV;
11030         }
11031         bp = netdev_priv(dev);
11032
11033         rtnl_lock();
11034
11035         pci_save_state(pdev);
11036
11037         if (!netif_running(dev)) {
11038                 rtnl_unlock();
11039                 return 0;
11040         }
11041
11042         netif_device_detach(dev);
11043
11044         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11045
11046         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11047
11048         rtnl_unlock();
11049
11050         return 0;
11051 }
11052
11053 static int bnx2x_resume(struct pci_dev *pdev)
11054 {
11055         struct net_device *dev = pci_get_drvdata(pdev);
11056         struct bnx2x *bp;
11057         int rc;
11058
11059         if (!dev) {
11060                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11061                 return -ENODEV;
11062         }
11063         bp = netdev_priv(dev);
11064
11065         rtnl_lock();
11066
11067         pci_restore_state(pdev);
11068
11069         if (!netif_running(dev)) {
11070                 rtnl_unlock();
11071                 return 0;
11072         }
11073
11074         bnx2x_set_power_state(bp, PCI_D0);
11075         netif_device_attach(dev);
11076
11077         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11078
11079         rtnl_unlock();
11080
11081         return rc;
11082 }
11083
11084 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11085 {
11086         int i;
11087
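        /* Error-path teardown: the device may no longer be accessible
         * after a bus error, so release driver resources without the
         * firmware handshakes done by the regular unload path.
         */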
11088         bp->state = BNX2X_STATE_ERROR;
11089
11090         bp->rx_mode = BNX2X_RX_MODE_NONE;
11091
11092         bnx2x_netif_stop(bp, 0);
11093
11094         del_timer_sync(&bp->timer);
11095         bp->stats_state = STATS_STATE_DISABLED;
11096         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11097
11098         /* Release IRQs */
11099         bnx2x_free_irq(bp);
11100
11101         if (CHIP_IS_E1(bp)) {
11102                 struct mac_configuration_cmd *config =
11103                                                 bnx2x_sp(bp, mcast_config);
11104
11105                 for (i = 0; i < config->hdr.length; i++)
11106                         CAM_INVALIDATE(config->config_table[i]);
11107         }
11108
11109         /* Free SKBs, SGEs, TPA pool and driver internals */
11110         bnx2x_free_skbs(bp);
11111         for_each_rx_queue(bp, i)
11112                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11113         for_each_rx_queue(bp, i)
11114                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11115         bnx2x_free_mem(bp);
11116
11117         bp->state = BNX2X_STATE_CLOSED;
11118
11119         netif_carrier_off(bp->dev);
11120
11121         return 0;
11122 }
11123
11124 static void bnx2x_eeh_recover(struct bnx2x *bp)
11125 {
11126         u32 val;
11127
11128         mutex_init(&bp->port.phy_mutex);
11129
11130         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11131         bp->link_params.shmem_base = bp->common.shmem_base;
11132         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11133
11134         if (!bp->common.shmem_base ||
11135             (bp->common.shmem_base < 0xA0000) ||
11136             (bp->common.shmem_base >= 0xC0000)) {
11137                 BNX2X_DEV_INFO("MCP not active\n");
11138                 bp->flags |= NO_MCP_FLAG;
11139                 return;
11140         }
11141
11142         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11143         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11144                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11145                 BNX2X_ERR("BAD MCP validity signature\n");
11146
11147         if (!BP_NOMCP(bp)) {
11148                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11149                               & DRV_MSG_SEQ_NUMBER_MASK);
11150                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11151         }
11152 }
11153
11154 /**
11155  * bnx2x_io_error_detected - called when PCI error is detected
11156  * @pdev: Pointer to PCI device
11157  * @state: The current pci connection state
11158  *
11159  * This function is called after a PCI bus error affecting
11160  * this device has been detected.
11161  */
11162 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11163                                                 pci_channel_state_t state)
11164 {
11165         struct net_device *dev = pci_get_drvdata(pdev);
11166         struct bnx2x *bp = netdev_priv(dev);
11167
11168         rtnl_lock();
11169
11170         netif_device_detach(dev);
11171
11172         if (netif_running(dev))
11173                 bnx2x_eeh_nic_unload(bp);
11174
11175         pci_disable_device(pdev);
11176
11177         rtnl_unlock();
11178
11179         /* Request a slot reset */
11180         return PCI_ERS_RESULT_NEED_RESET;
11181 }
11182
11183 /**
11184  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11185  * @pdev: Pointer to PCI device
11186  *
11187  * Restart the card from scratch, as if from a cold boot.
11188  */
11189 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11190 {
11191         struct net_device *dev = pci_get_drvdata(pdev);
11192         struct bnx2x *bp = netdev_priv(dev);
11193
11194         rtnl_lock();
11195
11196         if (pci_enable_device(pdev)) {
11197                 dev_err(&pdev->dev,
11198                         "Cannot re-enable PCI device after reset\n");
11199                 rtnl_unlock();
11200                 return PCI_ERS_RESULT_DISCONNECT;
11201         }
11202
11203         pci_set_master(pdev);
11204         pci_restore_state(pdev);
11205
11206         if (netif_running(dev))
11207                 bnx2x_set_power_state(bp, PCI_D0);
11208
11209         rtnl_unlock();
11210
11211         return PCI_ERS_RESULT_RECOVERED;
11212 }
11213
11214 /**
11215  * bnx2x_io_resume - called when traffic can start flowing again
11216  * @pdev: Pointer to PCI device
11217  *
11218  * This callback is called when the error recovery driver tells us that
11219  * it's OK to resume normal operation.
11220  */
11221 static void bnx2x_io_resume(struct pci_dev *pdev)
11222 {
11223         struct net_device *dev = pci_get_drvdata(pdev);
11224         struct bnx2x *bp = netdev_priv(dev);
11225
11226         rtnl_lock();
11227
11228         bnx2x_eeh_recover(bp);
11229
11230         if (netif_running(dev))
11231                 bnx2x_nic_load(bp, LOAD_NORMAL);
11232
11233         netif_device_attach(dev);
11234
11235         rtnl_unlock();
11236 }
11237
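/* PCI error recovery flow: the PCI core calls .error_detected when a bus
 * error is seen, .slot_reset after the slot has been reset, and .resume
 * once traffic may flow again.
 */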
11238 static struct pci_error_handlers bnx2x_err_handler = {
11239         .error_detected = bnx2x_io_error_detected,
11240         .slot_reset = bnx2x_io_slot_reset,
11241         .resume = bnx2x_io_resume,
11242 };
11243
11244 static struct pci_driver bnx2x_pci_driver = {
11245         .name        = DRV_MODULE_NAME,
11246         .id_table    = bnx2x_pci_tbl,
11247         .probe       = bnx2x_init_one,
11248         .remove      = __devexit_p(bnx2x_remove_one),
11249         .suspend     = bnx2x_suspend,
11250         .resume      = bnx2x_resume,
11251         .err_handler = &bnx2x_err_handler,
11252 };
11253
11254 static int __init bnx2x_init(void)
11255 {
11256         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11257         if (bnx2x_wq == NULL) {
11258                 printk(KERN_ERR PFX "Cannot create workqueue\n");
11259                 return -ENOMEM;
11260         }
11261
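        /* The single-threaded workqueue serializes the driver's
         * slow-path work and must exist before the driver registers,
         * so that any device probed from here on can queue work on it.
         */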
11262         return pci_register_driver(&bnx2x_pci_driver);
11263 }
11264
11265 static void __exit bnx2x_cleanup(void)
11266 {
11267         pci_unregister_driver(&bnx2x_pci_driver);
11268
11269         destroy_workqueue(bnx2x_wq);
11270 }
11271
11272 module_init(bnx2x_init);
11273 module_exit(bnx2x_cleanup);
11274