bnx2x: Debug prints
drivers/net/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56
57 #define DRV_MODULE_VERSION      "1.48.102"
58 #define DRV_MODULE_RELDATE      "2009/02/12"
59 #define BNX2X_BC_VER            0x040200
60
61 /* Time in jiffies before concluding the transmitter is hung */
62 #define TX_TIMEOUT              (5*HZ)
63
64 static char version[] __devinitdata =
65         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
66         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68 MODULE_AUTHOR("Eliezer Tamir");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72
73 static int multi_mode = 1;
74 module_param(multi_mode, int, 0);
75 MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");
76
77 static int disable_tpa;
78 module_param(disable_tpa, int, 0);
79 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
80
81 static int int_mode;
82 module_param(int_mode, int, 0);
83 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INTx; 2 MSI)");
84
85 static int poll;
86 module_param(poll, int, 0);
87 MODULE_PARM_DESC(poll, " Use polling (for debug)");
88
89 static int mrrs = -1;
90 module_param(mrrs, int, 0);
91 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
92
93 static int debug;
94 module_param(debug, int, 0);
95 MODULE_PARM_DESC(debug, " Default debug msglevel");
96
97 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
98
99 static struct workqueue_struct *bnx2x_wq;
100
101 enum bnx2x_board_type {
102         BCM57710 = 0,
103         BCM57711 = 1,
104         BCM57711E = 2,
105 };
106
107 /* indexed by board_type, above */
108 static struct {
109         char *name;
110 } board_info[] __devinitdata = {
111         { "Broadcom NetXtreme II BCM57710 XGb" },
112         { "Broadcom NetXtreme II BCM57711 XGb" },
113         { "Broadcom NetXtreme II BCM57711E XGb" }
114 };
115
116
117 static const struct pci_device_id bnx2x_pci_tbl[] = {
118         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
119                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
120         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
121                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
122         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
123                 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
124         { 0 }
125 };
126
127 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
128
129 /****************************************************************************
130 * General service functions
131 ****************************************************************************/
132
133 /* used only at init
134  * locking is done by mcp
135  */
136 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
137 {
138         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
139         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
140         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
141                                PCICFG_VENDOR_ID_OFFSET);
142 }
143
144 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
145 {
146         u32 val;
147
148         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
149         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
150         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
151                                PCICFG_VENDOR_ID_OFFSET);
152
153         return val;
154 }
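/*
 * The two helpers above tunnel GRC accesses through PCI config space:
 * the target address is latched into the PCICFG_GRC_ADDRESS window, the
 * data moves through PCICFG_GRC_DATA, and the window is parked back at
 * PCICFG_VENDOR_ID_OFFSET so a stray config cycle cannot hit a random
 * register.  Illustrative use (register chosen only as an example):
 *
 *	u32 chip = bnx2x_reg_rd_ind(bp, MISC_REG_CHIP_NUM);
 */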
155
156 static const u32 dmae_reg_go_c[] = {
157         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
158         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
159         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
160         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
161 };
162
163 /* copy command into DMAE command memory and set DMAE command go */
164 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
165                             int idx)
166 {
167         u32 cmd_offset;
168         int i;
169
170         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
171         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
172                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
173
174                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
175                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
176         }
177         REG_WR(bp, dmae_reg_go_c[idx], 1);
178 }
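/*
 * The DMAE block exposes 16 command slots (see dmae_reg_go_c[] above).
 * The loop copies the command image into slot 'idx' one dword at a time
 * and the final write to dmae_reg_go_c[idx] is the doorbell that starts
 * that channel.
 */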
179
180 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
181                       u32 len32)
182 {
183         struct dmae_command *dmae = &bp->init_dmae;
184         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
185         int cnt = 200;
186
187         if (!bp->dmae_ready) {
188                 u32 *data = bnx2x_sp(bp, wb_data[0]);
189
190                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
191                    "  using indirect\n", dst_addr, len32);
192                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
193                 return;
194         }
195
196         mutex_lock(&bp->dmae_mutex);
197
198         memset(dmae, 0, sizeof(struct dmae_command));
199
200         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
201                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
202                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
203 #ifdef __BIG_ENDIAN
204                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
205 #else
206                         DMAE_CMD_ENDIANITY_DW_SWAP |
207 #endif
208                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
209                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
210         dmae->src_addr_lo = U64_LO(dma_addr);
211         dmae->src_addr_hi = U64_HI(dma_addr);
212         dmae->dst_addr_lo = dst_addr >> 2;
213         dmae->dst_addr_hi = 0;
214         dmae->len = len32;
215         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
216         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
217         dmae->comp_val = DMAE_COMP_VAL;
218
219         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
220            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
221                     "dst_addr [%x:%08x (%08x)]\n"
222            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
223            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
224            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
225            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
226         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
227            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
228            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
229
230         *wb_comp = 0;
231
232         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
233
234         udelay(5);
235
236         while (*wb_comp != DMAE_COMP_VAL) {
237                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
238
239                 if (!cnt) {
240                         BNX2X_ERR("DMAE timeout!\n");
241                         break;
242                 }
243                 cnt--;
244                 /* adjust delay for emulation/FPGA */
245                 if (CHIP_REV_IS_SLOW(bp))
246                         msleep(100);
247                 else
248                         udelay(5);
249         }
250
251         mutex_unlock(&bp->dmae_mutex);
252 }
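/*
 * Completion protocol: *wb_comp is zeroed before the command is posted
 * and the DMAE writes DMAE_COMP_VAL to comp_addr when the copy is done.
 * With cnt = 200 the polling loop waits roughly 200 * 5us ~ 1ms on real
 * silicon (and about 20s with the 100ms emulation delay) before giving
 * up with "DMAE timeout!".
 */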
253
254 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
255 {
256         struct dmae_command *dmae = &bp->init_dmae;
257         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
258         int cnt = 200;
259
260         if (!bp->dmae_ready) {
261                 u32 *data = bnx2x_sp(bp, wb_data[0]);
262                 int i;
263
264                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
265                    "  using indirect\n", src_addr, len32);
266                 for (i = 0; i < len32; i++)
267                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
268                 return;
269         }
270
271         mutex_lock(&bp->dmae_mutex);
272
273         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
274         memset(dmae, 0, sizeof(struct dmae_command));
275
276         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
277                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
278                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
279 #ifdef __BIG_ENDIAN
280                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
281 #else
282                         DMAE_CMD_ENDIANITY_DW_SWAP |
283 #endif
284                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
285                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
286         dmae->src_addr_lo = src_addr >> 2;
287         dmae->src_addr_hi = 0;
288         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
289         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
290         dmae->len = len32;
291         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
292         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
293         dmae->comp_val = DMAE_COMP_VAL;
294
295         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
296            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
297                     "dst_addr [%x:%08x (%08x)]\n"
298            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
299            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
300            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
301            dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
302
303         *wb_comp = 0;
304
305         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
306
307         udelay(5);
308
309         while (*wb_comp != DMAE_COMP_VAL) {
310
311                 if (!cnt) {
312                         BNX2X_ERR("DMAE timeout!\n");
313                         break;
314                 }
315                 cnt--;
316                 /* adjust delay for emulation/FPGA */
317                 if (CHIP_REV_IS_SLOW(bp))
318                         msleep(100);
319                 else
320                         udelay(5);
321         }
322         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
323            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
324            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
325
326         mutex_unlock(&bp->dmae_mutex);
327 }
328
329 /* used only for slowpath so not inlined */
330 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
331 {
332         u32 wb_write[2];
333
334         wb_write[0] = val_hi;
335         wb_write[1] = val_lo;
336         REG_WR_DMAE(bp, reg, wb_write, 2);
337 }
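/*
 * "Wide bus" registers are 64 bits wide, so the two halves are handed
 * to the DMAE as a single 2-dword transfer (hi dword at the lower
 * address) instead of two independent 32-bit REG_WR()s.
 */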
338
339 #ifdef USE_WB_RD
340 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
341 {
342         u32 wb_data[2];
343
344         REG_RD_DMAE(bp, reg, wb_data, 2);
345
346         return HILO_U64(wb_data[0], wb_data[1]);
347 }
348 #endif
349
350 static int bnx2x_mc_assert(struct bnx2x *bp)
351 {
352         char last_idx;
353         int i, rc = 0;
354         u32 row0, row1, row2, row3;
355
356         /* XSTORM */
357         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
358                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
359         if (last_idx)
360                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
361
362         /* print the asserts */
363         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
364
365                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
366                               XSTORM_ASSERT_LIST_OFFSET(i));
367                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
368                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
369                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
370                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
371                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
372                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
373
374                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
375                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
376                                   " 0x%08x 0x%08x 0x%08x\n",
377                                   i, row3, row2, row1, row0);
378                         rc++;
379                 } else {
380                         break;
381                 }
382         }
383
384         /* TSTORM */
385         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
386                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
387         if (last_idx)
388                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
389
390         /* print the asserts */
391         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
392
393                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
394                               TSTORM_ASSERT_LIST_OFFSET(i));
395                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
396                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
397                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
398                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
399                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
400                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
401
402                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
403                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
404                                   " 0x%08x 0x%08x 0x%08x\n",
405                                   i, row3, row2, row1, row0);
406                         rc++;
407                 } else {
408                         break;
409                 }
410         }
411
412         /* CSTORM */
413         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
414                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
415         if (last_idx)
416                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
417
418         /* print the asserts */
419         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
420
421                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
422                               CSTORM_ASSERT_LIST_OFFSET(i));
423                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
424                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
425                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
426                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
427                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
428                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
429
430                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
431                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
432                                   " 0x%08x 0x%08x 0x%08x\n",
433                                   i, row3, row2, row1, row0);
434                         rc++;
435                 } else {
436                         break;
437                 }
438         }
439
440         /* USTORM */
441         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
442                            USTORM_ASSERT_LIST_INDEX_OFFSET);
443         if (last_idx)
444                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
445
446         /* print the asserts */
447         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
448
449                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
450                               USTORM_ASSERT_LIST_OFFSET(i));
451                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
452                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
453                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
454                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
455                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
456                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
457
458                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
459                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
460                                   " 0x%08x 0x%08x 0x%08x\n",
461                                   i, row3, row2, row1, row0);
462                         rc++;
463                 } else {
464                         break;
465                 }
466         }
467
468         return rc;
469 }
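/*
 * Each of the four storm processors (X/T/C/U) keeps an assert list in
 * its internal memory: a one-byte last index plus an array of 16-byte
 * entries (four dwords per assert).  The scans above stop at the first
 * entry whose first dword reads COMMON_ASM_INVALID_ASSERT_OPCODE, and
 * the function returns how many valid asserts were found in total.
 */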
470
471 static void bnx2x_fw_dump(struct bnx2x *bp)
472 {
473         u32 mark, offset;
474         __be32 data[9];
475         int word;
476
477         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
478         mark = ((mark + 0x3) & ~0x3);
479         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
480
481         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
482                 for (word = 0; word < 8; word++)
483                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
484                                                   offset + 4*word));
485                 data[8] = 0x0;
486                 printk(KERN_CONT "%s", (char *)data);
487         }
488         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
489                 for (word = 0; word < 8; word++)
490                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
491                                                   offset + 4*word));
492                 data[8] = 0x0;
493                 printk(KERN_CONT "%s", (char *)data);
494         }
495         printk("\n" KERN_ERR PFX "end of fw dump\n");
496 }
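/*
 * The MCP logs into a circular text buffer in its scratchpad RAM
 * (offsets 0xF108..0xF900): 'mark' is an absolute 0x08000000-based
 * pointer to the oldest data, so the dump prints in two passes - mark
 * to the end of the buffer, then buffer start back up to mark - with
 * htonl() restoring byte order so the dwords read as text.  The odd
 * trailing KERN_ERR in the "begin" format is deliberate on kernels of
 * this vintage: printk() parses an <N> level at every new line, so it
 * keeps the dump text itself at error level.
 */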
497
498 static void bnx2x_panic_dump(struct bnx2x *bp)
499 {
500         int i;
501         u16 j, start, end;
502
503         bp->stats_state = STATS_STATE_DISABLED;
504         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
505
506         BNX2X_ERR("begin crash dump -----------------\n");
507
508         /* Indices */
509         /* Common */
510         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
511                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
512                   "  spq_prod_idx(%u)\n",
513                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
514                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
515
516         /* Rx */
517         for_each_rx_queue(bp, i) {
518                 struct bnx2x_fastpath *fp = &bp->fp[i];
519
520                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
521                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
522                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
523                           i, fp->rx_bd_prod, fp->rx_bd_cons,
524                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
525                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
526                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
527                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
528                           fp->rx_sge_prod, fp->last_max_sge,
529                           le16_to_cpu(fp->fp_u_idx),
530                           fp->status_blk->u_status_block.status_block_index);
531         }
532
533         /* Tx */
534         for_each_tx_queue(bp, i) {
535                 struct bnx2x_fastpath *fp = &bp->fp[i];
536                 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
537
538                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
539                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
540                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
541                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
542                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
543                           "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
544                           fp->status_blk->c_status_block.status_block_index,
545                           hw_prods->packets_prod, hw_prods->bds_prod);
546         }
547
548         /* Rings */
549         /* Rx */
550         for_each_rx_queue(bp, i) {
551                 struct bnx2x_fastpath *fp = &bp->fp[i];
552
553                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
554                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
555                 for (j = start; j != end; j = RX_BD(j + 1)) {
556                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
557                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
558
559                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
560                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
561                 }
562
563                 start = RX_SGE(fp->rx_sge_prod);
564                 end = RX_SGE(fp->last_max_sge);
565                 for (j = start; j != end; j = RX_SGE(j + 1)) {
566                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
567                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
568
569                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
570                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
571                 }
572
573                 start = RCQ_BD(fp->rx_comp_cons - 10);
574                 end = RCQ_BD(fp->rx_comp_cons + 503);
575                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
576                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
577
578                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
579                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
580                 }
581         }
582
583         /* Tx */
584         for_each_tx_queue(bp, i) {
585                 struct bnx2x_fastpath *fp = &bp->fp[i];
586
587                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
588                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
589                 for (j = start; j != end; j = TX_BD(j + 1)) {
590                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
591
592                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
593                                   i, j, sw_bd->skb, sw_bd->first_bd);
594                 }
595
596                 start = TX_BD(fp->tx_bd_cons - 10);
597                 end = TX_BD(fp->tx_bd_cons + 254);
598                 for (j = start; j != end; j = TX_BD(j + 1)) {
599                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
600
601                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
602                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
603                 }
604         }
605
606         bnx2x_fw_dump(bp);
607         bnx2x_mc_assert(bp);
608         BNX2X_ERR("end crash dump -----------------\n");
609 }
610
611 static void bnx2x_int_enable(struct bnx2x *bp)
612 {
613         int port = BP_PORT(bp);
614         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
615         u32 val = REG_RD(bp, addr);
616         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
617         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
618
619         if (msix) {
620                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
621                          HC_CONFIG_0_REG_INT_LINE_EN_0);
622                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
623                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
624         } else if (msi) {
625                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
626                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
627                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
628                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
629         } else {
630                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
631                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
632                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
633                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
634
635                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
636                    val, port, addr);
637
638                 REG_WR(bp, addr, val);
639
640                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
641         }
642
643         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
644            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
645
646         REG_WR(bp, addr, val);
647
648         if (CHIP_IS_E1H(bp)) {
649                 /* init leading/trailing edge */
650                 if (IS_E1HMF(bp)) {
651                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
652                         if (bp->port.pmf)
653                                 /* enable nig and gpio3 attention */
654                                 val |= 0x1100;
655                 } else
656                         val = 0xffff;
657
658                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
659                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
660         }
661 }
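/*
 * HC_CONFIG summary for the three interrupt modes:
 *   MSI-X - MSI/MSI-X enable + attention bits; single-ISR and the INTx
 *           line are turned off.
 *   MSI   - single ISR + MSI/MSI-X enable + attention bits.
 *   INTx  - everything is enabled first (so an interrupt can still fire
 *           while the mode is switched), then the value is rewritten
 *           with the MSI/MSI-X enable bit stripped.
 * On E1H the leading/trailing edge masks are also programmed: in
 * multi-function mode only this VN's attention bit (plus NIG/GPIO3 for
 * the PMF) is enabled, otherwise all 16 bits are.
 */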
662
663 static void bnx2x_int_disable(struct bnx2x *bp)
664 {
665         int port = BP_PORT(bp);
666         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
667         u32 val = REG_RD(bp, addr);
668
669         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
670                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
671                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
672                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
673
674         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
675            val, port, addr);
676
677         /* flush all outstanding writes */
678         mmiowb();
679
680         REG_WR(bp, addr, val);
681         if (REG_RD(bp, addr) != val)
682                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
683
684 }
685
686 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
687 {
688         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
689         int i, offset;
690
691         /* disable interrupt handling */
692         atomic_inc(&bp->intr_sem);
693         if (disable_hw)
694                 /* prevent the HW from sending interrupts */
695                 bnx2x_int_disable(bp);
696
697         /* make sure all ISRs are done */
698         if (msix) {
699                 synchronize_irq(bp->msix_table[0].vector);
700                 offset = 1;
701                 for_each_queue(bp, i)
702                         synchronize_irq(bp->msix_table[i + offset].vector);
703         } else
704                 synchronize_irq(bp->pdev->irq);
705
706         /* make sure sp_task is not running */
707         cancel_delayed_work(&bp->sp_task);
708         flush_workqueue(bnx2x_wq);
709 }
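/*
 * The quiesce order above matters: bumping intr_sem first makes any ISR
 * that still runs bail out early, the optional HW mask stops interrupts
 * at the source, synchronize_irq() waits out handlers already in flight
 * (the slowpath vector in msix_table[0] plus one vector per queue under
 * MSI-X), and only then is the slowpath work item cancelled and the
 * workqueue flushed.
 */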
710
711 /* fast path */
712
713 /*
714  * General service functions
715  */
716
717 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
718                                 u8 storm, u16 index, u8 op, u8 update)
719 {
720         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
721                        COMMAND_REG_INT_ACK);
722         struct igu_ack_register igu_ack;
723
724         igu_ack.status_block_index = index;
725         igu_ack.sb_id_and_flags =
726                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
727                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
728                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
729                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
730
731         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
732            (*(u32 *)&igu_ack), hc_addr);
733         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
734 }
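/*
 * The ack is one 32-bit write: the status block index in the low half
 * and sb_id/storm/update/op packed into sb_id_and_flags.  A typical
 * call from the RX path looks like (arguments illustrative):
 *
 *	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
 *		     le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
 */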
735
736 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
737 {
738         struct host_status_block *fpsb = fp->status_blk;
739         u16 rc = 0;
740
741         barrier(); /* status block is written to by the chip */
742         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
743                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
744                 rc |= 1;
745         }
746         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
747                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
748                 rc |= 2;
749         }
750         return rc;
751 }
752
753 static u16 bnx2x_ack_int(struct bnx2x *bp)
754 {
755         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
756                        COMMAND_REG_SIMD_MASK);
757         u32 result = REG_RD(bp, hc_addr);
758
759         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
760            result, hc_addr);
761
762         return result;
763 }
764
765
766 /*
767  * fast path service functions
768  */
769
770 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
771 {
772         u16 tx_cons_sb;
773
774         /* Tell compiler that status block fields can change */
775         barrier();
776         tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
777         return (fp->tx_pkt_cons != tx_cons_sb);
778 }
779
780 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
781 {
782         /* Tell compiler that consumer and producer can change */
783         barrier();
784         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
785 }
786
787 /* free skb in the packet ring at pos idx
788  * return idx of last bd freed
789  */
790 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
791                              u16 idx)
792 {
793         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
794         struct eth_tx_bd *tx_bd;
795         struct sk_buff *skb = tx_buf->skb;
796         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
797         int nbd;
798
799         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
800            idx, tx_buf, skb);
801
802         /* unmap first bd */
803         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
804         tx_bd = &fp->tx_desc_ring[bd_idx];
805         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
806                          BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
807
808         nbd = le16_to_cpu(tx_bd->nbd) - 1;
809         new_cons = nbd + tx_buf->first_bd;
810 #ifdef BNX2X_STOP_ON_ERROR
811         if (nbd > (MAX_SKB_FRAGS + 2)) {
812                 BNX2X_ERR("BAD nbd!\n");
813                 bnx2x_panic();
814         }
815 #endif
816
817         /* Skip a parse bd and the TSO split header bd
818            since they have no mapping */
819         if (nbd)
820                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
821
822         if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
823                                            ETH_TX_BD_FLAGS_TCP_CSUM |
824                                            ETH_TX_BD_FLAGS_SW_LSO)) {
825                 if (--nbd)
826                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
827                 tx_bd = &fp->tx_desc_ring[bd_idx];
828                 /* is this a TSO split header bd? */
829                 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
830                         if (--nbd)
831                                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
832                 }
833         }
834
835         /* now free frags */
836         while (nbd > 0) {
837
838                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
839                 tx_bd = &fp->tx_desc_ring[bd_idx];
840                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
841                                BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
842                 if (--nbd)
843                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
844         }
845
846         /* release skb */
847         WARN_ON(!skb);
848         dev_kfree_skb(skb);
849         tx_buf->first_bd = 0;
850         tx_buf->skb = NULL;
851
852         return new_cons;
853 }
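/*
 * Accounting sketch: nbd is read from the first BD (total BDs of the
 * packet minus the one just unmapped), so first_bd + nbd is, roughly,
 * the consumer value just past this packet; bnx2x_tx_int() adopts the
 * returned value as the new tx_bd_cons.
 */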
854
855 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
856 {
857         s16 used;
858         u16 prod;
859         u16 cons;
860
861         barrier(); /* Tell compiler that prod and cons can change */
862         prod = fp->tx_bd_prod;
863         cons = fp->tx_bd_cons;
864
865         /* NUM_TX_RINGS = number of "next page" entries;
866            counting them as used makes the result a safe threshold */
867         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
868
869 #ifdef BNX2X_STOP_ON_ERROR
870         WARN_ON(used < 0);
871         WARN_ON(used > fp->bp->tx_ring_size);
872         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
873 #endif
874
875         return (s16)(fp->bp->tx_ring_size) - used;
876 }
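/*
 * Worked example, assuming tx_ring_size = 4096, prod = 100, cons = 50:
 * used = 50 + NUM_TX_RINGS "next page" BDs, so with 16 rings the
 * function reports 4096 - 66 = 4030 BDs available - deliberately
 * pessimistic, since the per-page "next" BDs can never carry data.
 */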
877
878 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
879 {
880         struct bnx2x *bp = fp->bp;
881         struct netdev_queue *txq;
882         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
883         int done = 0;
884
885 #ifdef BNX2X_STOP_ON_ERROR
886         if (unlikely(bp->panic))
887                 return;
888 #endif
889
890         txq = netdev_get_tx_queue(bp->dev, fp->index);
891         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
892         sw_cons = fp->tx_pkt_cons;
893
894         while (sw_cons != hw_cons) {
895                 u16 pkt_cons;
896
897                 pkt_cons = TX_BD(sw_cons);
898
899                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
900
901                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
902                    hw_cons, sw_cons, pkt_cons);
903
904 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
905                         rmb();
906                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
907                 }
908 */
909                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
910                 sw_cons++;
911                 done++;
912         }
913
914         fp->tx_pkt_cons = sw_cons;
915         fp->tx_bd_cons = bd_cons;
916
917         /* TBD need a thresh? */
918         if (unlikely(netif_tx_queue_stopped(txq))) {
919
920                 __netif_tx_lock(txq, smp_processor_id());
921
922                 /* Need to make the tx_bd_cons update visible to start_xmit()
923                  * before checking for netif_tx_queue_stopped().  Without the
924                  * memory barrier, there is a small possibility that
925                  * start_xmit() will miss it and cause the queue to be stopped
926                  * forever.
927                  */
928                 smp_mb();
929
930                 if ((netif_tx_queue_stopped(txq)) &&
931                     (bp->state == BNX2X_STATE_OPEN) &&
932                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
933                         netif_tx_wake_queue(txq);
934
935                 __netif_tx_unlock(txq);
936         }
937 }
938
939
940 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
941                            union eth_rx_cqe *rr_cqe)
942 {
943         struct bnx2x *bp = fp->bp;
944         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
945         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
946
947         DP(BNX2X_MSG_SP,
948            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
949            fp->index, cid, command, bp->state,
950            rr_cqe->ramrod_cqe.ramrod_type);
951
952         bp->spq_left++;
953
954         if (fp->index) {
955                 switch (command | fp->state) {
956                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
957                                                 BNX2X_FP_STATE_OPENING):
958                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
959                            cid);
960                         fp->state = BNX2X_FP_STATE_OPEN;
961                         break;
962
963                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
964                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
965                            cid);
966                         fp->state = BNX2X_FP_STATE_HALTED;
967                         break;
968
969                 default:
970                         BNX2X_ERR("unexpected MC reply (%d)  "
971                                   "fp->state is %x\n", command, fp->state);
972                         break;
973                 }
974                 mb(); /* force bnx2x_wait_ramrod() to see the change */
975                 return;
976         }
977
978         switch (command | bp->state) {
979         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
980                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
981                 bp->state = BNX2X_STATE_OPEN;
982                 break;
983
984         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
985                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
986                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
987                 fp->state = BNX2X_FP_STATE_HALTED;
988                 break;
989
990         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
991                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
992                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
993                 break;
994
995
996         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
997         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
998                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
999                 bp->set_mac_pending = 0;
1000                 break;
1001
1002         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1003                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1004                 break;
1005
1006         default:
1007                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1008                           command, bp->state);
1009                 break;
1010         }
1011         mb(); /* force bnx2x_wait_ramrod() to see the change */
1012 }
1013
1014 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1015                                      struct bnx2x_fastpath *fp, u16 index)
1016 {
1017         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1018         struct page *page = sw_buf->page;
1019         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1020
1021         /* Skip "next page" elements */
1022         if (!page)
1023                 return;
1024
1025         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1026                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1027         __free_pages(page, PAGES_PER_SGE_SHIFT);
1028
1029         sw_buf->page = NULL;
1030         sge->addr_hi = 0;
1031         sge->addr_lo = 0;
1032 }
1033
1034 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1035                                            struct bnx2x_fastpath *fp, int last)
1036 {
1037         int i;
1038
1039         for (i = 0; i < last; i++)
1040                 bnx2x_free_rx_sge(bp, fp, i);
1041 }
1042
1043 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1044                                      struct bnx2x_fastpath *fp, u16 index)
1045 {
1046         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1047         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1048         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1049         dma_addr_t mapping;
1050
1051         if (unlikely(page == NULL))
1052                 return -ENOMEM;
1053
1054         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1055                                PCI_DMA_FROMDEVICE);
1056         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1057                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1058                 return -ENOMEM;
1059         }
1060
1061         sw_buf->page = page;
1062         pci_unmap_addr_set(sw_buf, mapping, mapping);
1063
1064         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1065         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1066
1067         return 0;
1068 }
1069
1070 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1071                                      struct bnx2x_fastpath *fp, u16 index)
1072 {
1073         struct sk_buff *skb;
1074         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1075         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1076         dma_addr_t mapping;
1077
1078         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1079         if (unlikely(skb == NULL))
1080                 return -ENOMEM;
1081
1082         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1083                                  PCI_DMA_FROMDEVICE);
1084         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1085                 dev_kfree_skb(skb);
1086                 return -ENOMEM;
1087         }
1088
1089         rx_buf->skb = skb;
1090         pci_unmap_addr_set(rx_buf, mapping, mapping);
1091
1092         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1093         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1094
1095         return 0;
1096 }
1097
1098 /* note that we are not allocating a new skb,
1099  * we are just moving one from cons to prod
1100  * we are not creating a new mapping,
1101  * so there is no need to check for dma_mapping_error().
1102  */
1103 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1104                                struct sk_buff *skb, u16 cons, u16 prod)
1105 {
1106         struct bnx2x *bp = fp->bp;
1107         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1108         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1109         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1110         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1111
1112         pci_dma_sync_single_for_device(bp->pdev,
1113                                        pci_unmap_addr(cons_rx_buf, mapping),
1114                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1115
1116         prod_rx_buf->skb = cons_rx_buf->skb;
1117         pci_unmap_addr_set(prod_rx_buf, mapping,
1118                            pci_unmap_addr(cons_rx_buf, mapping));
1119         *prod_bd = *cons_bd;
1120 }
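/*
 * Only the first RX_COPY_THRESH bytes are synced back to the device
 * here - the idea being that only this prefix may have been pulled into
 * the CPU cache (header inspection / the short-packet copy check), so
 * the rest of the mapping is still device-clean.
 */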
1121
1122 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1123                                              u16 idx)
1124 {
1125         u16 last_max = fp->last_max_sge;
1126
1127         if (SUB_S16(idx, last_max) > 0)
1128                 fp->last_max_sge = idx;
1129 }
1130
1131 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1132 {
1133         int i, j;
1134
1135         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1136                 int idx = RX_SGE_CNT * i - 1;
1137
1138                 for (j = 0; j < 2; j++) {
1139                         SGE_MASK_CLEAR_BIT(fp, idx);
1140                         idx--;
1141                 }
1142         }
1143 }
1144
1145 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1146                                   struct eth_fast_path_rx_cqe *fp_cqe)
1147 {
1148         struct bnx2x *bp = fp->bp;
1149         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1150                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1151                       SGE_PAGE_SHIFT;
1152         u16 last_max, last_elem, first_elem;
1153         u16 delta = 0;
1154         u16 i;
1155
1156         if (!sge_len)
1157                 return;
1158
1159         /* First mark all used pages */
1160         for (i = 0; i < sge_len; i++)
1161                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1162
1163         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1164            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1165
1166         /* Here we assume that the last SGE index is the biggest */
1167         prefetch((void *)(fp->sge_mask));
1168         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1169
1170         last_max = RX_SGE(fp->last_max_sge);
1171         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1172         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1173
1174         /* If ring is not full */
1175         if (last_elem + 1 != first_elem)
1176                 last_elem++;
1177
1178         /* Now update the prod */
1179         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1180                 if (likely(fp->sge_mask[i]))
1181                         break;
1182
1183                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1184                 delta += RX_SGE_MASK_ELEM_SZ;
1185         }
1186
1187         if (delta > 0) {
1188                 fp->rx_sge_prod += delta;
1189                 /* clear page-end entries */
1190                 bnx2x_clear_sge_mask_next_elems(fp);
1191         }
1192
1193         DP(NETIF_MSG_RX_STATUS,
1194            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1195            fp->last_max_sge, fp->rx_sge_prod);
1196 }
1197
1198 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1199 {
1200         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1201         memset(fp->sge_mask, 0xff,
1202                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1203
1204         /* Clear the last two indices in each page (mask bits back
1205            to 0): these are the indices that correspond to the "next"
1206            element, hence will never be indicated by the FW and should
1207            be removed from the calculations. */
1208         bnx2x_clear_sge_mask_next_elems(fp);
1209 }
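/*
 * Net effect: every mask bit starts as 1 ("SGE free") except the bits
 * shadowing the two "next page" indices at the end of each SGE page,
 * which stay 0 because the FW never reports them (see
 * bnx2x_clear_sge_mask_next_elems() above).
 */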
1210
1211 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1212                             struct sk_buff *skb, u16 cons, u16 prod)
1213 {
1214         struct bnx2x *bp = fp->bp;
1215         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1216         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1217         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1218         dma_addr_t mapping;
1219
1220         /* move empty skb from pool to prod and map it */
1221         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1222         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1223                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1224         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1225
1226         /* move partial skb from cons to pool (don't unmap yet) */
1227         fp->tpa_pool[queue] = *cons_rx_buf;
1228
1229         /* mark bin state as start - print error if current state != stop */
1230         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1231                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1232
1233         fp->tpa_state[queue] = BNX2X_TPA_START;
1234
1235         /* point prod_bd to new skb */
1236         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1237         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1238
1239 #ifdef BNX2X_STOP_ON_ERROR
1240         fp->tpa_queue_used |= (1 << queue);
1241 #ifdef __powerpc64__
1242         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1243 #else
1244         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1245 #endif
1246            fp->tpa_queue_used);
1247 #endif
1248 }
1249
1250 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1251                                struct sk_buff *skb,
1252                                struct eth_fast_path_rx_cqe *fp_cqe,
1253                                u16 cqe_idx)
1254 {
1255         struct sw_rx_page *rx_pg, old_rx_pg;
1256         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1257         u32 i, frag_len, frag_size, pages;
1258         int err;
1259         int j;
1260
1261         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1262         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1263
1264         /* This is needed in order to enable forwarding support */
1265         if (frag_size)
1266                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1267                                                max(frag_size, (u32)len_on_bd));
1268
1269 #ifdef BNX2X_STOP_ON_ERROR
1270         if (pages >
1271             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1272                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1273                           pages, cqe_idx);
1274                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1275                           fp_cqe->pkt_len, len_on_bd);
1276                 bnx2x_panic();
1277                 return -EINVAL;
1278         }
1279 #endif
1280
1281         /* Run through the SGL and compose the fragmented skb */
1282         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1283                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1284
1285                 /* FW gives the indices of the SGE as if the ring is an array
1286                    (meaning that "next" element will consume 2 indices) */
1287                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1288                 rx_pg = &fp->rx_page_ring[sge_idx];
1289                 old_rx_pg = *rx_pg;
1290
1291                 /* If we fail to allocate a substitute page, we simply stop
1292                    where we are and drop the whole packet */
1293                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1294                 if (unlikely(err)) {
1295                         fp->eth_q_stats.rx_skb_alloc_failed++;
1296                         return err;
1297                 }
1298
1299                 /* Unmap the page as we are going to pass it to the stack */
1300                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1301                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1302
1303                 /* Add one frag and update the appropriate fields in the skb */
1304                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1305
1306                 skb->data_len += frag_len;
1307                 skb->truesize += frag_len;
1308                 skb->len += frag_len;
1309
1310                 frag_size -= frag_len;
1311         }
1312
1313         return 0;
1314 }
1315
1316 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1317                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1318                            u16 cqe_idx)
1319 {
1320         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1321         struct sk_buff *skb = rx_buf->skb;
1322         /* alloc new skb */
1323         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1324
1325         /* Unmap skb in the pool anyway, as we are going to change
1326            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1327            fails. */
1328         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1329                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1330
1331         if (likely(new_skb)) {
1332                 /* fix ip xsum and give it to the stack */
1333                 /* (no need to map the new skb) */
1334 #ifdef BCM_VLAN
1335                 int is_vlan_cqe =
1336                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1337                          PARSING_FLAGS_VLAN);
1338                 int is_not_hwaccel_vlan_cqe =
1339                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1340 #endif
1341
1342                 prefetch(skb);
1343                 prefetch(((char *)(skb)) + 128);
1344
1345 #ifdef BNX2X_STOP_ON_ERROR
1346                 if (pad + len > bp->rx_buf_size) {
1347                         BNX2X_ERR("skb_put is about to fail...  "
1348                                   "pad %d  len %d  rx_buf_size %d\n",
1349                                   pad, len, bp->rx_buf_size);
1350                         bnx2x_panic();
1351                         return;
1352                 }
1353 #endif
1354
1355                 skb_reserve(skb, pad);
1356                 skb_put(skb, len);
1357
1358                 skb->protocol = eth_type_trans(skb, bp->dev);
1359                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1360
1361                 {
1362                         struct iphdr *iph;
1363
1364                         iph = (struct iphdr *)skb->data;
1365 #ifdef BCM_VLAN
1366                         /* If there is no Rx VLAN offloading -
1367                            take VLAN tag into an account */
1368                         if (unlikely(is_not_hwaccel_vlan_cqe))
1369                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1370 #endif
1371                         iph->check = 0;
1372                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1373                 }
1374
1375                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1376                                          &cqe->fast_path_cqe, cqe_idx)) {
1377 #ifdef BCM_VLAN
1378                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1379                             (!is_not_hwaccel_vlan_cqe))
1380                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1381                                                 le16_to_cpu(cqe->fast_path_cqe.
1382                                                             vlan_tag));
1383                         else
1384 #endif
1385                                 netif_receive_skb(skb);
1386                 } else {
1387                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1388                            " - dropping packet!\n");
1389                         dev_kfree_skb(skb);
1390                 }
1391
1392
1393                 /* put new skb in bin */
1394                 fp->tpa_pool[queue].skb = new_skb;
1395
1396         } else {
1397                 /* else drop the packet and keep the buffer in the bin */
1398                 DP(NETIF_MSG_RX_STATUS,
1399                    "Failed to allocate new skb - dropping packet!\n");
1400                 fp->eth_q_stats.rx_skb_alloc_failed++;
1401         }
1402
1403         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1404 }
1405
1406 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1407                                         struct bnx2x_fastpath *fp,
1408                                         u16 bd_prod, u16 rx_comp_prod,
1409                                         u16 rx_sge_prod)
1410 {
1411         struct ustorm_eth_rx_producers rx_prods = {0};
1412         int i;
1413
1414         /* Update producers */
1415         rx_prods.bd_prod = bd_prod;
1416         rx_prods.cqe_prod = rx_comp_prod;
1417         rx_prods.sge_prod = rx_sge_prod;
1418
1419         /*
1420          * Make sure that the BD and SGE data is updated before updating the
1421          * producers since FW might read the BD/SGE right after the producer
1422          * is updated.
1423          * This is only applicable for weak-ordered memory model archs such
1424          * as IA-64. The following barrier is also mandatory since the FW
1425          * assumes BDs must have buffers.
1426          */
1427         wmb();
1428
1429         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1430                 REG_WR(bp, BAR_USTRORM_INTMEM +
1431                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1432                        ((u32 *)&rx_prods)[i]);
1433
1434         mmiowb(); /* keep prod updates ordered */
1435
1436         DP(NETIF_MSG_RX_STATUS,
1437            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1438            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1439 }
1440
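/* RX completion handler: walk the RCQ from the SW consumer up to the
 * HW consumer taken from the status block, dispatching slowpath CQEs,
 * TPA start/stop CQEs and regular packets, up to the NAPI budget.
 */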
1441 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1442 {
1443         struct bnx2x *bp = fp->bp;
1444         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1445         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1446         int rx_pkt = 0;
1447
1448 #ifdef BNX2X_STOP_ON_ERROR
1449         if (unlikely(bp->panic))
1450                 return 0;
1451 #endif
1452
1453         /* the CQ "next element" is the same size as a regular element,
1454            that's why it's OK to simply skip over it here */
1455         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1456         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1457                 hw_comp_cons++;
1458
1459         bd_cons = fp->rx_bd_cons;
1460         bd_prod = fp->rx_bd_prod;
1461         bd_prod_fw = bd_prod;
1462         sw_comp_cons = fp->rx_comp_cons;
1463         sw_comp_prod = fp->rx_comp_prod;
1464
1465         /* Memory barrier necessary as speculative reads of the rx
1466          * buffer can be ahead of the index in the status block
1467          */
1468         rmb();
1469
1470         DP(NETIF_MSG_RX_STATUS,
1471            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1472            fp->index, hw_comp_cons, sw_comp_cons);
1473
1474         while (sw_comp_cons != hw_comp_cons) {
1475                 struct sw_rx_bd *rx_buf = NULL;
1476                 struct sk_buff *skb;
1477                 union eth_rx_cqe *cqe;
1478                 u8 cqe_fp_flags;
1479                 u16 len, pad;
1480
1481                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1482                 bd_prod = RX_BD(bd_prod);
1483                 bd_cons = RX_BD(bd_cons);
1484
1485                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1486                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1487
1488                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1489                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1490                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1491                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1492                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1493                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1494
1495                 /* is this a slowpath msg? */
1496                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1497                         bnx2x_sp_event(fp, cqe);
1498                         goto next_cqe;
1499
1500                 /* this is an rx packet */
1501                 } else {
1502                         rx_buf = &fp->rx_buf_ring[bd_cons];
1503                         skb = rx_buf->skb;
1504                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1505                         pad = cqe->fast_path_cqe.placement_offset;
1506
1507                         /* If CQE is marked both TPA_START and TPA_END
1508                            it is a non-TPA CQE */
1509                         if ((!fp->disable_tpa) &&
1510                             (TPA_TYPE(cqe_fp_flags) !=
1511                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1512                                 u16 queue = cqe->fast_path_cqe.queue_index;
1513
1514                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1515                                         DP(NETIF_MSG_RX_STATUS,
1516                                            "calling tpa_start on queue %d\n",
1517                                            queue);
1518
1519                                         bnx2x_tpa_start(fp, queue, skb,
1520                                                         bd_cons, bd_prod);
1521                                         goto next_rx;
1522                                 }
1523
1524                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1525                                         DP(NETIF_MSG_RX_STATUS,
1526                                            "calling tpa_stop on queue %d\n",
1527                                            queue);
1528
1529                                         if (!BNX2X_RX_SUM_FIX(cqe))
1530                                                 BNX2X_ERR("STOP on non-TCP "
1531                                                           "data\n");
1532
1533                                         /* This is the size of the linear
1534                                            data on this skb */
1535                                         len = le16_to_cpu(cqe->fast_path_cqe.
1536                                                                 len_on_bd);
1537                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1538                                                     len, cqe, comp_ring_cons);
1539 #ifdef BNX2X_STOP_ON_ERROR
1540                                         if (bp->panic)
1541                                                 return -EINVAL;
1542 #endif
1543
1544                                         bnx2x_update_sge_prod(fp,
1545                                                         &cqe->fast_path_cqe);
1546                                         goto next_cqe;
1547                                 }
1548                         }
1549
1550                         pci_dma_sync_single_for_device(bp->pdev,
1551                                         pci_unmap_addr(rx_buf, mapping),
1552                                                        pad + RX_COPY_THRESH,
1553                                                        PCI_DMA_FROMDEVICE);
1554                         prefetch(skb);
1555                         prefetch(((char *)(skb)) + 128);
1556
1557                         /* is this an error packet? */
1558                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1559                                 DP(NETIF_MSG_RX_ERR,
1560                                    "ERROR  flags %x  rx packet %u\n",
1561                                    cqe_fp_flags, sw_comp_cons);
1562                                 fp->eth_q_stats.rx_err_discard_pkt++;
1563                                 goto reuse_rx;
1564                         }
1565
1566                         /* Since we don't have a jumbo ring,
1567                          * copy small packets if mtu > 1500
1568                          */
1569                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1570                             (len <= RX_COPY_THRESH)) {
1571                                 struct sk_buff *new_skb;
1572
1573                                 new_skb = netdev_alloc_skb(bp->dev,
1574                                                            len + pad);
1575                                 if (new_skb == NULL) {
1576                                         DP(NETIF_MSG_RX_ERR,
1577                                            "ERROR  packet dropped "
1578                                            "because of alloc failure\n");
1579                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1580                                         goto reuse_rx;
1581                                 }
1582
1583                                 /* aligned copy */
1584                                 skb_copy_from_linear_data_offset(skb, pad,
1585                                                     new_skb->data + pad, len);
1586                                 skb_reserve(new_skb, pad);
1587                                 skb_put(new_skb, len);
1588
1589                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1590
1591                                 skb = new_skb;
1592
1593                         } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1594                                 pci_unmap_single(bp->pdev,
1595                                         pci_unmap_addr(rx_buf, mapping),
1596                                                  bp->rx_buf_size,
1597                                                  PCI_DMA_FROMDEVICE);
1598                                 skb_reserve(skb, pad);
1599                                 skb_put(skb, len);
1600
1601                         } else {
1602                                 DP(NETIF_MSG_RX_ERR,
1603                                    "ERROR  packet dropped because "
1604                                    "of alloc failure\n");
1605                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1606 reuse_rx:
1607                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1608                                 goto next_rx;
1609                         }
1610
1611                         skb->protocol = eth_type_trans(skb, bp->dev);
1612
1613                         skb->ip_summed = CHECKSUM_NONE;
1614                         if (bp->rx_csum) {
1615                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1616                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1617                                 else
1618                                         fp->eth_q_stats.hw_csum_err++;
1619                         }
1620                 }
1621
1622                 skb_record_rx_queue(skb, fp->index);
1623 #ifdef BCM_VLAN
1624                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1625                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1626                      PARSING_FLAGS_VLAN))
1627                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1628                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1629                 else
1630 #endif
1631                         netif_receive_skb(skb);
1632
1633
1634 next_rx:
1635                 rx_buf->skb = NULL;
1636
1637                 bd_cons = NEXT_RX_IDX(bd_cons);
1638                 bd_prod = NEXT_RX_IDX(bd_prod);
1639                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1640                 rx_pkt++;
1641 next_cqe:
1642                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1643                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1644
1645                 if (rx_pkt == budget)
1646                         break;
1647         } /* while */
1648
1649         fp->rx_bd_cons = bd_cons;
1650         fp->rx_bd_prod = bd_prod_fw;
1651         fp->rx_comp_cons = sw_comp_cons;
1652         fp->rx_comp_prod = sw_comp_prod;
1653
1654         /* Update producers */
1655         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1656                              fp->rx_sge_prod);
1657
1658         fp->rx_pkt += rx_pkt;
1659         fp->rx_calls++;
1660
1661         return rx_pkt;
1662 }
1663
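/* MSI-X fastpath handler - one vector per fastpath queue.  Ack the
 * status block with interrupts disabled and let NAPI do the real work.
 */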
1664 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1665 {
1666         struct bnx2x_fastpath *fp = fp_cookie;
1667         struct bnx2x *bp = fp->bp;
1668         int index = fp->index;
1669
1670         /* Return here if interrupt is disabled */
1671         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1672                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1673                 return IRQ_HANDLED;
1674         }
1675
1676         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1677            index, fp->sb_id);
1678         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1679
1680 #ifdef BNX2X_STOP_ON_ERROR
1681         if (unlikely(bp->panic))
1682                 return IRQ_HANDLED;
1683 #endif
1684
1685         prefetch(fp->rx_cons_sb);
1686         prefetch(fp->tx_cons_sb);
1687         prefetch(&fp->status_blk->c_status_block.status_block_index);
1688         prefetch(&fp->status_blk->u_status_block.status_block_index);
1689
1690         napi_schedule(&bnx2x_fp(bp, index, napi));
1691
1692         return IRQ_HANDLED;
1693 }
1694
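/* INTA/MSI handler: the int ack returns a status bitmask - the fastpath
 * status block of queue 0 is signalled at bit (0x2 << sb_id), while
 * bit 0 indicates a slowpath event that is deferred to the sp_task.
 */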
1695 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1696 {
1697         struct bnx2x *bp = netdev_priv(dev_instance);
1698         u16 status = bnx2x_ack_int(bp);
1699         u16 mask;
1700
1701         /* Return here if interrupt is shared and it's not for us */
1702         if (unlikely(status == 0)) {
1703                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1704                 return IRQ_NONE;
1705         }
1706         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1707
1708         /* Return here if interrupt is disabled */
1709         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1710                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1711                 return IRQ_HANDLED;
1712         }
1713
1714 #ifdef BNX2X_STOP_ON_ERROR
1715         if (unlikely(bp->panic))
1716                 return IRQ_HANDLED;
1717 #endif
1718
1719         mask = 0x2 << bp->fp[0].sb_id;
1720         if (status & mask) {
1721                 struct bnx2x_fastpath *fp = &bp->fp[0];
1722
1723                 prefetch(fp->rx_cons_sb);
1724                 prefetch(fp->tx_cons_sb);
1725                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1726                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1727
1728                 napi_schedule(&bnx2x_fp(bp, 0, napi));
1729
1730                 status &= ~mask;
1731         }
1732
1733
1734         if (unlikely(status & 0x1)) {
1735                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1736
1737                 status &= ~0x1;
1738                 if (!status)
1739                         return IRQ_HANDLED;
1740         }
1741
1742         if (status)
1743                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1744                    status);
1745
1746         return IRQ_HANDLED;
1747 }
1748
1749 /* end of fast path */
1750
1751 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1752
1753 /* Link */
1754
1755 /*
1756  * General service functions
1757  */
1758
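/* Acquire one of the per-resource HW locks that arbitrate between the
 * driver functions (and the MCP).  Writing the resource bit to the
 * control register + 4 attempts the lock; reading it back as set means
 * the lock was granted.
 */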
1759 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1760 {
1761         u32 lock_status;
1762         u32 resource_bit = (1 << resource);
1763         int func = BP_FUNC(bp);
1764         u32 hw_lock_control_reg;
1765         int cnt;
1766
1767         /* Validating that the resource is within range */
1768         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1769                 DP(NETIF_MSG_HW,
1770                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1771                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1772                 return -EINVAL;
1773         }
1774
1775         if (func <= 5) {
1776                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1777         } else {
1778                 hw_lock_control_reg =
1779                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1780         }
1781
1782         /* Validating that the resource is not already taken */
1783         lock_status = REG_RD(bp, hw_lock_control_reg);
1784         if (lock_status & resource_bit) {
1785                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1786                    lock_status, resource_bit);
1787                 return -EEXIST;
1788         }
1789
1790         /* Try for 5 seconds, polling every 5ms */
1791         for (cnt = 0; cnt < 1000; cnt++) {
1792                 /* Try to acquire the lock */
1793                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1794                 lock_status = REG_RD(bp, hw_lock_control_reg);
1795                 if (lock_status & resource_bit)
1796                         return 0;
1797
1798                 msleep(5);
1799         }
1800         DP(NETIF_MSG_HW, "Timeout\n");
1801         return -EAGAIN;
1802 }
1803
1804 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1805 {
1806         u32 lock_status;
1807         u32 resource_bit = (1 << resource);
1808         int func = BP_FUNC(bp);
1809         u32 hw_lock_control_reg;
1810
1811         /* Validating that the resource is within range */
1812         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1813                 DP(NETIF_MSG_HW,
1814                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1815                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1816                 return -EINVAL;
1817         }
1818
1819         if (func <= 5) {
1820                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1821         } else {
1822                 hw_lock_control_reg =
1823                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1824         }
1825
1826         /* Validating that the resource is currently taken */
1827         lock_status = REG_RD(bp, hw_lock_control_reg);
1828         if (!(lock_status & resource_bit)) {
1829                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1830                    lock_status, resource_bit);
1831                 return -EFAULT;
1832         }
1833
1834         REG_WR(bp, hw_lock_control_reg, resource_bit);
1835         return 0;
1836 }
1837
1838 /* HW Lock for shared dual port PHYs */
1839 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1840 {
1841         mutex_lock(&bp->port.phy_mutex);
1842
1843         if (bp->port.need_hw_lock)
1844                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1845 }
1846
1847 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1848 {
1849         if (bp->port.need_hw_lock)
1850                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1851
1852         mutex_unlock(&bp->port.phy_mutex);
1853 }
1854
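/* GPIO accessors.  The pin-to-port mapping depends on the port-swap
 * straps, so the effective port is XORed with the swap configuration.
 */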
1855 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1856 {
1857         /* The GPIO should be swapped if swap register is set and active */
1858         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1859                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1860         int gpio_shift = gpio_num +
1861                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1862         u32 gpio_mask = (1 << gpio_shift);
1863         u32 gpio_reg;
1864         int value;
1865
1866         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1867                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1868                 return -EINVAL;
1869         }
1870
1871         /* read GPIO value */
1872         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1873
1874         /* get the requested pin value */
1875         if ((gpio_reg & gpio_mask) == gpio_mask)
1876                 value = 1;
1877         else
1878                 value = 0;
1879
1880         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1881
1882         return value;
1883 }
1884
1885 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1886 {
1887         /* The GPIO should be swapped if swap register is set and active */
1888         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1889                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1890         int gpio_shift = gpio_num +
1891                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1892         u32 gpio_mask = (1 << gpio_shift);
1893         u32 gpio_reg;
1894
1895         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1896                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1897                 return -EINVAL;
1898         }
1899
1900         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1901         /* read GPIO and mask off all but the float bits */
1902         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1903
1904         switch (mode) {
1905         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1906                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1907                    gpio_num, gpio_shift);
1908                 /* clear FLOAT and set CLR */
1909                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1910                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1911                 break;
1912
1913         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1914                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1915                    gpio_num, gpio_shift);
1916                 /* clear FLOAT and set SET */
1917                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1918                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1919                 break;
1920
1921         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1922                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1923                    gpio_num, gpio_shift);
1924                 /* set FLOAT */
1925                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1926                 break;
1927
1928         default:
1929                 break;
1930         }
1931
1932         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1933         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1934
1935         return 0;
1936 }
1937
1938 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1939 {
1940         /* The GPIO should be swapped if swap register is set and active */
1941         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1942                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1943         int gpio_shift = gpio_num +
1944                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1945         u32 gpio_mask = (1 << gpio_shift);
1946         u32 gpio_reg;
1947
1948         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1949                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1950                 return -EINVAL;
1951         }
1952
1953         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1954         /* read GPIO int */
1955         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1956
1957         switch (mode) {
1958         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1959                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1960                                    "output low\n", gpio_num, gpio_shift);
1961                 /* clear SET and set CLR */
1962                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1963                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1964                 break;
1965
1966         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1967                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1968                                    "output high\n", gpio_num, gpio_shift);
1969                 /* clear CLR and set SET */
1970                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1971                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1972                 break;
1973
1974         default:
1975                 break;
1976         }
1977
1978         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1979         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1980
1981         return 0;
1982 }
1983
1984 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1985 {
1986         u32 spio_mask = (1 << spio_num);
1987         u32 spio_reg;
1988
1989         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1990             (spio_num > MISC_REGISTERS_SPIO_7)) {
1991                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1992                 return -EINVAL;
1993         }
1994
1995         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1996         /* read SPIO and mask off all but the float bits */
1997         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1998
1999         switch (mode) {
2000         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2001                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2002                 /* clear FLOAT and set CLR */
2003                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2004                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2005                 break;
2006
2007         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2008                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2009                 /* clear FLOAT and set SET */
2010                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2011                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2012                 break;
2013
2014         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2015                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2016                 /* set FLOAT */
2017                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2018                 break;
2019
2020         default:
2021                 break;
2022         }
2023
2024         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2025         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2026
2027         return 0;
2028 }
2029
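/* Translate the negotiated IEEE flow control bits into the ethtool
 * Pause/Asym_Pause advertising flags reported for this port.
 */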
2030 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2031 {
2032         switch (bp->link_vars.ieee_fc &
2033                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2034         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2035                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2036                                           ADVERTISED_Pause);
2037                 break;
2038
2039         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2040                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2041                                          ADVERTISED_Pause);
2042                 break;
2043
2044         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2045                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2046                 break;
2047
2048         default:
2049                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2050                                           ADVERTISED_Pause);
2051                 break;
2052         }
2053 }
2054
2055 static void bnx2x_link_report(struct bnx2x *bp)
2056 {
2057         if (bp->link_vars.link_up) {
2058                 if (bp->state == BNX2X_STATE_OPEN)
2059                         netif_carrier_on(bp->dev);
2060                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2061
2062                 printk("%d Mbps ", bp->link_vars.line_speed);
2063
2064                 if (bp->link_vars.duplex == DUPLEX_FULL)
2065                         printk("full duplex");
2066                 else
2067                         printk("half duplex");
2068
2069                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2070                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2071                                 printk(", receive ");
2072                                 if (bp->link_vars.flow_ctrl &
2073                                     BNX2X_FLOW_CTRL_TX)
2074                                         printk("& transmit ");
2075                         } else {
2076                                 printk(", transmit ");
2077                         }
2078                         printk("flow control ON");
2079                 }
2080                 printk("\n");
2081
2082         } else { /* link_down */
2083                 netif_carrier_off(bp->dev);
2084                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2085         }
2086 }
2087
2088 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2089 {
2090         if (!BP_NOMCP(bp)) {
2091                 u8 rc;
2092
2093                 /* Initialize link parameters structure variables */
2094                 /* It is recommended to turn off RX FC for jumbo frames
2095                    for better performance */
2096                 if (IS_E1HMF(bp))
2097                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2098                 else if (bp->dev->mtu > 5000)
2099                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2100                 else
2101                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2102
2103                 bnx2x_acquire_phy_lock(bp);
2104
2105                 if (load_mode == LOAD_DIAG)
2106                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2107
2108                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2109
2110                 bnx2x_release_phy_lock(bp);
2111
2112                 bnx2x_calc_fc_adv(bp);
2113
2114                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2115                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2116                         bnx2x_link_report(bp);
2117                 }
2118
2119                 return rc;
2120         }
2121         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2122         return -EINVAL;
2123 }
2124
2125 static void bnx2x_link_set(struct bnx2x *bp)
2126 {
2127         if (!BP_NOMCP(bp)) {
2128                 bnx2x_acquire_phy_lock(bp);
2129                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2130                 bnx2x_release_phy_lock(bp);
2131
2132                 bnx2x_calc_fc_adv(bp);
2133         } else
2134                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2135 }
2136
2137 static void bnx2x__link_reset(struct bnx2x *bp)
2138 {
2139         if (!BP_NOMCP(bp)) {
2140                 bnx2x_acquire_phy_lock(bp);
2141                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2142                 bnx2x_release_phy_lock(bp);
2143         } else
2144                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2145 }
2146
2147 static u8 bnx2x_link_test(struct bnx2x *bp)
2148 {
2149         u8 rc;
2150
2151         bnx2x_acquire_phy_lock(bp);
2152         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2153         bnx2x_release_phy_lock(bp);
2154
2155         return rc;
2156 }
2157
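/* Set up the per-port rate shaping and fairness timers.  r_param is
 * the line rate in bytes/usec (line_speed is in Mbps, so dividing by 8
 * yields Mbytes/sec, which equals bytes/usec).
 */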
2158 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2159 {
2160         u32 r_param = bp->link_vars.line_speed / 8;
2161         u32 fair_periodic_timeout_usec;
2162         u32 t_fair;
2163
2164         memset(&(bp->cmng.rs_vars), 0,
2165                sizeof(struct rate_shaping_vars_per_port));
2166         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2167
2168         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2169         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2170
2171         /* this is the threshold below which no timer arming will occur.
2172            The 1.25 coefficient makes the threshold a little bigger than
2173            the real time, to compensate for timer inaccuracy */
2174         bp->cmng.rs_vars.rs_threshold =
2175                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2176
2177         /* resolution of fairness timer */
2178         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2179         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2180         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2181
2182         /* this is the threshold below which we won't arm the timer anymore */
2183         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2184
2185         /* we multiply by 1e3/8 to get bytes/msec.
2186            We don't want the credits to exceed
2187            t_fair*FAIR_MEM (the algorithm resolution) */
2188         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2189         /* since each tick is 4 usec */
2190         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2191 }
2192
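/* Program the per-VN min/max rate parameters, taken from the
 * multi-function config in shared memory, into xstorm internal memory.
 */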
2193 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2194 {
2195         struct rate_shaping_vars_per_vn m_rs_vn;
2196         struct fairness_vars_per_vn m_fair_vn;
2197         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2198         u16 vn_min_rate, vn_max_rate;
2199         int i;
2200
2201         /* If the function is hidden - set min and max to zero */
2202         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2203                 vn_min_rate = 0;
2204                 vn_max_rate = 0;
2205
2206         } else {
2207                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2208                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2209                 /* If fairness is enabled (i.e. not all min rates are zero)
2210                    and the current min rate is zero - set it to 1.
2211                    This is a requirement of the algorithm. */
2212                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2213                         vn_min_rate = DEF_MIN_RATE;
2214                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2215                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2216         }
2217
2218         DP(NETIF_MSG_IFUP,
2219            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2220            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2221
2222         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2223         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2224
2225         /* global vn counter - maximal Mbps for this vn */
2226         m_rs_vn.vn_counter.rate = vn_max_rate;
2227
2228         /* quota - number of bytes that may be sent in this period */
2229         m_rs_vn.vn_counter.quota =
2230                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2231
2232         if (bp->vn_weight_sum) {
2233                 /* credit for each period of the fairness algorithm:
2234                    number of bytes in T_FAIR (the vns share the port rate).
2235                    vn_weight_sum should not be larger than 10000, thus
2236                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2237                    than zero */
2238                 m_fair_vn.vn_credit_delta =
2239                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2240                                                  (8 * bp->vn_weight_sum))),
2241                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2242                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2243                    m_fair_vn.vn_credit_delta);
2244         }
2245
2246         /* Store it to internal memory */
2247         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2248                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2249                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2250                        ((u32 *)(&m_rs_vn))[i]);
2251
2252         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2253                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2254                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2255                        ((u32 *)(&m_fair_vn))[i]);
2256 }
2257
2258
2259 /* This function is called upon link interrupt */
2260 static void bnx2x_link_attn(struct bnx2x *bp)
2261 {
2262         /* Make sure that we are synced with the current statistics */
2263         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2264
2265         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2266
2267         if (bp->link_vars.link_up) {
2268
2269                 /* dropless flow control */
2270                 if (CHIP_IS_E1H(bp)) {
2271                         int port = BP_PORT(bp);
2272                         u32 pause_enabled = 0;
2273
2274                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2275                                 pause_enabled = 1;
2276
2277                         REG_WR(bp, BAR_USTRORM_INTMEM +
2278                                USTORM_PAUSE_ENABLED_OFFSET(port),
2279                                pause_enabled);
2280                 }
2281
2282                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2283                         struct host_port_stats *pstats;
2284
2285                         pstats = bnx2x_sp(bp, port_stats);
2286                         /* reset old bmac stats */
2287                         memset(&(pstats->mac_stx[0]), 0,
2288                                sizeof(struct mac_stx));
2289                 }
2290                 if ((bp->state == BNX2X_STATE_OPEN) ||
2291                     (bp->state == BNX2X_STATE_DISABLED))
2292                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2293         }
2294
2295         /* indicate link status */
2296         bnx2x_link_report(bp);
2297
2298         if (IS_E1HMF(bp)) {
2299                 int port = BP_PORT(bp);
2300                 int func;
2301                 int vn;
2302
2303                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2304                         if (vn == BP_E1HVN(bp))
2305                                 continue;
2306
2307                         func = ((vn << 1) | port);
2308
2309                         /* Set the attention towards other drivers
2310                            on the same port */
2311                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2312                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2313                 }
2314
2315                 if (bp->link_vars.link_up) {
2316                         int i;
2317
2318                         /* Init rate shaping and fairness contexts */
2319                         bnx2x_init_port_minmax(bp);
2320
2321                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2322                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2323
2324                         /* Store it to internal memory */
2325                         for (i = 0;
2326                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2327                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2328                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2329                                        ((u32 *)(&bp->cmng))[i]);
2330                 }
2331         }
2332 }
2333
2334 static void bnx2x__link_status_update(struct bnx2x *bp)
2335 {
2336         if (bp->state != BNX2X_STATE_OPEN)
2337                 return;
2338
2339         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2340
2341         if (bp->link_vars.link_up)
2342                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2343         else
2344                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2345
2346         /* indicate link status */
2347         bnx2x_link_report(bp);
2348 }
2349
2350 static void bnx2x_pmf_update(struct bnx2x *bp)
2351 {
2352         int port = BP_PORT(bp);
2353         u32 val;
2354
2355         bp->port.pmf = 1;
2356         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2357
2358         /* enable nig attention */
2359         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2360         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2361         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2362
2363         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2364 }
2365
2366 /* end of Link */
2367
2368 /* slow path */
2369
2370 /*
2371  * General service functions
2372  */
2373
2374 /* the slow path queue is odd since completions arrive on the fastpath ring */
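/* Post a single slowpath element (SPE): fill the next producer BD,
 * advance the producer (wrapping at spq_last_bd) and kick the FW by
 * writing the new producer index to xstorm internal memory.
 */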
2375 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2376                          u32 data_hi, u32 data_lo, int common)
2377 {
2378         int func = BP_FUNC(bp);
2379
2380         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2381            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2382            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2383            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2384            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2385
2386 #ifdef BNX2X_STOP_ON_ERROR
2387         if (unlikely(bp->panic))
2388                 return -EIO;
2389 #endif
2390
2391         spin_lock_bh(&bp->spq_lock);
2392
2393         if (!bp->spq_left) {
2394                 BNX2X_ERR("BUG! SPQ ring full!\n");
2395                 spin_unlock_bh(&bp->spq_lock);
2396                 bnx2x_panic();
2397                 return -EBUSY;
2398         }
2399
2400         /* the CID needs the port number to be encoded in it */
2401         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2402                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2403                                      HW_CID(bp, cid)));
2404         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2405         if (common)
2406                 bp->spq_prod_bd->hdr.type |=
2407                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2408
2409         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2410         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2411
2412         bp->spq_left--;
2413
2414         if (bp->spq_prod_bd == bp->spq_last_bd) {
2415                 bp->spq_prod_bd = bp->spq;
2416                 bp->spq_prod_idx = 0;
2417                 DP(NETIF_MSG_TIMER, "end of spq\n");
2418
2419         } else {
2420                 bp->spq_prod_bd++;
2421                 bp->spq_prod_idx++;
2422         }
2423
2424         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2425                bp->spq_prod_idx);
2426
2427         spin_unlock_bh(&bp->spq_lock);
2428         return 0;
2429 }
2430
2431 /* acquire split MCP access lock register */
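/* Setting bit 31 of the lock register requests the lock; it is
 * considered granted when the bit reads back as set.  Poll every 5ms
 * for up to ~5 seconds.
 */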
2432 static int bnx2x_acquire_alr(struct bnx2x *bp)
2433 {
2434         u32 i, j, val;
2435         int rc = 0;
2436
2437         might_sleep();
2438         i = 100;
2439         for (j = 0; j < i*10; j++) {
2440                 val = (1UL << 31);
2441                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2442                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2443                 if (val & (1L << 31))
2444                         break;
2445
2446                 msleep(5);
2447         }
2448         if (!(val & (1L << 31))) {
2449                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2450                 rc = -EBUSY;
2451         }
2452
2453         return rc;
2454 }
2455
2456 /* release split MCP access lock register */
2457 static void bnx2x_release_alr(struct bnx2x *bp)
2458 {
2459         u32 val = 0;
2460
2461         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2462 }
2463
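/* Compare the cached default status block indices against the ones the
 * chip last wrote and return a bitmask of the sections that changed:
 * 1 - attn, 2 - cstorm, 4 - ustorm, 8 - xstorm, 16 - tstorm.
 */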
2464 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2465 {
2466         struct host_def_status_block *def_sb = bp->def_status_blk;
2467         u16 rc = 0;
2468
2469         barrier(); /* status block is written to by the chip */
2470         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2471                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2472                 rc |= 1;
2473         }
2474         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2475                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2476                 rc |= 2;
2477         }
2478         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2479                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2480                 rc |= 4;
2481         }
2482         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2483                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2484                 rc |= 8;
2485         }
2486         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2487                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2488                 rc |= 16;
2489         }
2490         return rc;
2491 }
2492
2493 /*
2494  * slow path service functions
2495  */
2496
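/* Handle newly asserted attention bits: mask them in the AEU, record
 * them in attn_state, service the hardwired (NIG/GPIO/general attn)
 * sources and finally write the bits to the HC attn-bits-set register.
 */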
2497 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2498 {
2499         int port = BP_PORT(bp);
2500         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2501                        COMMAND_REG_ATTN_BITS_SET);
2502         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2503                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2504         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2505                                        NIG_REG_MASK_INTERRUPT_PORT0;
2506         u32 aeu_mask;
2507         u32 nig_mask = 0;
2508
2509         if (bp->attn_state & asserted)
2510                 BNX2X_ERR("IGU ERROR\n");
2511
2512         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2513         aeu_mask = REG_RD(bp, aeu_addr);
2514
2515         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2516            aeu_mask, asserted);
2517         aeu_mask &= ~(asserted & 0xff);
2518         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2519
2520         REG_WR(bp, aeu_addr, aeu_mask);
2521         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2522
2523         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2524         bp->attn_state |= asserted;
2525         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2526
2527         if (asserted & ATTN_HARD_WIRED_MASK) {
2528                 if (asserted & ATTN_NIG_FOR_FUNC) {
2529
2530                         bnx2x_acquire_phy_lock(bp);
2531
2532                         /* save nig interrupt mask */
2533                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2534                         REG_WR(bp, nig_int_mask_addr, 0);
2535
2536                         bnx2x_link_attn(bp);
2537
2538                         /* handle unicore attn? */
2539                 }
2540                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2541                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2542
2543                 if (asserted & GPIO_2_FUNC)
2544                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2545
2546                 if (asserted & GPIO_3_FUNC)
2547                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2548
2549                 if (asserted & GPIO_4_FUNC)
2550                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2551
2552                 if (port == 0) {
2553                         if (asserted & ATTN_GENERAL_ATTN_1) {
2554                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2555                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2556                         }
2557                         if (asserted & ATTN_GENERAL_ATTN_2) {
2558                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2559                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2560                         }
2561                         if (asserted & ATTN_GENERAL_ATTN_3) {
2562                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2563                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2564                         }
2565                 } else {
2566                         if (asserted & ATTN_GENERAL_ATTN_4) {
2567                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2568                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2569                         }
2570                         if (asserted & ATTN_GENERAL_ATTN_5) {
2571                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2572                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2573                         }
2574                         if (asserted & ATTN_GENERAL_ATTN_6) {
2575                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2576                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2577                         }
2578                 }
2579
2580         } /* if hardwired */
2581
2582         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2583            asserted, hc_addr);
2584         REG_WR(bp, hc_addr, asserted);
2585
2586         /* now set back the mask */
2587         if (asserted & ATTN_NIG_FOR_FUNC) {
2588                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2589                 bnx2x_release_phy_lock(bp);
2590         }
2591 }
2592
2593 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2594 {
2595         int port = BP_PORT(bp);
2596         int reg_offset;
2597         u32 val;
2598
2599         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2600                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2601
2602         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2603
2604                 val = REG_RD(bp, reg_offset);
2605                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2606                 REG_WR(bp, reg_offset, val);
2607
2608                 BNX2X_ERR("SPIO5 hw attention\n");
2609
2610                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2611                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2612                         /* Fan failure attention */
2613
2614                         /* The PHY reset is controlled by GPIO 1 */
2615                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2616                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2617                         /* Low power mode is controlled by GPIO 2 */
2618                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2619                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2620                         /* mark the failure */
2621                         bp->link_params.ext_phy_config &=
2622                                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2623                         bp->link_params.ext_phy_config |=
2624                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2625                         SHMEM_WR(bp,
2626                                  dev_info.port_hw_config[port].
2627                                                         external_phy_config,
2628                                  bp->link_params.ext_phy_config);
2629                         /* log the failure */
2630                         printk(KERN_ERR PFX "Fan Failure on Network"
2631                                " Controller %s has caused the driver to"
2632                                " shut down the card to prevent permanent"
2633                                " damage.  Please contact Dell Support for"
2634                                " assistance\n", bp->dev->name);
2635                         break;
2636
2637                 default:
2638                         break;
2639                 }
2640         }
2641
2642         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2643                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2644                 bnx2x_acquire_phy_lock(bp);
2645                 bnx2x_handle_module_detect_int(&bp->link_params);
2646                 bnx2x_release_phy_lock(bp);
2647         }
2648
2649         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2650
2651                 val = REG_RD(bp, reg_offset);
2652                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2653                 REG_WR(bp, reg_offset, val);
2654
2655                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2656                           (attn & HW_INTERRUT_ASSERT_SET_0));
2657                 bnx2x_panic();
2658         }
2659 }
2660
2661 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2662 {
2663         u32 val;
2664
2665         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2666
2667                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2668                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2669                 /* DORQ discard attention */
2670                 if (val & 0x2)
2671                         BNX2X_ERR("FATAL error from DORQ\n");
2672         }
2673
2674         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2675
2676                 int port = BP_PORT(bp);
2677                 int reg_offset;
2678
2679                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2680                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2681
2682                 val = REG_RD(bp, reg_offset);
2683                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2684                 REG_WR(bp, reg_offset, val);
2685
2686                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2687                           (attn & HW_INTERRUT_ASSERT_SET_1));
2688                 bnx2x_panic();
2689         }
2690 }
2691
2692 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2693 {
2694         u32 val;
2695
2696         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2697
2698                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2699                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2700                 /* CFC error attention */
2701                 if (val & 0x2)
2702                         BNX2X_ERR("FATAL error from CFC\n");
2703         }
2704
2705         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2706
2707                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2708                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2709                 /* RQ_USDMDP_FIFO_OVERFLOW */
2710                 if (val & 0x18000)
2711                         BNX2X_ERR("FATAL error from PXP\n");
2712         }
2713
2714         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2715
2716                 int port = BP_PORT(bp);
2717                 int reg_offset;
2718
2719                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2720                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2721
2722                 val = REG_RD(bp, reg_offset);
2723                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2724                 REG_WR(bp, reg_offset, val);
2725
2726                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2727                           (attn & HW_INTERRUT_ASSERT_SET_2));
2728                 bnx2x_panic();
2729         }
2730 }
2731
2732 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2733 {
2734         u32 val;
2735
2736         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2737
2738                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2739                         int func = BP_FUNC(bp);
2740
2741                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2742                         bnx2x__link_status_update(bp);
2743                         if (SHMEM_RD(bp, func_mb[func].drv_status) &
2744                                                         DRV_STATUS_PMF)
2745                                 bnx2x_pmf_update(bp);
2746
2747                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2748
2749                         BNX2X_ERR("MC assert!\n");
2750                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2751                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2752                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2753                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2754                         bnx2x_panic();
2755
2756                 } else if (attn & BNX2X_MCP_ASSERT) {
2757
2758                         BNX2X_ERR("MCP assert!\n");
2759                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2760                         bnx2x_fw_dump(bp);
2761
2762                 } else
2763                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2764         }
2765
2766         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2767                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2768                 if (attn & BNX2X_GRC_TIMEOUT) {
2769                         val = CHIP_IS_E1H(bp) ?
2770                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2771                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2772                 }
2773                 if (attn & BNX2X_GRC_RSV) {
2774                         val = CHIP_IS_E1H(bp) ?
2775                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2776                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2777                 }
2778                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2779         }
2780 }
2781
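/* Handle deasserted attention bits: read the after-invert AEU signals,
 * run the per-group deasserted handlers for each affected dynamic
 * group, then clear the bits in the HC and unmask them in the AEU.
 */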
2782 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2783 {
2784         struct attn_route attn;
2785         struct attn_route group_mask;
2786         int port = BP_PORT(bp);
2787         int index;
2788         u32 reg_addr;
2789         u32 val;
2790         u32 aeu_mask;
2791
2792         /* need to take the HW lock because the MCP or the other port
2793            might also try to handle this event */
2794         bnx2x_acquire_alr(bp);
2795
2796         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2797         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2798         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2799         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2800         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2801            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2802
2803         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2804                 if (deasserted & (1 << index)) {
2805                         group_mask = bp->attn_group[index];
2806
2807                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2808                            index, group_mask.sig[0], group_mask.sig[1],
2809                            group_mask.sig[2], group_mask.sig[3]);
2810
2811                         bnx2x_attn_int_deasserted3(bp,
2812                                         attn.sig[3] & group_mask.sig[3]);
2813                         bnx2x_attn_int_deasserted1(bp,
2814                                         attn.sig[1] & group_mask.sig[1]);
2815                         bnx2x_attn_int_deasserted2(bp,
2816                                         attn.sig[2] & group_mask.sig[2]);
2817                         bnx2x_attn_int_deasserted0(bp,
2818                                         attn.sig[0] & group_mask.sig[0]);
2819
2820                         if ((attn.sig[0] & group_mask.sig[0] &
2821                                                 HW_PRTY_ASSERT_SET_0) ||
2822                             (attn.sig[1] & group_mask.sig[1] &
2823                                                 HW_PRTY_ASSERT_SET_1) ||
2824                             (attn.sig[2] & group_mask.sig[2] &
2825                                                 HW_PRTY_ASSERT_SET_2))
2826                                 BNX2X_ERR("FATAL HW block parity attention\n");
2827                 }
2828         }
2829
2830         bnx2x_release_alr(bp);
2831
2832         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2833
2834         val = ~deasserted;
2835         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2836            val, reg_addr);
2837         REG_WR(bp, reg_addr, val);
2838
2839         if (~bp->attn_state & deasserted)
2840                 BNX2X_ERR("IGU ERROR\n");
2841
2842         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2843                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2844
2845         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2846         aeu_mask = REG_RD(bp, reg_addr);
2847
2848         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2849            aeu_mask, deasserted);
2850         aeu_mask |= (deasserted & 0xff);
2851         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2852
2853         REG_WR(bp, reg_addr, aeu_mask);
2854         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2855
2856         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2857         bp->attn_state &= ~deasserted;
2858         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2859 }
2860
2861 static void bnx2x_attn_int(struct bnx2x *bp)
2862 {
2863         /* read local copy of bits */
2864         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2865                                                                 attn_bits);
2866         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2867                                                                 attn_bits_ack);
2868         u32 attn_state = bp->attn_state;
2869
2870         /* look for changed bits */
2871         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2872         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2873
2874         DP(NETIF_MSG_HW,
2875            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2876            attn_bits, attn_ack, asserted, deasserted);
2877
2878         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2879                 BNX2X_ERR("BAD attention state\n");
2880
2881         /* handle bits that were raised */
2882         if (asserted)
2883                 bnx2x_attn_int_asserted(bp, asserted);
2884
2885         if (deasserted)
2886                 bnx2x_attn_int_deasserted(bp, deasserted);
2887 }
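
/*
 * Worked example (editor's illustration): with attn_bits = 0110b,
 * attn_ack = 0010b and attn_state = 0010b, bit 2 was newly raised:
 * asserted = 0110 & ~0010 & ~0010 = 0100b, and deasserted = 0.  The
 * "BAD attention state" test flags any bit where the IGU has already
 * acknowledged the level (attn_bits == attn_ack) while the driver's
 * attn_state still disagrees with it.
 */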
2888
2889 static void bnx2x_sp_task(struct work_struct *work)
2890 {
2891         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2892         u16 status;
2893
2894
2895         /* Return here if interrupt is disabled */
2896         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2897                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2898                 return;
2899         }
2900
2901         status = bnx2x_update_dsb_idx(bp);
2902 /*      if (status == 0)                                     */
2903 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2904
2905         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2906
2907         /* HW attentions */
2908         if (status & 0x1)
2909                 bnx2x_attn_int(bp);
2910
2911         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2912                      IGU_INT_NOP, 1);
2913         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2914                      IGU_INT_NOP, 1);
2915         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2916                      IGU_INT_NOP, 1);
2917         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2918                      IGU_INT_NOP, 1);
2919         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2920                      IGU_INT_ENABLE, 1);
2921
2922 }
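
/*
 * Editor's note: each storm index of the default status block is acked
 * with IGU_INT_NOP and only the final ack (TSTORM) uses IGU_INT_ENABLE,
 * so the IGU keeps the slow-path interrupt masked until every index has
 * been consumed.
 */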
2923
2924 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2925 {
2926         struct net_device *dev = dev_instance;
2927         struct bnx2x *bp = netdev_priv(dev);
2928
2929         /* Return here if interrupt is disabled */
2930         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2931                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2932                 return IRQ_HANDLED;
2933         }
2934
2935         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2936
2937 #ifdef BNX2X_STOP_ON_ERROR
2938         if (unlikely(bp->panic))
2939                 return IRQ_HANDLED;
2940 #endif
2941
2942         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2943
2944         return IRQ_HANDLED;
2945 }
2946
2947 /* end of slow path */
2948
2949 /* Statistics */
2950
2951 /****************************************************************************
2952 * Macros
2953 ****************************************************************************/
2954
2955 /* sum[hi:lo] += add[hi:lo] */
2956 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2957         do { \
2958                 s_lo += a_lo; \
2959                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2960         } while (0)
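
/*
 * Worked example (editor's illustration): adding 1 to 0x00000001_FFFFFFFF
 * wraps s_lo from 0xFFFFFFFF to 0; the (s_lo < a_lo) test detects the
 * wrap and carries 1 into s_hi, giving 0x00000002_00000000.  The carry
 * test relies on unsigned u32 arithmetic being well-defined modulo 2^32.
 */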
2961
2962 /* difference = minuend - subtrahend */
2963 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2964         do { \
2965                 if (m_lo < s_lo) { \
2966                         /* underflow */ \
2967                         d_hi = m_hi - s_hi; \
2968                         if (d_hi > 0) { \
2969                                 /* we can borrow 1 */ \
2970                                 d_hi--; \
2971                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2972                         } else { \
2973                                 /* m_hi <= s_hi */ \
2974                                 d_hi = 0; \
2975                                 d_lo = 0; \
2976                         } \
2977                 } else { \
2978                         /* m_lo >= s_lo */ \
2979                         if (m_hi < s_hi) { \
2980                                 d_hi = 0; \
2981                                 d_lo = 0; \
2982                         } else { \
2983                                 /* m_hi >= s_hi */ \
2984                                 d_hi = m_hi - s_hi; \
2985                                 d_lo = m_lo - s_lo; \
2986                         } \
2987                 } \
2988         } while (0)
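
#if 0	/* editor's illustration only; not compiled */
/* Example expansion of the two macros above, with operands chosen to
 * exercise the carry and the borrow paths:
 */
static void bnx2x_stats_macro_example(void)
{
	u32 s_hi = 0, s_lo = 0xFFFFFFFF;
	u32 d_hi, d_lo;

	ADD_64(s_hi, 0, s_lo, 1);	  /* s -> 0x00000001_00000000 */
	DIFF_64(d_hi, 2, 1, d_lo, 0, 1);  /* d -> 0x00000000_FFFFFFFF */
}
#endif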
2989
2990 #define UPDATE_STAT64(s, t) \
2991         do { \
2992                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2993                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2994                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2995                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2996                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2997                        pstats->mac_stx[1].t##_lo, diff.lo); \
2998         } while (0)
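
/*
 * Editor's note: mac_stx[0] holds the raw snapshot from the previous
 * DMAE read and mac_stx[1] accumulates the per-interval deltas, so the
 * driver depends only on how far each MAC counter moved, never on its
 * absolute value (DIFF_64 clamps a backwards step to zero).
 */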
2999
3000 #define UPDATE_STAT64_NIG(s, t) \
3001         do { \
3002                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3003                         diff.lo, new->s##_lo, old->s##_lo); \
3004                 ADD_64(estats->t##_hi, diff.hi, \
3005                        estats->t##_lo, diff.lo); \
3006         } while (0)
3007
3008 /* sum[hi:lo] += add */
3009 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3010         do { \
3011                 s_lo += a; \
3012                 s_hi += (s_lo < a) ? 1 : 0; \
3013         } while (0)
3014
3015 #define UPDATE_EXTEND_STAT(s) \
3016         do { \
3017                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3018                               pstats->mac_stx[1].s##_lo, \
3019                               new->s); \
3020         } while (0)
3021
3022 #define UPDATE_EXTEND_TSTAT(s, t) \
3023         do { \
3024                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3025                 old_tclient->s = tclient->s; \
3026                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3027         } while (0)
3028
3029 #define UPDATE_EXTEND_USTAT(s, t) \
3030         do { \
3031                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3032                 old_uclient->s = uclient->s; \
3033                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3034         } while (0)
3035
3036 #define UPDATE_EXTEND_XSTAT(s, t) \
3037         do { \
3038                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3039                 old_xclient->s = xclient->s; \
3040                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3041         } while (0)
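
/*
 * Worked example (editor's illustration) for the UPDATE_EXTEND_*STAT
 * macros above: the per-client storm counters are u32 and may wrap.
 * With old = 0xFFFFFFF0 and new = 0x00000010, the unsigned subtraction
 * yields diff = 0x20, the true increment, which ADD_EXTEND_64 folds
 * into the 64-bit qstats counter.  This stays correct as long as a
 * counter advances by less than 2^32 between consecutive updates.
 */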
3042
3043 /* minuend -= subtrahend */
3044 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3045         do { \
3046                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3047         } while (0)
3048
3049 /* minuend[hi:lo] -= subtrahend */
3050 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3051         do { \
3052                 SUB_64(m_hi, 0, m_lo, s); \
3053         } while (0)
3054
3055 #define SUB_EXTEND_USTAT(s, t) \
3056         do { \
3057                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3058                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3059         } while (0)
3060
3061 /*
3062  * General service functions
3063  */
3064
3065 static inline long bnx2x_hilo(u32 *hiref)
3066 {
3067         u32 lo = *(hiref + 1);
3068 #if (BITS_PER_LONG == 64)
3069         u32 hi = *hiref;
3070
3071         return HILO_U64(hi, lo);
3072 #else
3073         return lo;
3074 #endif
3075 }
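
/*
 * Editor's note: this relies on each 64-bit counter being laid out as a
 * _hi word immediately followed by its _lo word, so hiref + 1 points at
 * the low half.  On 64-bit builds HILO_U64 (from bnx2x.h, essentially
 * ((u64)hi << 32) + lo) recombines the halves; on 32-bit builds only
 * the low word is returned since the net_device stats fields are
 * unsigned long there.
 */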
3076
3077 /*
3078  * Init service functions
3079  */
3080
3081 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3082 {
3083         if (!bp->stats_pending) {
3084                 struct eth_query_ramrod_data ramrod_data = {0};
3085                 int i, rc;
3086
3087                 ramrod_data.drv_counter = bp->stats_counter++;
3088                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3089                 for_each_queue(bp, i)
3090                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3091
3092                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3093                                    ((u32 *)&ramrod_data)[1],
3094                                    ((u32 *)&ramrod_data)[0], 0);
3095                 if (rc == 0) {
3096                         /* stats ramrod has its own slot on the spq */
3097                         bp->spq_left++;
3098                         bp->stats_pending = 1;
3099                 }
3100         }
3101 }
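
/*
 * Editor's note: the 64-bit eth_query_ramrod_data is handed to
 * bnx2x_sp_post() as two u32 halves (on little-endian, index [1] is the
 * high word).  bnx2x_sp_post() normally consumes a slow-path queue
 * credit, but the statistics ramrod is budgeted separately, hence the
 * immediate spq_left++ compensation above.
 */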
3102
3103 static void bnx2x_stats_init(struct bnx2x *bp)
3104 {
3105         int port = BP_PORT(bp);
3106         int i;
3107
3108         bp->stats_pending = 0;
3109         bp->executer_idx = 0;
3110         bp->stats_counter = 0;
3111
3112         /* port stats */
3113         if (!BP_NOMCP(bp))
3114                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3115         else
3116                 bp->port.port_stx = 0;
3117         DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3118
3119         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3120         bp->port.old_nig_stats.brb_discard =
3121                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3122         bp->port.old_nig_stats.brb_truncate =
3123                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3124         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3125                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3126         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3127                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3128
3129         /* function stats */
3130         for_each_queue(bp, i) {
3131                 struct bnx2x_fastpath *fp = &bp->fp[i];
3132
3133                 memset(&fp->old_tclient, 0,
3134                        sizeof(struct tstorm_per_client_stats));
3135                 memset(&fp->old_uclient, 0,
3136                        sizeof(struct ustorm_per_client_stats));
3137                 memset(&fp->old_xclient, 0,
3138                        sizeof(struct xstorm_per_client_stats));
3139                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3140         }
3141
3142         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3143         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3144
3145         bp->stats_state = STATS_STATE_DISABLED;
3146         if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3147                 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3148 }
3149
3150 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3151 {
3152         struct dmae_command *dmae = &bp->stats_dmae;
3153         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3154
3155         *stats_comp = DMAE_COMP_VAL;
3156         if (CHIP_REV_IS_SLOW(bp))
3157                 return;
3158
3159         /* loader */
3160         if (bp->executer_idx) {
3161                 int loader_idx = PMF_DMAE_C(bp);
3162
3163                 memset(dmae, 0, sizeof(struct dmae_command));
3164
3165                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3166                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3167                                 DMAE_CMD_DST_RESET |
3168 #ifdef __BIG_ENDIAN
3169                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3170 #else
3171                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3172 #endif
3173                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3174                                                DMAE_CMD_PORT_0) |
3175                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3176                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3177                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3178                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3179                                      sizeof(struct dmae_command) *
3180                                      (loader_idx + 1)) >> 2;
3181                 dmae->dst_addr_hi = 0;
3182                 dmae->len = sizeof(struct dmae_command) >> 2;
3183                 if (CHIP_IS_E1(bp))
3184                         dmae->len--;
3185                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3186                 dmae->comp_addr_hi = 0;
3187                 dmae->comp_val = 1;
3188
3189                 *stats_comp = 0;
3190                 bnx2x_post_dmae(bp, dmae, loader_idx);
3191
3192         } else if (bp->func_stx) {
3193                 *stats_comp = 0;
3194                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3195         }
3196 }
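
/*
 * Editor's reading of the loader mechanism: when several DMAE commands
 * have been queued in host memory (executer_idx != 0), a single
 * "loader" command is posted instead.  It copies the first queued
 * command into the device's DMAE command memory, and its completion
 * write hits the next GO register, launching that command; the queued
 * commands' own completion writes (comp_addr = dmae_reg_go_c[...])
 * keep the chain moving without CPU involvement, with only the final
 * command signalling the host through DMAE_COMP_VAL.
 */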
3197
3198 static int bnx2x_stats_comp(struct bnx2x *bp)
3199 {
3200         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3201         int cnt = 10;
3202
3203         might_sleep();
3204         while (*stats_comp != DMAE_COMP_VAL) {
3205                 if (!cnt) {
3206                         BNX2X_ERR("timeout waiting for stats finished\n");
3207                         break;
3208                 }
3209                 cnt--;
3210                 msleep(1);
3211         }
3212         return 1;
3213 }
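
/*
 * Editor's note: callers use bnx2x_stats_comp() purely as a completion
 * barrier: it polls the DMAE completion word through at most ten 1 ms
 * sleeps, and its return value (always 1) is ignored.
 */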
3214
3215 /*
3216  * Statistics service functions
3217  */
3218
3219 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3220 {
3221         struct dmae_command *dmae;
3222         u32 opcode;
3223         int loader_idx = PMF_DMAE_C(bp);
3224         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3225
3226         /* sanity */
3227         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3228                 BNX2X_ERR("BUG!\n");
3229                 return;
3230         }
3231
3232         bp->executer_idx = 0;
3233
3234         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3235                   DMAE_CMD_C_ENABLE |
3236                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3237 #ifdef __BIG_ENDIAN
3238                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3239 #else
3240                   DMAE_CMD_ENDIANITY_DW_SWAP |
3241 #endif
3242                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3243                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3244
3245         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3246         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3247         dmae->src_addr_lo = bp->port.port_stx >> 2;
3248         dmae->src_addr_hi = 0;
3249         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3250         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3251         dmae->len = DMAE_LEN32_RD_MAX;
3252         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3253         dmae->comp_addr_hi = 0;
3254         dmae->comp_val = 1;
3255
3256         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3257         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3258         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3259         dmae->src_addr_hi = 0;
3260         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3261                                    DMAE_LEN32_RD_MAX * 4);
3262         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3263                                    DMAE_LEN32_RD_MAX * 4);
3264         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3265         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3266         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3267         dmae->comp_val = DMAE_COMP_VAL;
3268
3269         *stats_comp = 0;
3270         bnx2x_hw_stats_post(bp);
3271         bnx2x_stats_comp(bp);
3272 }
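
/*
 * Editor's note: the port stats block exceeds what one DMAE read may
 * carry, so it is fetched in two chunks: DMAE_LEN32_RD_MAX dwords first
 * and the remainder second, with only the second command posting a
 * completion to host memory.
 */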
3273
3274 static void bnx2x_port_stats_init(struct bnx2x *bp)
3275 {
3276         struct dmae_command *dmae;
3277         int port = BP_PORT(bp);
3278         int vn = BP_E1HVN(bp);
3279         u32 opcode;
3280         int loader_idx = PMF_DMAE_C(bp);
3281         u32 mac_addr;
3282         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3283
3284         /* sanity */
3285         if (!bp->link_vars.link_up || !bp->port.pmf) {
3286                 BNX2X_ERR("BUG!\n");
3287                 return;
3288         }
3289
3290         bp->executer_idx = 0;
3291
3292         /* MCP */
3293         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3294                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3295                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3296 #ifdef __BIG_ENDIAN
3297                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3298 #else
3299                   DMAE_CMD_ENDIANITY_DW_SWAP |
3300 #endif
3301                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3302                   (vn << DMAE_CMD_E1HVN_SHIFT));
3303
3304         if (bp->port.port_stx) {
3305
3306                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3307                 dmae->opcode = opcode;
3308                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3309                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3310                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3311                 dmae->dst_addr_hi = 0;
3312                 dmae->len = sizeof(struct host_port_stats) >> 2;
3313                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3314                 dmae->comp_addr_hi = 0;
3315                 dmae->comp_val = 1;
3316         }
3317
3318         if (bp->func_stx) {
3319
3320                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3321                 dmae->opcode = opcode;
3322                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3323                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3324                 dmae->dst_addr_lo = bp->func_stx >> 2;
3325                 dmae->dst_addr_hi = 0;
3326                 dmae->len = sizeof(struct host_func_stats) >> 2;
3327                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3328                 dmae->comp_addr_hi = 0;
3329                 dmae->comp_val = 1;
3330         }
3331
3332         /* MAC */
3333         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3334                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3335                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3336 #ifdef __BIG_ENDIAN
3337                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3338 #else
3339                   DMAE_CMD_ENDIANITY_DW_SWAP |
3340 #endif
3341                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3342                   (vn << DMAE_CMD_E1HVN_SHIFT));
3343
3344         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3345
3346                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3347                                    NIG_REG_INGRESS_BMAC0_MEM);
3348
3349                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3350                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3351                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3352                 dmae->opcode = opcode;
3353                 dmae->src_addr_lo = (mac_addr +
3354                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3355                 dmae->src_addr_hi = 0;
3356                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3357                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3358                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3359                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3360                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3361                 dmae->comp_addr_hi = 0;
3362                 dmae->comp_val = 1;
3363
3364                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3365                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3366                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3367                 dmae->opcode = opcode;
3368                 dmae->src_addr_lo = (mac_addr +
3369                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3370                 dmae->src_addr_hi = 0;
3371                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3372                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3373                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3374                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3375                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3376                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3377                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3378                 dmae->comp_addr_hi = 0;
3379                 dmae->comp_val = 1;
3380
3381         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3382
3383                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3384
3385                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3386                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3387                 dmae->opcode = opcode;
3388                 dmae->src_addr_lo = (mac_addr +
3389                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3390                 dmae->src_addr_hi = 0;
3391                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3392                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3393                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3394                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3395                 dmae->comp_addr_hi = 0;
3396                 dmae->comp_val = 1;
3397
3398                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3399                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3400                 dmae->opcode = opcode;
3401                 dmae->src_addr_lo = (mac_addr +
3402                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3403                 dmae->src_addr_hi = 0;
3404                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3405                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3406                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3407                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3408                 dmae->len = 1;
3409                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3410                 dmae->comp_addr_hi = 0;
3411                 dmae->comp_val = 1;
3412
3413                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3414                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3415                 dmae->opcode = opcode;
3416                 dmae->src_addr_lo = (mac_addr +
3417                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3418                 dmae->src_addr_hi = 0;
3419                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3420                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3421                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3422                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3423                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3424                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3425                 dmae->comp_addr_hi = 0;
3426                 dmae->comp_val = 1;
3427         }
3428
3429         /* NIG */
3430         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3431         dmae->opcode = opcode;
3432         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3433                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3434         dmae->src_addr_hi = 0;
3435         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3436         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3437         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3438         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3439         dmae->comp_addr_hi = 0;
3440         dmae->comp_val = 1;
3441
3442         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3443         dmae->opcode = opcode;
3444         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3445                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3446         dmae->src_addr_hi = 0;
3447         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3448                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3449         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3450                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3451         dmae->len = (2*sizeof(u32)) >> 2;
3452         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3453         dmae->comp_addr_hi = 0;
3454         dmae->comp_val = 1;
3455
3456         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3457         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3458                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3459                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3460 #ifdef __BIG_ENDIAN
3461                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3462 #else
3463                         DMAE_CMD_ENDIANITY_DW_SWAP |
3464 #endif
3465                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3466                         (vn << DMAE_CMD_E1HVN_SHIFT));
3467         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3468                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3469         dmae->src_addr_hi = 0;
3470         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3471                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3472         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3473                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3474         dmae->len = (2*sizeof(u32)) >> 2;
3475         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3476         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3477         dmae->comp_val = DMAE_COMP_VAL;
3478
3479         *stats_comp = 0;
3480 }
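
/*
 * Editor's note: every command queued above completes into a GO
 * register except the last NIG read, which writes DMAE_COMP_VAL to the
 * stats_comp word in host memory; bnx2x_stats_comp() polls that word,
 * so the host only ever waits on the end of the whole chain.
 */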
3481
3482 static void bnx2x_func_stats_init(struct bnx2x *bp)
3483 {
3484         struct dmae_command *dmae = &bp->stats_dmae;
3485         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3486
3487         /* sanity */
3488         if (!bp->func_stx) {
3489                 BNX2X_ERR("BUG!\n");
3490                 return;
3491         }
3492
3493         bp->executer_idx = 0;
3494         memset(dmae, 0, sizeof(struct dmae_command));
3495
3496         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3497                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3498                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3499 #ifdef __BIG_ENDIAN
3500                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3501 #else
3502                         DMAE_CMD_ENDIANITY_DW_SWAP |
3503 #endif
3504                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3505                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3506         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3507         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3508         dmae->dst_addr_lo = bp->func_stx >> 2;
3509         dmae->dst_addr_hi = 0;
3510         dmae->len = sizeof(struct host_func_stats) >> 2;
3511         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3512         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3513         dmae->comp_val = DMAE_COMP_VAL;
3514
3515         *stats_comp = 0;
3516 }
3517
3518 static void bnx2x_stats_start(struct bnx2x *bp)
3519 {
3520         if (bp->port.pmf)
3521                 bnx2x_port_stats_init(bp);
3522
3523         else if (bp->func_stx)
3524                 bnx2x_func_stats_init(bp);
3525
3526         bnx2x_hw_stats_post(bp);
3527         bnx2x_storm_stats_post(bp);
3528 }
3529
3530 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3531 {
3532         bnx2x_stats_comp(bp);
3533         bnx2x_stats_pmf_update(bp);
3534         bnx2x_stats_start(bp);
3535 }
3536
3537 static void bnx2x_stats_restart(struct bnx2x *bp)
3538 {
3539         bnx2x_stats_comp(bp);
3540         bnx2x_stats_start(bp);
3541 }
3542
3543 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3544 {
3545         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3546         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3547         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3548         struct {
3549                 u32 lo;
3550                 u32 hi;
3551         } diff;
3552
3553         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3554         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3555         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3556         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3557         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3558         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3559         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3560         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3561         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3562         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3563         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3564         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3565         UPDATE_STAT64(tx_stat_gt127,
3566                                 tx_stat_etherstatspkts65octetsto127octets);
3567         UPDATE_STAT64(tx_stat_gt255,
3568                                 tx_stat_etherstatspkts128octetsto255octets);
3569         UPDATE_STAT64(tx_stat_gt511,
3570                                 tx_stat_etherstatspkts256octetsto511octets);
3571         UPDATE_STAT64(tx_stat_gt1023,
3572                                 tx_stat_etherstatspkts512octetsto1023octets);
3573         UPDATE_STAT64(tx_stat_gt1518,
3574                                 tx_stat_etherstatspkts1024octetsto1522octets);
3575         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3576         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3577         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3578         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3579         UPDATE_STAT64(tx_stat_gterr,
3580                                 tx_stat_dot3statsinternalmactransmiterrors);
3581         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3582
3583         estats->pause_frames_received_hi =
3584                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3585         estats->pause_frames_received_lo =
3586                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3587
3588         estats->pause_frames_sent_hi =
3589                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3590         estats->pause_frames_sent_lo =
3591                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3592 }
3593
3594 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3595 {
3596         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3597         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3598         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3599
3600         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3601         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3602         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3603         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3604         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3605         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3606         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3607         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3608         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3609         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3610         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3611         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3612         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3613         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3614         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3615         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3616         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3617         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3618         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3619         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3620         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3621         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3622         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3623         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3624         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3625         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3626         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3627         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3628         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3629         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3630         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3631
3632         estats->pause_frames_received_hi =
3633                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3634         estats->pause_frames_received_lo =
3635                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3636         ADD_64(estats->pause_frames_received_hi,
3637                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3638                estats->pause_frames_received_lo,
3639                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3640
3641         estats->pause_frames_sent_hi =
3642                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3643         estats->pause_frames_sent_lo =
3644                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3645         ADD_64(estats->pause_frames_sent_hi,
3646                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3647                estats->pause_frames_sent_lo,
3648                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3649 }
3650
3651 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3652 {
3653         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3654         struct nig_stats *old = &(bp->port.old_nig_stats);
3655         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3656         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3657         struct {
3658                 u32 lo;
3659                 u32 hi;
3660         } diff;
3661         u32 nig_timer_max;
3662
3663         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3664                 bnx2x_bmac_stats_update(bp);
3665
3666         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3667                 bnx2x_emac_stats_update(bp);
3668
3669         else { /* should not happen */
3670                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3671                 return -1;
3672         }
3673
3674         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3675                       new->brb_discard - old->brb_discard);
3676         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3677                       new->brb_truncate - old->brb_truncate);
3678
3679         UPDATE_STAT64_NIG(egress_mac_pkt0,
3680                                         etherstatspkts1024octetsto1522octets);
3681         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3682
3683         memcpy(old, new, sizeof(struct nig_stats));
3684
3685         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3686                sizeof(struct mac_stx));
3687         estats->brb_drop_hi = pstats->brb_drop_hi;
3688         estats->brb_drop_lo = pstats->brb_drop_lo;
3689
3690         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3691
3692         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3693         if (nig_timer_max != estats->nig_timer_max) {
3694                 estats->nig_timer_max = nig_timer_max;
3695                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3696         }
3697
3698         return 0;
3699 }
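
/*
 * Editor's reading: host_port_stats_start/_end bracket the block like a
 * sequence counter: the writer bumps _end and mirrors it into _start,
 * so a consumer seeing the two fields differ knows it captured a torn
 * snapshot.  host_func_stats uses the same trick below.
 */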
3700
3701 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3702 {
3703         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3704         struct tstorm_per_port_stats *tport =
3705                                         &stats->tstorm_common.port_statistics;
3706         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3707         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3708         int i;
3709
3710         memset(&(fstats->total_bytes_received_hi), 0,
3711                sizeof(struct host_func_stats) - 2*sizeof(u32));
3712         estats->error_bytes_received_hi = 0;
3713         estats->error_bytes_received_lo = 0;
3714         estats->etherstatsoverrsizepkts_hi = 0;
3715         estats->etherstatsoverrsizepkts_lo = 0;
3716         estats->no_buff_discard_hi = 0;
3717         estats->no_buff_discard_lo = 0;
3718
3719         for_each_queue(bp, i) {
3720                 struct bnx2x_fastpath *fp = &bp->fp[i];
3721                 int cl_id = fp->cl_id;
3722                 struct tstorm_per_client_stats *tclient =
3723                                 &stats->tstorm_common.client_statistics[cl_id];
3724                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3725                 struct ustorm_per_client_stats *uclient =
3726                                 &stats->ustorm_common.client_statistics[cl_id];
3727                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3728                 struct xstorm_per_client_stats *xclient =
3729                                 &stats->xstorm_common.client_statistics[cl_id];
3730                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3731                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3732                 u32 diff;
3733
3734                 /* are storm stats valid? */
3735                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3736                                                         bp->stats_counter) {
3737                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3738                            "  xstorm counter (%d) != stats_counter (%d)\n",
3739                            i, xclient->stats_counter, bp->stats_counter);
3740                         return -1;
3741                 }
3742                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3743                                                         bp->stats_counter) {
3744                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3745                            "  tstorm counter (%d) != stats_counter (%d)\n",
3746                            i, tclient->stats_counter, bp->stats_counter);
3747                         return -2;
3748                 }
3749                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3750                                                         bp->stats_counter) {
3751                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3752                            "  ustorm counter (%d) != stats_counter (%d)\n",
3753                            i, uclient->stats_counter, bp->stats_counter);
3754                         return -4;
3755                 }
3756
3757                 qstats->total_bytes_received_hi =
3758                 qstats->valid_bytes_received_hi =
3759                                 le32_to_cpu(tclient->total_rcv_bytes.hi);
3760                 qstats->total_bytes_received_lo =
3761                 qstats->valid_bytes_received_lo =
3762                                 le32_to_cpu(tclient->total_rcv_bytes.lo);
3763
3764                 qstats->error_bytes_received_hi =
3765                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
3766                 qstats->error_bytes_received_lo =
3767                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
3768
3769                 ADD_64(qstats->total_bytes_received_hi,
3770                        qstats->error_bytes_received_hi,
3771                        qstats->total_bytes_received_lo,
3772                        qstats->error_bytes_received_lo);
3773
3774                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3775                                         total_unicast_packets_received);
3776                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3777                                         total_multicast_packets_received);
3778                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3779                                         total_broadcast_packets_received);
3780                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3781                                         etherstatsoverrsizepkts);
3782                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3783
3784                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3785                                         total_unicast_packets_received);
3786                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3787                                         total_multicast_packets_received);
3788                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3789                                         total_broadcast_packets_received);
3790                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3791                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3792                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3793
3794                 qstats->total_bytes_transmitted_hi =
3795                                 le32_to_cpu(xclient->total_sent_bytes.hi);
3796                 qstats->total_bytes_transmitted_lo =
3797                                 le32_to_cpu(xclient->total_sent_bytes.lo);
3798
3799                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3800                                         total_unicast_packets_transmitted);
3801                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3802                                         total_multicast_packets_transmitted);
3803                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3804                                         total_broadcast_packets_transmitted);
3805
3806                 old_tclient->checksum_discard = tclient->checksum_discard;
3807                 old_tclient->ttl0_discard = tclient->ttl0_discard;
3808
3809                 ADD_64(fstats->total_bytes_received_hi,
3810                        qstats->total_bytes_received_hi,
3811                        fstats->total_bytes_received_lo,
3812                        qstats->total_bytes_received_lo);
3813                 ADD_64(fstats->total_bytes_transmitted_hi,
3814                        qstats->total_bytes_transmitted_hi,
3815                        fstats->total_bytes_transmitted_lo,
3816                        qstats->total_bytes_transmitted_lo);
3817                 ADD_64(fstats->total_unicast_packets_received_hi,
3818                        qstats->total_unicast_packets_received_hi,
3819                        fstats->total_unicast_packets_received_lo,
3820                        qstats->total_unicast_packets_received_lo);
3821                 ADD_64(fstats->total_multicast_packets_received_hi,
3822                        qstats->total_multicast_packets_received_hi,
3823                        fstats->total_multicast_packets_received_lo,
3824                        qstats->total_multicast_packets_received_lo);
3825                 ADD_64(fstats->total_broadcast_packets_received_hi,
3826                        qstats->total_broadcast_packets_received_hi,
3827                        fstats->total_broadcast_packets_received_lo,
3828                        qstats->total_broadcast_packets_received_lo);
3829                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3830                        qstats->total_unicast_packets_transmitted_hi,
3831                        fstats->total_unicast_packets_transmitted_lo,
3832                        qstats->total_unicast_packets_transmitted_lo);
3833                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3834                        qstats->total_multicast_packets_transmitted_hi,
3835                        fstats->total_multicast_packets_transmitted_lo,
3836                        qstats->total_multicast_packets_transmitted_lo);
3837                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3838                        qstats->total_broadcast_packets_transmitted_hi,
3839                        fstats->total_broadcast_packets_transmitted_lo,
3840                        qstats->total_broadcast_packets_transmitted_lo);
3841                 ADD_64(fstats->valid_bytes_received_hi,
3842                        qstats->valid_bytes_received_hi,
3843                        fstats->valid_bytes_received_lo,
3844                        qstats->valid_bytes_received_lo);
3845
3846                 ADD_64(estats->error_bytes_received_hi,
3847                        qstats->error_bytes_received_hi,
3848                        estats->error_bytes_received_lo,
3849                        qstats->error_bytes_received_lo);
3850                 ADD_64(estats->etherstatsoverrsizepkts_hi,
3851                        qstats->etherstatsoverrsizepkts_hi,
3852                        estats->etherstatsoverrsizepkts_lo,
3853                        qstats->etherstatsoverrsizepkts_lo);
3854                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3855                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3856         }
3857
3858         ADD_64(fstats->total_bytes_received_hi,
3859                estats->rx_stat_ifhcinbadoctets_hi,
3860                fstats->total_bytes_received_lo,
3861                estats->rx_stat_ifhcinbadoctets_lo);
3862
3863         memcpy(estats, &(fstats->total_bytes_received_hi),
3864                sizeof(struct host_func_stats) - 2*sizeof(u32));
3865
3866         ADD_64(estats->etherstatsoverrsizepkts_hi,
3867                estats->rx_stat_dot3statsframestoolong_hi,
3868                estats->etherstatsoverrsizepkts_lo,
3869                estats->rx_stat_dot3statsframestoolong_lo);
3870         ADD_64(estats->error_bytes_received_hi,
3871                estats->rx_stat_ifhcinbadoctets_hi,
3872                estats->error_bytes_received_lo,
3873                estats->rx_stat_ifhcinbadoctets_lo);
3874
3875         if (bp->port.pmf) {
3876                 estats->mac_filter_discard =
3877                                 le32_to_cpu(tport->mac_filter_discard);
3878                 estats->xxoverflow_discard =
3879                                 le32_to_cpu(tport->xxoverflow_discard);
3880                 estats->brb_truncate_discard =
3881                                 le32_to_cpu(tport->brb_truncate_discard);
3882                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3883         }
3884
3885         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3886
3887         bp->stats_pending = 0;
3888
3889         return 0;
3890 }
3891
3892 static void bnx2x_net_stats_update(struct bnx2x *bp)
3893 {
3894         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3895         struct net_device_stats *nstats = &bp->dev->stats;
3896         int i;
3897
3898         nstats->rx_packets =
3899                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3900                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3901                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3902
3903         nstats->tx_packets =
3904                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3905                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3906                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3907
3908         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3909
3910         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3911
3912         nstats->rx_dropped = estats->mac_discard;
3913         for_each_queue(bp, i)
3914                 nstats->rx_dropped +=
3915                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3916
3917         nstats->tx_dropped = 0;
3918
3919         nstats->multicast =
3920                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3921
3922         nstats->collisions =
3923                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3924
3925         nstats->rx_length_errors =
3926                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3927                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3928         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3929                                  bnx2x_hilo(&estats->brb_truncate_hi);
3930         nstats->rx_crc_errors =
3931                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3932         nstats->rx_frame_errors =
3933                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3934         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3935         nstats->rx_missed_errors = estats->xxoverflow_discard;
3936
3937         nstats->rx_errors = nstats->rx_length_errors +
3938                             nstats->rx_over_errors +
3939                             nstats->rx_crc_errors +
3940                             nstats->rx_frame_errors +
3941                             nstats->rx_fifo_errors +
3942                             nstats->rx_missed_errors;
3943
3944         nstats->tx_aborted_errors =
3945                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3946                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3947         nstats->tx_carrier_errors =
3948                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3949         nstats->tx_fifo_errors = 0;
3950         nstats->tx_heartbeat_errors = 0;
3951         nstats->tx_window_errors = 0;
3952
3953         nstats->tx_errors = nstats->tx_aborted_errors +
3954                             nstats->tx_carrier_errors +
3955             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3956 }
3957
3958 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3959 {
3960         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3961         int i;
3962
3963         estats->driver_xoff = 0;
3964         estats->rx_err_discard_pkt = 0;
3965         estats->rx_skb_alloc_failed = 0;
3966         estats->hw_csum_err = 0;
3967         for_each_queue(bp, i) {
3968                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3969
3970                 estats->driver_xoff += qstats->driver_xoff;
3971                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3972                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3973                 estats->hw_csum_err += qstats->hw_csum_err;
3974         }
3975 }
3976
3977 static void bnx2x_stats_update(struct bnx2x *bp)
3978 {
3979         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3980
3981         if (*stats_comp != DMAE_COMP_VAL)
3982                 return;
3983
3984         if (bp->port.pmf)
3985                 bnx2x_hw_stats_update(bp);
3986
3987         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3988                 BNX2X_ERR("storm stats were not updated for 3 times\n");
3989                 bnx2x_panic();
3990                 return;
3991         }
3992
3993         bnx2x_net_stats_update(bp);
3994         bnx2x_drv_stats_update(bp);
3995
3996         if (bp->msglevel & NETIF_MSG_TIMER) {
3997                 struct tstorm_per_client_stats *old_tclient =
3998                                                         &bp->fp->old_tclient;
3999                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4000                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4001                 struct net_device_stats *nstats = &bp->dev->stats;
4002                 int i;
4003
4004                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4005                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4006                                   "  tx pkt (%lx)\n",
4007                        bnx2x_tx_avail(bp->fp),
4008                        le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4009                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4010                                   "  rx pkt (%lx)\n",
4011                        (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4012                              bp->fp->rx_comp_cons),
4013                        le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4014                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4015                                   "brb truncate %u\n",
4016                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4017                        qstats->driver_xoff,
4018                        estats->brb_drop_lo, estats->brb_truncate_lo);
4019                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4020                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4021                         "mac_discard %u  mac_filter_discard %u  "
4022                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4023                         "ttl0_discard %u\n",
4024                        le32_to_cpu(old_tclient->checksum_discard),
4025                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4026                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4027                        estats->mac_discard, estats->mac_filter_discard,
4028                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4029                        le32_to_cpu(old_tclient->ttl0_discard));
4030
4031                 for_each_queue(bp, i) {
4032                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4033                                bnx2x_fp(bp, i, tx_pkt),
4034                                bnx2x_fp(bp, i, rx_pkt),
4035                                bnx2x_fp(bp, i, rx_calls));
4036                 }
4037         }
4038
4039         bnx2x_hw_stats_post(bp);
4040         bnx2x_storm_stats_post(bp);
4041 }
4042
4043 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4044 {
4045         struct dmae_command *dmae;
4046         u32 opcode;
4047         int loader_idx = PMF_DMAE_C(bp);
4048         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4049
4050         bp->executer_idx = 0;
4051
4052         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4053                   DMAE_CMD_C_ENABLE |
4054                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4055 #ifdef __BIG_ENDIAN
4056                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4057 #else
4058                   DMAE_CMD_ENDIANITY_DW_SWAP |
4059 #endif
4060                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4061                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4062
4063         if (bp->port.port_stx) {
4064
4065                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4066                 if (bp->func_stx)
4067                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4068                 else
4069                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4070                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4071                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4072                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4073                 dmae->dst_addr_hi = 0;
4074                 dmae->len = sizeof(struct host_port_stats) >> 2;
4075                 if (bp->func_stx) {
4076                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4077                         dmae->comp_addr_hi = 0;
4078                         dmae->comp_val = 1;
4079                 } else {
4080                         dmae->comp_addr_lo =
4081                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4082                         dmae->comp_addr_hi =
4083                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4084                         dmae->comp_val = DMAE_COMP_VAL;
4085
4086                         *stats_comp = 0;
4087                 }
4088         }
4089
4090         if (bp->func_stx) {
4091
4092                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4093                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4094                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4095                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4096                 dmae->dst_addr_lo = bp->func_stx >> 2;
4097                 dmae->dst_addr_hi = 0;
4098                 dmae->len = sizeof(struct host_func_stats) >> 2;
4099                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4100                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4101                 dmae->comp_val = DMAE_COMP_VAL;
4102
4103                 *stats_comp = 0;
4104         }
4105 }
4106
4107 static void bnx2x_stats_stop(struct bnx2x *bp)
4108 {
4109         int update = 0;
4110
4111         bnx2x_stats_comp(bp);
4112
4113         if (bp->port.pmf)
4114                 update = (bnx2x_hw_stats_update(bp) == 0);
4115
4116         update |= (bnx2x_storm_stats_update(bp) == 0);
4117
4118         if (update) {
4119                 bnx2x_net_stats_update(bp);
4120
4121                 if (bp->port.pmf)
4122                         bnx2x_port_stats_stop(bp);
4123
4124                 bnx2x_hw_stats_post(bp);
4125                 bnx2x_stats_comp(bp);
4126         }
4127 }
4128
4129 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4130 {
4131 }
4132
4133 static const struct {
4134         void (*action)(struct bnx2x *bp);
4135         enum bnx2x_stats_state next_state;
4136 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4137 /* state        event   */
4138 {
4139 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4140 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4141 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4142 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4143 },
4144 {
4145 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4146 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4147 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4148 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4149 }
4150 };
4151
4152 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4153 {
4154         enum bnx2x_stats_state state = bp->stats_state;
4155
4156         bnx2x_stats_stm[state][event].action(bp);
4157         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4158
4159         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4160                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4161                    state, event, bp->stats_state);
4162 }
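/* A worked walk through the table above, assuming the STATS_EVENT_*
 * names match the table's comment column:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); // DISABLED -> ENABLED
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);  // ENABLED  -> ENABLED
 *	bnx2x_stats_handle(bp, STATS_EVENT_STOP);    // ENABLED  -> DISABLED
 *
 * An event arriving in the "wrong" state (e.g. UPDATE while DISABLED)
 * lands on bnx2x_stats_do_nothing() rather than misfiring.
 */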
4163
4164 static void bnx2x_timer(unsigned long data)
4165 {
4166         struct bnx2x *bp = (struct bnx2x *) data;
4167
4168         if (!netif_running(bp->dev))
4169                 return;
4170
4171         if (atomic_read(&bp->intr_sem) != 0)
4172                 goto timer_restart;
4173
4174         if (poll) {
4175                 struct bnx2x_fastpath *fp = &bp->fp[0];
4176                 int rc;
4177
4178                 bnx2x_tx_int(fp);
4179                 rc = bnx2x_rx_int(fp, 1000);
4180         }
4181
4182         if (!BP_NOMCP(bp)) {
4183                 int func = BP_FUNC(bp);
4184                 u32 drv_pulse;
4185                 u32 mcp_pulse;
4186
4187                 ++bp->fw_drv_pulse_wr_seq;
4188                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4189                 /* TBD - add SYSTEM_TIME */
4190                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4191                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4192
4193                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4194                              MCP_PULSE_SEQ_MASK);
4195                 /* The delta between driver pulse and mcp response
4196                  * should be 1 (before mcp response) or 0 (after mcp response)
4197                  */
4198                 if ((drv_pulse != mcp_pulse) &&
4199                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4200                         /* someone lost a heartbeat... */
4201                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4202                                   drv_pulse, mcp_pulse);
4203                 }
4204         }
4205
4206         if ((bp->state == BNX2X_STATE_OPEN) ||
4207             (bp->state == BNX2X_STATE_DISABLED))
4208                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4209
4210 timer_restart:
4211         mod_timer(&bp->timer, jiffies + bp->current_interval);
4212 }
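/* A worked example of the pulse check above: the driver bumps its
 * sequence number and writes it to the shared mailbox, and the MCP
 * periodically echoes what it last saw. If drv_pulse == 0x0005 and the
 * MCP has already answered, mcp_pulse == 0x0005 and the first
 * comparison passes; if the MCP has not answered yet, mcp_pulse ==
 * 0x0004 and drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK)
 * passes instead (the mask keeps the +1 correct across sequence
 * wraparound). Any other combination means a lost heartbeat and hits
 * the BNX2X_ERR() print.
 */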
4213
4214 /* end of Statistics */
4215
4216 /* nic init */
4217
4218 /*
4219  * nic init service functions
4220  */
4221
4222 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4223 {
4224         int port = BP_PORT(bp);
4225
4226         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4227                         USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4228                         sizeof(struct ustorm_status_block)/4);
4229         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4230                         CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4231                         sizeof(struct cstorm_status_block)/4);
4232 }
4233
4234 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4235                           dma_addr_t mapping, int sb_id)
4236 {
4237         int port = BP_PORT(bp);
4238         int func = BP_FUNC(bp);
4239         int index;
4240         u64 section;
4241
4242         /* USTORM */
4243         section = ((u64)mapping) + offsetof(struct host_status_block,
4244                                             u_status_block);
4245         sb->u_status_block.status_block_id = sb_id;
4246
4247         REG_WR(bp, BAR_USTRORM_INTMEM +
4248                USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4249         REG_WR(bp, BAR_USTRORM_INTMEM +
4250                ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4251                U64_HI(section));
4252         REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4253                 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4254
4255         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4256                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4257                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4258
4259         /* CSTORM */
4260         section = ((u64)mapping) + offsetof(struct host_status_block,
4261                                             c_status_block);
4262         sb->c_status_block.status_block_id = sb_id;
4263
4264         REG_WR(bp, BAR_CSTRORM_INTMEM +
4265                CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4266         REG_WR(bp, BAR_CSTRORM_INTMEM +
4267                ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4268                U64_HI(section));
4269         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4270                 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4271
4272         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4273                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4274                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4275
4276         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4277 }
4278
4279 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4280 {
4281         int func = BP_FUNC(bp);
4282
4283         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4284                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4285                         sizeof(struct tstorm_def_status_block)/4);
4286         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4287                         USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4288                         sizeof(struct ustorm_def_status_block)/4);
4289         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4290                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4291                         sizeof(struct cstorm_def_status_block)/4);
4292         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4293                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4294                         sizeof(struct xstorm_def_status_block)/4);
4295 }
4296
4297 static void bnx2x_init_def_sb(struct bnx2x *bp,
4298                               struct host_def_status_block *def_sb,
4299                               dma_addr_t mapping, int sb_id)
4300 {
4301         int port = BP_PORT(bp);
4302         int func = BP_FUNC(bp);
4303         int index, val, reg_offset;
4304         u64 section;
4305
4306         /* ATTN */
4307         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4308                                             atten_status_block);
4309         def_sb->atten_status_block.status_block_id = sb_id;
4310
4311         bp->attn_state = 0;
4312
4313         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4314                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4315
4316         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4317                 bp->attn_group[index].sig[0] = REG_RD(bp,
4318                                                      reg_offset + 0x10*index);
4319                 bp->attn_group[index].sig[1] = REG_RD(bp,
4320                                                reg_offset + 0x4 + 0x10*index);
4321                 bp->attn_group[index].sig[2] = REG_RD(bp,
4322                                                reg_offset + 0x8 + 0x10*index);
4323                 bp->attn_group[index].sig[3] = REG_RD(bp,
4324                                                reg_offset + 0xc + 0x10*index);
4325         }
4326
4327         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4328                              HC_REG_ATTN_MSG0_ADDR_L);
4329
4330         REG_WR(bp, reg_offset, U64_LO(section));
4331         REG_WR(bp, reg_offset + 4, U64_HI(section));
4332
4333         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4334
4335         val = REG_RD(bp, reg_offset);
4336         val |= sb_id;
4337         REG_WR(bp, reg_offset, val);
4338
4339         /* USTORM */
4340         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4341                                             u_def_status_block);
4342         def_sb->u_def_status_block.status_block_id = sb_id;
4343
4344         REG_WR(bp, BAR_USTRORM_INTMEM +
4345                USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4346         REG_WR(bp, BAR_USTRORM_INTMEM +
4347                ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4348                U64_HI(section));
4349         REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4350                 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4351
4352         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4353                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4354                          USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4355
4356         /* CSTORM */
4357         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4358                                             c_def_status_block);
4359         def_sb->c_def_status_block.status_block_id = sb_id;
4360
4361         REG_WR(bp, BAR_CSTRORM_INTMEM +
4362                CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4363         REG_WR(bp, BAR_CSTRORM_INTMEM +
4364                ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4365                U64_HI(section));
4366         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4367                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4368
4369         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4370                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4371                          CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4372
4373         /* TSTORM */
4374         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4375                                             t_def_status_block);
4376         def_sb->t_def_status_block.status_block_id = sb_id;
4377
4378         REG_WR(bp, BAR_TSTRORM_INTMEM +
4379                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4380         REG_WR(bp, BAR_TSTRORM_INTMEM +
4381                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4382                U64_HI(section));
4383         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4384                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4385
4386         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4387                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4388                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4389
4390         /* XSTORM */
4391         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4392                                             x_def_status_block);
4393         def_sb->x_def_status_block.status_block_id = sb_id;
4394
4395         REG_WR(bp, BAR_XSTRORM_INTMEM +
4396                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4397         REG_WR(bp, BAR_XSTRORM_INTMEM +
4398                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4399                U64_HI(section));
4400         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4401                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4402
4403         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4404                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4405                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4406
4407         bp->stats_pending = 0;
4408         bp->set_mac_pending = 0;
4409
4410         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4411 }
4412
4413 static void bnx2x_update_coalesce(struct bnx2x *bp)
4414 {
4415         int port = BP_PORT(bp);
4416         int i;
4417
4418         for_each_queue(bp, i) {
4419                 int sb_id = bp->fp[i].sb_id;
4420
4421                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4422                 REG_WR8(bp, BAR_USTRORM_INTMEM +
4423                         USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4424                                                     U_SB_ETH_RX_CQ_INDEX),
4425                         bp->rx_ticks/12);
4426                 REG_WR16(bp, BAR_USTRORM_INTMEM +
4427                          USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4428                                                      U_SB_ETH_RX_CQ_INDEX),
4429                          bp->rx_ticks ? 0 : 1);
4430
4431                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4432                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4433                         CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4434                                                     C_SB_ETH_TX_CQ_INDEX),
4435                         bp->tx_ticks/12);
4436                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4437                          CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4438                                                      C_SB_ETH_TX_CQ_INDEX),
4439                          bp->tx_ticks ? 0 : 1);
4440         }
4441 }
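/* The /12 above converts the user-visible rx_ticks/tx_ticks values
 * (microseconds) into host-coalescing timeout units - presumably the
 * HC timer ticks every 12us, so e.g. rx_ticks = 50 programs 50/12 = 4
 * units (~48us). A value of 0 does not program a zero timeout;
 * instead the REG_WR16() for that direction writes 1 into the
 * matching HC_DISABLE slot, switching coalescing off for that index
 * entirely.
 */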
4442
4443 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4444                                        struct bnx2x_fastpath *fp, int last)
4445 {
4446         int i;
4447
4448         for (i = 0; i < last; i++) {
4449                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4450                 struct sk_buff *skb = rx_buf->skb;
4451
4452                 if (skb == NULL) {
4453                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4454                         continue;
4455                 }
4456
4457                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4458                         pci_unmap_single(bp->pdev,
4459                                          pci_unmap_addr(rx_buf, mapping),
4460                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4461
4462                 dev_kfree_skb(skb);
4463                 rx_buf->skb = NULL;
4464         }
4465 }
4466
4467 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4468 {
4469         int func = BP_FUNC(bp);
4470         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4471                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4472         u16 ring_prod, cqe_ring_prod;
4473         int i, j;
4474
4475         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4476         DP(NETIF_MSG_IFUP,
4477            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4478
4479         if (bp->flags & TPA_ENABLE_FLAG) {
4480
4481                 for_each_rx_queue(bp, j) {
4482                         struct bnx2x_fastpath *fp = &bp->fp[j];
4483
4484                         for (i = 0; i < max_agg_queues; i++) {
4485                                 fp->tpa_pool[i].skb =
4486                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4487                                 if (!fp->tpa_pool[i].skb) {
4488                                         BNX2X_ERR("Failed to allocate TPA "
4489                                                   "skb pool for queue[%d] - "
4490                                                   "disabling TPA on this "
4491                                                   "queue!\n", j);
4492                                         bnx2x_free_tpa_pool(bp, fp, i);
4493                                         fp->disable_tpa = 1;
4494                                         break;
4495                                 }
4496                                 pci_unmap_addr_set((struct sw_rx_bd *)
4497                                                         &fp->tpa_pool[i],
4498                                                    mapping, 0);
4499                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4500                         }
4501                 }
4502         }
4503
4504         for_each_rx_queue(bp, j) {
4505                 struct bnx2x_fastpath *fp = &bp->fp[j];
4506
4507                 fp->rx_bd_cons = 0;
4508                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4509                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4510
4511                 /* "next page" elements initialization */
4512                 /* SGE ring */
4513                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4514                         struct eth_rx_sge *sge;
4515
4516                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4517                         sge->addr_hi =
4518                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4519                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4520                         sge->addr_lo =
4521                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4522                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4523                 }
4524
4525                 bnx2x_init_sge_ring_bit_mask(fp);
4526
4527                 /* RX BD ring */
4528                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4529                         struct eth_rx_bd *rx_bd;
4530
4531                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4532                         rx_bd->addr_hi =
4533                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4534                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4535                         rx_bd->addr_lo =
4536                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4537                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4538                 }
4539
4540                 /* CQ ring */
4541                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4542                         struct eth_rx_cqe_next_page *nextpg;
4543
4544                         nextpg = (struct eth_rx_cqe_next_page *)
4545                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4546                         nextpg->addr_hi =
4547                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4548                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4549                         nextpg->addr_lo =
4550                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4551                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4552                 }
4553
4554                 /* Allocate SGEs and initialize the ring elements */
4555                 for (i = 0, ring_prod = 0;
4556                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4557
4558                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4559                                 BNX2X_ERR("was only able to allocate "
4560                                           "%d rx sges\n", i);
4561                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4562                                 /* Cleanup already allocated elements */
4563                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4564                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4565                                 fp->disable_tpa = 1;
4566                                 ring_prod = 0;
4567                                 break;
4568                         }
4569                         ring_prod = NEXT_SGE_IDX(ring_prod);
4570                 }
4571                 fp->rx_sge_prod = ring_prod;
4572
4573                 /* Allocate BDs and initialize BD ring */
4574                 fp->rx_comp_cons = 0;
4575                 cqe_ring_prod = ring_prod = 0;
4576                 for (i = 0; i < bp->rx_ring_size; i++) {
4577                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4578                                 BNX2X_ERR("was only able to allocate "
4579                                           "%d rx skbs on queue[%d]\n", i, j);
4580                                 fp->eth_q_stats.rx_skb_alloc_failed++;
4581                                 break;
4582                         }
4583                         ring_prod = NEXT_RX_IDX(ring_prod);
4584                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4585                         WARN_ON(ring_prod <= i);
4586                 }
4587
4588                 fp->rx_bd_prod = ring_prod;
4589                 /* must not have more available CQEs than BDs */
4590                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4591                                        cqe_ring_prod);
4592                 fp->rx_pkt = fp->rx_calls = 0;
4593
4594                 /* Warning!
4595                  * This will generate an interrupt (to the TSTORM);
4596                  * it must only be done after the chip is initialized.
4597                  */
4598                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4599                                      fp->rx_sge_prod);
4600                 if (j != 0)
4601                         continue;
4602
4603                 REG_WR(bp, BAR_USTRORM_INTMEM +
4604                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4605                        U64_LO(fp->rx_comp_mapping));
4606                 REG_WR(bp, BAR_USTRORM_INTMEM +
4607                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4608                        U64_HI(fp->rx_comp_mapping));
4609         }
4610 }
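/* Ring layout note for the loops above: every ring is a chain of
 * BCM_PAGE_SIZE pages whose final slot(s) hold a "next page" pointer
 * instead of a real descriptor - hence the writes at
 * RX_SGE_CNT * i - 2 / RX_DESC_CNT * i - 2 (8-byte SGE/BD elements,
 * with the last two slots per page apparently reserved) and
 * RCQ_DESC_CNT * i - 1 (one reserved CQE). The (i % NUM_*_RINGS)
 * arithmetic makes the last page's pointer wrap back to page 0,
 * e.g. with two pages:
 *
 *	page 0 [bd .. bd | next -> page 1]
 *	page 1 [bd .. bd | next -> page 0]
 */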
4611
4612 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4613 {
4614         int i, j;
4615
4616         for_each_tx_queue(bp, j) {
4617                 struct bnx2x_fastpath *fp = &bp->fp[j];
4618
4619                 for (i = 1; i <= NUM_TX_RINGS; i++) {
4620                         struct eth_tx_bd *tx_bd =
4621                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4622
4623                         tx_bd->addr_hi =
4624                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4625                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4626                         tx_bd->addr_lo =
4627                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4628                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4629                 }
4630
4631                 fp->tx_pkt_prod = 0;
4632                 fp->tx_pkt_cons = 0;
4633                 fp->tx_bd_prod = 0;
4634                 fp->tx_bd_cons = 0;
4635                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4636                 fp->tx_pkt = 0;
4637         }
4638 }
4639
4640 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4641 {
4642         int func = BP_FUNC(bp);
4643
4644         spin_lock_init(&bp->spq_lock);
4645
4646         bp->spq_left = MAX_SPQ_PENDING;
4647         bp->spq_prod_idx = 0;
4648         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4649         bp->spq_prod_bd = bp->spq;
4650         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4651
4652         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4653                U64_LO(bp->spq_mapping));
4654         REG_WR(bp,
4655                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4656                U64_HI(bp->spq_mapping));
4657
4658         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4659                bp->spq_prod_idx);
4660 }
4661
4662 static void bnx2x_init_context(struct bnx2x *bp)
4663 {
4664         int i;
4665
4666         for_each_queue(bp, i) {
4667                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4668                 struct bnx2x_fastpath *fp = &bp->fp[i];
4669                 u8 cl_id = fp->cl_id;
4670                 u8 sb_id = fp->sb_id;
4671
4672                 context->ustorm_st_context.common.sb_index_numbers =
4673                                                 BNX2X_RX_SB_INDEX_NUM;
4674                 context->ustorm_st_context.common.clientId = cl_id;
4675                 context->ustorm_st_context.common.status_block_id = sb_id;
4676                 context->ustorm_st_context.common.flags =
4677                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4678                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4679                 context->ustorm_st_context.common.statistics_counter_id =
4680                                                 cl_id;
4681                 context->ustorm_st_context.common.mc_alignment_log_size =
4682                                                 BNX2X_RX_ALIGN_SHIFT;
4683                 context->ustorm_st_context.common.bd_buff_size =
4684                                                 bp->rx_buf_size;
4685                 context->ustorm_st_context.common.bd_page_base_hi =
4686                                                 U64_HI(fp->rx_desc_mapping);
4687                 context->ustorm_st_context.common.bd_page_base_lo =
4688                                                 U64_LO(fp->rx_desc_mapping);
4689                 if (!fp->disable_tpa) {
4690                         context->ustorm_st_context.common.flags |=
4691                                 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4692                                  USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4693                         context->ustorm_st_context.common.sge_buff_size =
4694                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4695                                          (u32)0xffff);
4696                         context->ustorm_st_context.common.sge_page_base_hi =
4697                                                 U64_HI(fp->rx_sge_mapping);
4698                         context->ustorm_st_context.common.sge_page_base_lo =
4699                                                 U64_LO(fp->rx_sge_mapping);
4700                 }
4701
4702                 context->ustorm_ag_context.cdu_usage =
4703                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4704                                                CDU_REGION_NUMBER_UCM_AG,
4705                                                ETH_CONNECTION_TYPE);
4706
4707                 context->xstorm_st_context.tx_bd_page_base_hi =
4708                                                 U64_HI(fp->tx_desc_mapping);
4709                 context->xstorm_st_context.tx_bd_page_base_lo =
4710                                                 U64_LO(fp->tx_desc_mapping);
4711                 context->xstorm_st_context.db_data_addr_hi =
4712                                                 U64_HI(fp->tx_prods_mapping);
4713                 context->xstorm_st_context.db_data_addr_lo =
4714                                                 U64_LO(fp->tx_prods_mapping);
4715                 context->xstorm_st_context.statistics_data = (cl_id |
4716                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4717                 context->cstorm_st_context.sb_index_number =
4718                                                 C_SB_ETH_TX_CQ_INDEX;
4719                 context->cstorm_st_context.status_block_id = sb_id;
4720
4721                 context->xstorm_ag_context.cdu_reserved =
4722                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4723                                                CDU_REGION_NUMBER_XCM_AG,
4724                                                ETH_CONNECTION_TYPE);
4725         }
4726 }
4727
4728 static void bnx2x_init_ind_table(struct bnx2x *bp)
4729 {
4730         int func = BP_FUNC(bp);
4731         int i;
4732
4733         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4734                 return;
4735
4736         DP(NETIF_MSG_IFUP,
4737            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
4738         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4739                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4740                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4741                         bp->fp->cl_id + (i % bp->num_rx_queues));
4742 }
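/* Example of the fill above: with num_rx_queues = 4 and a leading
 * client id of, say, 8, the table entries become 8, 9, 10, 11, 8, 9,
 * ... - RSS hash buckets are spread round-robin across the RX queues'
 * client ids.
 */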
4743
4744 static void bnx2x_set_client_config(struct bnx2x *bp)
4745 {
4746         struct tstorm_eth_client_config tstorm_client = {0};
4747         int port = BP_PORT(bp);
4748         int i;
4749
4750         tstorm_client.mtu = bp->dev->mtu;
4751         tstorm_client.config_flags =
4752                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4753                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4754 #ifdef BCM_VLAN
4755         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4756                 tstorm_client.config_flags |=
4757                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4758                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4759         }
4760 #endif
4761
4762         if (bp->flags & TPA_ENABLE_FLAG) {
4763                 tstorm_client.max_sges_for_packet =
4764                         SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4765                 tstorm_client.max_sges_for_packet =
4766                         ((tstorm_client.max_sges_for_packet +
4767                           PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4768                         PAGES_PER_SGE_SHIFT;
4769
4770                 tstorm_client.config_flags |=
4771                                 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4772         }
4773
4774         for_each_queue(bp, i) {
4775                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4776
4777                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4778                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4779                        ((u32 *)&tstorm_client)[0]);
4780                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4781                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4782                        ((u32 *)&tstorm_client)[1]);
4783         }
4784
4785         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4786            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4787 }
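/* A worked run of the max_sges_for_packet math above, assuming 4K SGE
 * pages (SGE_PAGE_SHIFT = 12) and PAGES_PER_SGE = 2 (so
 * PAGES_PER_SGE_SHIFT = 1): for mtu = 9000,
 * SGE_PAGE_ALIGN(9000) >> 12 = 3 pages; rounding up to a PAGES_PER_SGE
 * multiple gives (3 + 1) & ~1 = 4, and the final shift yields
 * 4 >> 1 = 2 SGE entries per aggregated packet.
 */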
4788
4789 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4790 {
4791         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4792         int mode = bp->rx_mode;
4793         int mask = (1 << BP_L_ID(bp));
4794         int func = BP_FUNC(bp);
4795         int i;
4796
4797         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
4798
4799         switch (mode) {
4800         case BNX2X_RX_MODE_NONE: /* no Rx */
4801                 tstorm_mac_filter.ucast_drop_all = mask;
4802                 tstorm_mac_filter.mcast_drop_all = mask;
4803                 tstorm_mac_filter.bcast_drop_all = mask;
4804                 break;
4805
4806         case BNX2X_RX_MODE_NORMAL:
4807                 tstorm_mac_filter.bcast_accept_all = mask;
4808                 break;
4809
4810         case BNX2X_RX_MODE_ALLMULTI:
4811                 tstorm_mac_filter.mcast_accept_all = mask;
4812                 tstorm_mac_filter.bcast_accept_all = mask;
4813                 break;
4814
4815         case BNX2X_RX_MODE_PROMISC:
4816                 tstorm_mac_filter.ucast_accept_all = mask;
4817                 tstorm_mac_filter.mcast_accept_all = mask;
4818                 tstorm_mac_filter.bcast_accept_all = mask;
4819                 break;
4820
4821         default:
4822                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4823                 break;
4824         }
4825
4826         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4827                 REG_WR(bp, BAR_TSTRORM_INTMEM +
4828                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4829                        ((u32 *)&tstorm_mac_filter)[i]);
4830
4831 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4832                    ((u32 *)&tstorm_mac_filter)[i]); */
4833         }
4834
4835         if (mode != BNX2X_RX_MODE_NONE)
4836                 bnx2x_set_client_config(bp);
4837 }
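/* The drop_all/accept_all fields above are bitmaps indexed by client
 * id, and mask = (1 << BP_L_ID(bp)) selects only this function's
 * leading client. E.g. with BP_L_ID(bp) == 2, PROMISC sets bit 2
 * (0x4) in ucast/mcast/bcast_accept_all and leaves the other clients'
 * filter bits alone.
 */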
4838
4839 static void bnx2x_init_internal_common(struct bnx2x *bp)
4840 {
4841         int i;
4842
4843         if (bp->flags & TPA_ENABLE_FLAG) {
4844                 struct tstorm_eth_tpa_exist tpa = {0};
4845
4846                 tpa.tpa_exist = 1;
4847
4848                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4849                        ((u32 *)&tpa)[0]);
4850                 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4851                        ((u32 *)&tpa)[1]);
4852         }
4853
4854         /* Zero this manually as its initialization is
4855            currently missing in the initTool */
4856         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4857                 REG_WR(bp, BAR_USTRORM_INTMEM +
4858                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
4859 }
4860
4861 static void bnx2x_init_internal_port(struct bnx2x *bp)
4862 {
4863         int port = BP_PORT(bp);
4864
4865         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4866         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4867         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4868         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4869 }
4870
4871 /* Calculates the sum of vn_min_rates.
4872    It's needed for further normalization of the min_rates.
4873    Returns:
4874      sum of vn_min_rates
4875        or
4876      0 - if all the min_rates are 0.
4877      In the latter case the fairness algorithm should be deactivated.
4878      If not all min_rates are zero, then those that are zero will be set to 1.
4879  */
4880 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4881 {
4882         int all_zero = 1;
4883         int port = BP_PORT(bp);
4884         int vn;
4885
4886         bp->vn_weight_sum = 0;
4887         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4888                 int func = 2*vn + port;
4889                 u32 vn_cfg =
4890                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4891                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4892                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4893
4894                 /* Skip hidden vns */
4895                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4896                         continue;
4897
4898                 /* If min rate is zero - set it to DEF_MIN_RATE */
4899                 if (!vn_min_rate)
4900                         vn_min_rate = DEF_MIN_RATE;
4901                 else
4902                         all_zero = 0;
4903
4904                 bp->vn_weight_sum += vn_min_rate;
4905         }
4906
4907         /* ... disable fairness only if all the min rates were zero */
4908         if (all_zero)
4909                 bp->vn_weight_sum = 0;
4910 }
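/* Worked example: if the four VNs are configured with min rates
 * 0, 25, 0, 75 (scaled by * 100 to 0, 2500, 0, 7500), the zero
 * entries are bumped to DEF_MIN_RATE and vn_weight_sum becomes
 * 2500 + 7500 + 2 * DEF_MIN_RATE; since at least one rate was
 * non-zero, all_zero stays 0 and fairness stays enabled. Only when
 * every non-hidden VN reports 0 does the sum collapse back to 0,
 * which disables fairness.
 */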
4911
4912 static void bnx2x_init_internal_func(struct bnx2x *bp)
4913 {
4914         struct tstorm_eth_function_common_config tstorm_config = {0};
4915         struct stats_indication_flags stats_flags = {0};
4916         int port = BP_PORT(bp);
4917         int func = BP_FUNC(bp);
4918         int i, j;
4919         u32 offset;
4920         u16 max_agg_size;
4921
4922         if (is_multi(bp)) {
4923                 tstorm_config.config_flags = MULTI_FLAGS(bp);
4924                 tstorm_config.rss_result_mask = MULTI_MASK;
4925         }
4926         if (IS_E1HMF(bp))
4927                 tstorm_config.config_flags |=
4928                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4929
4930         tstorm_config.leading_client_id = BP_L_ID(bp);
4931
4932         REG_WR(bp, BAR_TSTRORM_INTMEM +
4933                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4934                (*(u32 *)&tstorm_config));
4935
4936         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4937         bnx2x_set_storm_rx_mode(bp);
4938
4939         for_each_queue(bp, i) {
4940                 u8 cl_id = bp->fp[i].cl_id;
4941
4942                 /* reset xstorm per client statistics */
4943                 offset = BAR_XSTRORM_INTMEM +
4944                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4945                 for (j = 0;
4946                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4947                         REG_WR(bp, offset + j*4, 0);
4948
4949                 /* reset tstorm per client statistics */
4950                 offset = BAR_TSTRORM_INTMEM +
4951                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4952                 for (j = 0;
4953                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4954                         REG_WR(bp, offset + j*4, 0);
4955
4956                 /* reset ustorm per client statistics */
4957                 offset = BAR_USTRORM_INTMEM +
4958                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4959                 for (j = 0;
4960                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4961                         REG_WR(bp, offset + j*4, 0);
4962         }
4963
4964         /* Init statistics related context */
4965         stats_flags.collect_eth = 1;
4966
4967         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4968                ((u32 *)&stats_flags)[0]);
4969         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4970                ((u32 *)&stats_flags)[1]);
4971
4972         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4973                ((u32 *)&stats_flags)[0]);
4974         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4975                ((u32 *)&stats_flags)[1]);
4976
4977         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4978                ((u32 *)&stats_flags)[0]);
4979         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4980                ((u32 *)&stats_flags)[1]);
4981
4982         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4983                ((u32 *)&stats_flags)[0]);
4984         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4985                ((u32 *)&stats_flags)[1]);
4986
4987         REG_WR(bp, BAR_XSTRORM_INTMEM +
4988                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4989                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4990         REG_WR(bp, BAR_XSTRORM_INTMEM +
4991                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4992                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4993
4994         REG_WR(bp, BAR_TSTRORM_INTMEM +
4995                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4996                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4997         REG_WR(bp, BAR_TSTRORM_INTMEM +
4998                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4999                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5000
5001         REG_WR(bp, BAR_USTRORM_INTMEM +
5002                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5003                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5004         REG_WR(bp, BAR_USTRORM_INTMEM +
5005                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5006                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5007
5008         if (CHIP_IS_E1H(bp)) {
5009                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5010                         IS_E1HMF(bp));
5011                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5012                         IS_E1HMF(bp));
5013                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5014                         IS_E1HMF(bp));
5015                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5016                         IS_E1HMF(bp));
5017
5018                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5019                          bp->e1hov);
5020         }
5021
5022         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5023         max_agg_size =
5024                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5025                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5026                     (u32)0xffff);
5027         for_each_rx_queue(bp, i) {
5028                 struct bnx2x_fastpath *fp = &bp->fp[i];
5029
5030                 REG_WR(bp, BAR_USTRORM_INTMEM +
5031                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5032                        U64_LO(fp->rx_comp_mapping));
5033                 REG_WR(bp, BAR_USTRORM_INTMEM +
5034                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5035                        U64_HI(fp->rx_comp_mapping));
5036
5037                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5038                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5039                          max_agg_size);
5040         }
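        /* A worked run of the max_agg_size computation above, assuming
         * 4K SGE pages, PAGES_PER_SGE = 2 and MAX_SKB_FRAGS >= 8:
         * min(8, MAX_SKB_FRAGS) * 4096 * 2 = 65536, which the outer
         * min() caps at 0xffff so the value still fits the 16-bit
         * MAX_AGG_SIZE register written just above.
         */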
5041
5042         /* dropless flow control */
5043         if (CHIP_IS_E1H(bp)) {
5044                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5045
5046                 rx_pause.bd_thr_low = 250;
5047                 rx_pause.cqe_thr_low = 250;
5048                 rx_pause.cos = 1;
5049                 rx_pause.sge_thr_low = 0;
5050                 rx_pause.bd_thr_high = 350;
5051                 rx_pause.cqe_thr_high = 350;
5052                 rx_pause.sge_thr_high = 0;
5053
5054                 for_each_rx_queue(bp, i) {
5055                         struct bnx2x_fastpath *fp = &bp->fp[i];
5056
5057                         if (!fp->disable_tpa) {
5058                                 rx_pause.sge_thr_low = 150;
5059                                 rx_pause.sge_thr_high = 250;
5060                         }
5061
5062
5063                         offset = BAR_USTRORM_INTMEM +
5064                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5065                                                                    fp->cl_id);
5066                         for (j = 0;
5067                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5068                              j++)
5069                                 REG_WR(bp, offset + j*4,
5070                                        ((u32 *)&rx_pause)[j]);
5071                 }
5072         }
5073
5074         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5075
5076         /* Init rate shaping and fairness contexts */
5077         if (IS_E1HMF(bp)) {
5078                 int vn;
5079
5080                 /* During init there is no active link.
5081                    Until link is up, set the link rate to 10Gbps */
5082                 bp->link_vars.line_speed = SPEED_10000;
5083                 bnx2x_init_port_minmax(bp);
5084
5085                 bnx2x_calc_vn_weight_sum(bp);
5086
5087                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5088                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5089
5090                 /* Enable rate shaping and fairness */
5091                 bp->cmng.flags.cmng_enables =
5092                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5093                 if (bp->vn_weight_sum)
5094                         bp->cmng.flags.cmng_enables |=
5095                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5096                 else
5097                         DP(NETIF_MSG_IFUP, "All MIN values are zero"
5098                            "  fairness will be disabled\n");
5099         } else {
5100                 /* rate shaping and fairness are disabled */
5101                 DP(NETIF_MSG_IFUP,
5102                    "single function mode  minmax will be disabled\n");
5103         }
5104
5105
5106         /* Store it to internal memory */
5107         if (bp->port.pmf)
5108                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5109                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5110                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5111                                ((u32 *)(&bp->cmng))[i]);
5112 }
5113
5114 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5115 {
5116         switch (load_code) {
5117         case FW_MSG_CODE_DRV_LOAD_COMMON:
5118                 bnx2x_init_internal_common(bp);
5119                 /* no break */
5120
5121         case FW_MSG_CODE_DRV_LOAD_PORT:
5122                 bnx2x_init_internal_port(bp);
5123                 /* no break */
5124
5125         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5126                 bnx2x_init_internal_func(bp);
5127                 break;
5128
5129         default:
5130                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5131                 break;
5132         }
5133 }
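/* The deliberate fall-through above makes each load type a strict
 * superset: DRV_LOAD_COMMON runs common + port + function init,
 * DRV_LOAD_PORT runs port + function, and DRV_LOAD_FUNCTION runs only
 * the per-function init - matching how much of the chip this driver
 * instance is responsible for bringing up.
 */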
5134
5135 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5136 {
5137         int i;
5138
5139         for_each_queue(bp, i) {
5140                 struct bnx2x_fastpath *fp = &bp->fp[i];
5141
5142                 fp->bp = bp;
5143                 fp->state = BNX2X_FP_STATE_CLOSED;
5144                 fp->index = i;
5145                 fp->cl_id = BP_L_ID(bp) + i;
5146                 fp->sb_id = fp->cl_id;
5147                 DP(NETIF_MSG_IFUP,
5148                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5149                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5150                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5151                               fp->sb_id);
5152                 bnx2x_update_fpsb_idx(fp);
5153         }
5154
5155         /* ensure status block indices were read */
5156         rmb();
5157
5158
5159         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5160                           DEF_SB_ID);
5161         bnx2x_update_dsb_idx(bp);
5162         bnx2x_update_coalesce(bp);
5163         bnx2x_init_rx_rings(bp);
5164         bnx2x_init_tx_ring(bp);
5165         bnx2x_init_sp_ring(bp);
5166         bnx2x_init_context(bp);
5167         bnx2x_init_internal(bp, load_code);
5168         bnx2x_init_ind_table(bp);
5169         bnx2x_stats_init(bp);
5170
5171         /* At this point, we are ready for interrupts */
5172         atomic_set(&bp->intr_sem, 0);
5173
5174         /* flush all before enabling interrupts */
5175         mb();
5176         mmiowb();
5177
5178         bnx2x_int_enable(bp);
5179 }
5180
5181 /* end of nic init */
5182
5183 /*
5184  * gzip service functions
5185  */
5186
5187 static int bnx2x_gunzip_init(struct bnx2x *bp)
5188 {
5189         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5190                                               &bp->gunzip_mapping);
5191         if (bp->gunzip_buf == NULL)
5192                 goto gunzip_nomem1;
5193
5194         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5195         if (bp->strm == NULL)
5196                 goto gunzip_nomem2;
5197
5198         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5199                                       GFP_KERNEL);
5200         if (bp->strm->workspace == NULL)
5201                 goto gunzip_nomem3;
5202
5203         return 0;
5204
5205 gunzip_nomem3:
5206         kfree(bp->strm);
5207         bp->strm = NULL;
5208
5209 gunzip_nomem2:
5210         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5211                             bp->gunzip_mapping);
5212         bp->gunzip_buf = NULL;
5213
5214 gunzip_nomem1:
5215         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5216                " decompression\n", bp->dev->name);
5217         return -ENOMEM;
5218 }
5219
5220 static void bnx2x_gunzip_end(struct bnx2x *bp)
5221 {
5222         kfree(bp->strm->workspace);
5223
5224         kfree(bp->strm);
5225         bp->strm = NULL;
5226
5227         if (bp->gunzip_buf) {
5228                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5229                                     bp->gunzip_mapping);
5230                 bp->gunzip_buf = NULL;
5231         }
5232 }
5233
5234 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5235 {
5236         int n, rc;
5237
5238         /* check gzip header */
5239         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5240                 return -EINVAL;
5241
5242         n = 10;
5243
5244 #define FNAME                           0x8
5245
5246         if (zbuf[3] & FNAME)
5247                 while ((zbuf[n++] != 0) && (n < len));
5248
5249         bp->strm->next_in = zbuf + n;
5250         bp->strm->avail_in = len - n;
5251         bp->strm->next_out = bp->gunzip_buf;
5252         bp->strm->avail_out = FW_BUF_SIZE;
5253
5254         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5255         if (rc != Z_OK)
5256                 return rc;
5257
5258         rc = zlib_inflate(bp->strm, Z_FINISH);
5259         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5260                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5261                        bp->dev->name, bp->strm->msg);
5262
5263         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5264         if (bp->gunzip_outlen & 0x3)
5265                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5266                                     " gunzip_outlen (%d) not aligned\n",
5267                        bp->dev->name, bp->gunzip_outlen);
5268         bp->gunzip_outlen >>= 2;
5269
5270         zlib_inflateEnd(bp->strm);
5271
5272         if (rc == Z_STREAM_END)
5273                 return 0;
5274
5275         return rc;
5276 }
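/* Header-parsing sketch for the function above: a gzip stream opens
 * with a 10-byte fixed header - magic 0x1f 0x8b, method byte
 * (0x08 = Z_DEFLATED), flags at offset 3, then mtime/xfl/os - which
 * is why n starts at 10. If the FNAME flag (0x8) is set, a
 * NUL-terminated file name follows and the while loop skips it:
 *
 *	if (zbuf[3] & FNAME)		// e.g. "firmware.bin\0" present
 *		while ((zbuf[n++] != 0) && (n < len))
 *			;
 *
 * Passing -MAX_WBITS to zlib_inflateInit2() then tells zlib the
 * payload is a raw deflate stream with no zlib wrapper of its own.
 */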
5277
5278 /* nic load/unload */
5279
5280 /*
5281  * General service functions
5282  */
5283
5284 /* send a NIG loopback debug packet */
5285 static void bnx2x_lb_pckt(struct bnx2x *bp)
5286 {
5287         u32 wb_write[3];
5288
5289         /* Ethernet source and destination addresses */
5290         wb_write[0] = 0x55555555;
5291         wb_write[1] = 0x55555555;
5292         wb_write[2] = 0x20;             /* SOP */
5293         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5294
5295         /* NON-IP protocol */
5296         wb_write[0] = 0x09000000;
5297         wb_write[1] = 0x55555555;
5298         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5299         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5300 }
5301
5302 /* some of the internal memories are not directly readable
5303  * from the driver; to test them we send debug packets
5304  * through the NIG loopback
5305  */
5306 static int bnx2x_int_mem_test(struct bnx2x *bp)
5307 {
5308         int factor;
5309         int count, i;
5310         u32 val = 0;
5311
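             /* test flow: isolate the parser from its neighbor blocks,
              * inject loopback packets through the NIG and check that the
              * NIG and PRS packet counters advance as expected; FPGA and
              * emulation runs are much slower, hence the factor below */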
5312         if (CHIP_REV_IS_FPGA(bp))
5313                 factor = 120;
5314         else if (CHIP_REV_IS_EMUL(bp))
5315                 factor = 200;
5316         else
5317                 factor = 1;
5318
5319         DP(NETIF_MSG_HW, "start part1\n");
5320
5321         /* Disable inputs of parser neighbor blocks */
5322         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5323         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5324         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5325         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5326
5327         /*  Write 0 to parser credits for CFC search request */
5328         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5329
5330         /* send Ethernet packet */
5331         bnx2x_lb_pckt(bp);
5332
5333         /* TODO: should the NIG statistics be reset here? */
5334         /* Wait until NIG register shows 1 packet of size 0x10 */
5335         count = 1000 * factor;
5336         while (count) {
5337
5338                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5339                 val = *bnx2x_sp(bp, wb_data[0]);
5340                 if (val == 0x10)
5341                         break;
5342
5343                 msleep(10);
5344                 count--;
5345         }
5346         if (val != 0x10) {
5347                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5348                 return -1;
5349         }
5350
5351         /* Wait until PRS register shows 1 packet */
5352         count = 1000 * factor;
5353         while (count) {
5354                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5355                 if (val == 1)
5356                         break;
5357
5358                 msleep(10);
5359                 count--;
5360         }
5361         if (val != 0x1) {
5362                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5363                 return -2;
5364         }
5365
5366         /* Reset and init BRB, PRS */
5367         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5368         msleep(50);
5369         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5370         msleep(50);
5371         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5372         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5373
5374         DP(NETIF_MSG_HW, "part2\n");
5375
5376         /* Disable inputs of parser neighbor blocks */
5377         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5378         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5379         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5380         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5381
5382         /* Write 0 to parser credits for CFC search request */
5383         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5384
5385         /* send 10 Ethernet packets */
5386         for (i = 0; i < 10; i++)
5387                 bnx2x_lb_pckt(bp);
5388
5389         /* Wait until NIG register shows 10 + 1
5390            packets of size 11*0x10 = 0xb0 */
5391         count = 1000 * factor;
5392         while (count) {
5393
5394                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5395                 val = *bnx2x_sp(bp, wb_data[0]);
5396                 if (val == 0xb0)
5397                         break;
5398
5399                 msleep(10);
5400                 count--;
5401         }
5402         if (val != 0xb0) {
5403                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5404                 return -3;
5405         }
5406
5407         /* Wait until PRS register shows 2 packets */
5408         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5409         if (val != 2)
5410                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5411
5412         /* Write 1 to parser credits for CFC search request */
5413         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5414
5415         /* Wait until PRS register shows 3 packets */
5416         msleep(10 * factor);
5418         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5419         if (val != 3)
5420                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5421
5422         /* clear NIG EOP FIFO */
5423         for (i = 0; i < 11; i++)
5424                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5425         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5426         if (val != 1) {
5427                 BNX2X_ERR("clear of NIG failed\n");
5428                 return -4;
5429         }
5430
5431         /* Reset and init BRB, PRS, NIG */
5432         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5433         msleep(50);
5434         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5435         msleep(50);
5436         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5437         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5438 #ifndef BCM_ISCSI
5439         /* set NIC mode */
5440         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5441 #endif
5442
5443         /* Enable inputs of parser neighbor blocks */
5444         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5445         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5446         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5447         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5448
5449         DP(NETIF_MSG_HW, "done\n");
5450
5451         return 0; /* OK */
5452 }
5453
5454 static void enable_blocks_attention(struct bnx2x *bp)
5455 {
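             /* writing 0 to a block's INT_MASK register unmasks all of
              * its attention sources */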
5456         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5457         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5458         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5459         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5460         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5461         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5462         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5463         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5464         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5465 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5466 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5467         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5468         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5469         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5470 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5471 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5472         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5473         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5474         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5475         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5476 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5477 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5478         if (CHIP_REV_IS_FPGA(bp))
5479                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5480         else
5481                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5482         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5483         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5484         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5485 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5486 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5487         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5488         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5489 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5490         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5491 }
5492
5493
5494 static void bnx2x_reset_common(struct bnx2x *bp)
5495 {
5496         /* reset_common */
5497         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5498                0xd3ffff7f);
5499         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5500 }
5501
5502 static int bnx2x_init_common(struct bnx2x *bp)
5503 {
5504         u32 val, i;
5505
5506         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5507
5508         bnx2x_reset_common(bp);
5509         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5510         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5511
5512         bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5513         if (CHIP_IS_E1H(bp))
5514                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5515
5516         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5517         msleep(30);
5518         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5519
5520         bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5521         if (CHIP_IS_E1(bp)) {
5522                 /* enable HW interrupt from PXP on USDM overflow
5523                    bit 16 on INT_MASK_0 */
5524                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5525         }
5526
5527         bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5528         bnx2x_init_pxp(bp);
5529
5530 #ifdef __BIG_ENDIAN
5531         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5532         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5533         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5534         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5535         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5536         /* make sure this value is 0 */
5537         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5538
5539 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5540         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5541         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5542         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5543         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5544 #endif
5545
5546         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5547 #ifdef BCM_ISCSI
5548         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5549         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5550         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5551 #endif
5552
5553         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5554                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5555
5556         /* let the HW do its magic ... */
5557         msleep(100);
5558         /* finish PXP init */
5559         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5560         if (val != 1) {
5561                 BNX2X_ERR("PXP2 CFG failed\n");
5562                 return -EBUSY;
5563         }
5564         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5565         if (val != 1) {
5566                 BNX2X_ERR("PXP2 RD_INIT failed\n");
5567                 return -EBUSY;
5568         }
5569
5570         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5571         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5572
5573         bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5574
5575         /* clean the DMAE memory */
5576         bp->dmae_ready = 1;
5577         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5578
5579         bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5580         bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5581         bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5582         bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5583
5584         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5585         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5586         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5587         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5588
5589         bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5590         /* soft reset pulse */
5591         REG_WR(bp, QM_REG_SOFT_RESET, 1);
5592         REG_WR(bp, QM_REG_SOFT_RESET, 0);
5593
5594 #ifdef BCM_ISCSI
5595         bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5596 #endif
5597
5598         bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5599         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5600         if (!CHIP_REV_IS_SLOW(bp)) {
5601                 /* enable hw interrupt from doorbell Q */
5602                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5603         }
5604
5605         bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5606         bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5607         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5608         /* set NIC mode */
5609         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5610         if (CHIP_IS_E1H(bp))
5611                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5612
5613         bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5614         bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5615         bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5616         bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5617
5618         bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5619         bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5620         bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5621         bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5622
5623         bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5624         bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5625         bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5626         bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5627
5628         /* sync semi rtc */
5629         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5630                0x80000000);
5631         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5632                0x80000000);
5633
5634         bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5635         bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5636         bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5637
5638         REG_WR(bp, SRC_REG_SOFT_RST, 1);
5639         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5640                 REG_WR(bp, i, 0xc0cac01a);
5641                 /* TODO: replace with something meaningful */
5642         }
5643         bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5644         REG_WR(bp, SRC_REG_SOFT_RST, 0);
5645
5646         if (sizeof(union cdu_context) != 1024)
5647                 /* we currently assume that a context is 1024 bytes */
5648                 printk(KERN_ALERT PFX "please adjust the size of"
5649                        " cdu_context (%ld)\n", (long)sizeof(union cdu_context));
5650
5651         bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5652         val = (4 << 24) + (0 << 12) + 1024;
5653         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5654         if (CHIP_IS_E1(bp)) {
5655                 /* !!! fix pxp client credit until excel update */
5656                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5657                 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5658         }
5659
5660         bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5661         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5662         /* enable context validation interrupt from CFC */
5663         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5664
5665         /* set the thresholds to prevent CFC/CDU race */
5666         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5667
5668         bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5669         bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5670
5671         /* PXPCS COMMON comes here */
5672         /* Reset PCIE errors for debug */
5673         REG_WR(bp, 0x2814, 0xffffffff);
5674         REG_WR(bp, 0x3820, 0xffffffff);
5675
5676         /* EMAC0 COMMON comes here */
5677         /* EMAC1 COMMON comes here */
5678         /* DBU COMMON comes here */
5679         /* DBG COMMON comes here */
5680
5681         bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5682         if (CHIP_IS_E1H(bp)) {
5683                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5684                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5685         }
5686
5687         if (CHIP_REV_IS_SLOW(bp))
5688                 msleep(200);
5689
5690         /* finish CFC init */
5691         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5692         if (val != 1) {
5693                 BNX2X_ERR("CFC LL_INIT failed\n");
5694                 return -EBUSY;
5695         }
5696         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5697         if (val != 1) {
5698                 BNX2X_ERR("CFC AC_INIT failed\n");
5699                 return -EBUSY;
5700         }
5701         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5702         if (val != 1) {
5703                 BNX2X_ERR("CFC CAM_INIT failed\n");
5704                 return -EBUSY;
5705         }
5706         REG_WR(bp, CFC_REG_DEBUG0, 0);
5707
5708         /* read NIG statistic
5709            to see if this is our first up since powerup */
5710         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5711         val = *bnx2x_sp(bp, wb_data[0]);
5712
5713         /* do internal memory self test */
5714         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5715                 BNX2X_ERR("internal mem self test failed\n");
5716                 return -EBUSY;
5717         }
5718
5719         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5720         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5721         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5722         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5723                 bp->port.need_hw_lock = 1;
5724                 break;
5725
5726         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5727                 /* Fan failure is indicated by SPIO 5 */
5728                 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5729                                MISC_REGISTERS_SPIO_INPUT_HI_Z);
5730
5731                 /* set to active low mode */
5732                 val = REG_RD(bp, MISC_REG_SPIO_INT);
5733                 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5734                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5735                 REG_WR(bp, MISC_REG_SPIO_INT, val);
5736
5737                 /* enable interrupt to signal the IGU */
5738                 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5739                 val |= (1 << MISC_REGISTERS_SPIO_5);
5740                 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5741                 break;
5742
5743         default:
5744                 break;
5745         }
5746
5747         /* clear PXP2 attentions */
5748         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5749
5750         enable_blocks_attention(bp);
5751
5752         if (!BP_NOMCP(bp)) {
5753                 bnx2x_acquire_phy_lock(bp);
5754                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5755                 bnx2x_release_phy_lock(bp);
5756         } else
5757                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5758
5759         return 0;
5760 }
5761
5762 static int bnx2x_init_port(struct bnx2x *bp)
5763 {
5764         int port = BP_PORT(bp);
5765         u32 low, high;
5766         u32 val;
     #ifdef BCM_ISCSI
             /* locals for the iSCSI-only ILT/searcher setup below; the base
              * ILT line (0 for Port0, 384 for Port1) is an assumption taken
              * from the "Port0  1 / Port1  385" comments */
             int func = BP_FUNC(bp);
             u32 wb_write[2];
             int i = port ? 384 : 0;
     #endif
5767
5768         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
5769
5770         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5771
5772         /* Port PXP comes here */
5773         /* Port PXP2 comes here */
5774 #ifdef BCM_ISCSI
5775         /* Port0  1
5776          * Port1  385 */
5777         i++;
5778         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5779         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5780         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5781         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5782
5783         /* Port0  2
5784          * Port1  386 */
5785         i++;
5786         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5787         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5788         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5789         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5790
5791         /* Port0  3
5792          * Port1  387 */
5793         i++;
5794         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5795         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5796         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5797         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5798 #endif
5799         /* Port CMs come here */
5800         bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5801                              (port ? XCM_PORT1_END : XCM_PORT0_END));
5802
5803         /* Port QM comes here */
5804 #ifdef BCM_ISCSI
5805         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5806         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5807
5808         bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5809                              func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5810 #endif
5811         /* Port DQ comes here */
5812
5813         bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5814                              (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5815         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5816                 /* no pause for emulation and FPGA */
5817                 low = 0;
5818                 high = 513;
5819         } else {
5820                 if (IS_E1HMF(bp))
5821                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5822                 else if (bp->dev->mtu > 4096) {
5823                         if (bp->flags & ONE_PORT_FLAG)
5824                                 low = 160;
5825                         else {
5826                                 val = bp->dev->mtu;
5827                                 /* (24*1024 + val*4)/256 */
5828                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5829                         }
5830                 } else
5831                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5832                 high = low + 56;        /* 14*1024/256 */
5833         }
5834         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5835         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5836
5837
5838         /* Port PRS comes here */
5839         /* Port TSDM comes here */
5840         /* Port CSDM comes here */
5841         /* Port USDM comes here */
5842         /* Port XSDM comes here */
5843
5844         bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5845                              port ? TSEM_PORT1_END : TSEM_PORT0_END);
5846         bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5847                              port ? USEM_PORT1_END : USEM_PORT0_END);
5848         bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5849                              port ? CSEM_PORT1_END : CSEM_PORT0_END);
5850         bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5851                              port ? XSEM_PORT1_END : XSEM_PORT0_END);
5852
5853         /* Port UPB comes here */
5854         /* Port XPB comes here */
5855
5856         bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5857                              port ? PBF_PORT1_END : PBF_PORT0_END);
5858
5859         /* configure PBF to work without PAUSE mtu 9000 */
5860         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5861
5862         /* update threshold */
5863         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5864         /* update init credit */
5865         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5866
5867         /* probe changes */
5868         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5869         msleep(5);
5870         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5871
5872 #ifdef BCM_ISCSI
5873         /* tell the searcher where the T2 table is */
5874         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5875
5876         wb_write[0] = U64_LO(bp->t2_mapping);
5877         wb_write[1] = U64_HI(bp->t2_mapping);
5878         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5879         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5880         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5881         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5882
5883         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5884         /* Port SRCH comes here */
5885 #endif
5886         /* Port CDU comes here */
5887         /* Port CFC comes here */
5888
5889         if (CHIP_IS_E1(bp)) {
5890                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5891                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5892         }
5893         bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5894                              port ? HC_PORT1_END : HC_PORT0_END);
5895
5896         bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5897                                     MISC_AEU_PORT0_START,
5898                              port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5899         /* init aeu_mask_attn_func_0/1:
5900          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5901          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5902          *             bits 4-7 are used for "per vn group attention" */
5903         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5904                (IS_E1HMF(bp) ? 0xF7 : 0x7));
5905
5906         /* Port PXPCS comes here */
5907         /* Port EMAC0 comes here */
5908         /* Port EMAC1 comes here */
5909         /* Port DBU comes here */
5910         /* Port DBG comes here */
5911
5912         bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5913                              port ? NIG_PORT1_END : NIG_PORT0_END);
5914
5915         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5916
5917         if (CHIP_IS_E1H(bp)) {
5918                 /* 0x2 disable e1hov, 0x1 enable */
5919                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5920                        (IS_E1HMF(bp) ? 0x1 : 0x2));
5921
5922                 /* support pause requests from USDM, TSDM and BRB */
5923                 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5924
5925                 {
5926                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5927                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5928                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5929                 }
5930         }
5931
5932         /* Port MCP comes here */
5933         /* Port DMAE comes here */
5934
5935         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5936         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5937                 {
5938                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5939
5940                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5941                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5942
5943                 /* The GPIO should be swapped if the swap register is
5944                    set and active */
5945                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5946                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5947
5948                 /* Select function upon port-swap configuration */
5949                 if (port == 0) {
5950                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5951                         aeu_gpio_mask = (swap_val && swap_override) ?
5952                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5953                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5954                 } else {
5955                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5956                         aeu_gpio_mask = (swap_val && swap_override) ?
5957                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5958                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5959                 }
5960                 val = REG_RD(bp, offset);
5961                 /* add GPIO3 to group */
5962                 val |= aeu_gpio_mask;
5963                 REG_WR(bp, offset, val);
5964                 }
5965                 break;
5966
5967         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5968                 /* add SPIO 5 to group 0 */
5969                 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5970                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5971                 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5972                 break;
5973
5974         default:
5975                 break;
5976         }
5977
5978         bnx2x__link_reset(bp);
5979
5980         return 0;
5981 }
5982
5983 #define ILT_PER_FUNC            (768/2)
5984 #define FUNC_ILT_BASE(func)     ((func) * ILT_PER_FUNC)
5985 /* the phys address is shifted right 12 bits and a valid bit (1)
5986    is set in the 53rd bit position;
5987    since this is a wide register (TM)
5988    we split it into two 32-bit writes
5989  */
5990 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5991 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
5992 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
5993 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
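     /* illustration only: for a physical address of 0x123456000,
      * ONCHIP_ADDR1() yields 0x00123456 (addr >> 12) and ONCHIP_ADDR2()
      * yields 0x00100000 (the valid bit, bit 20 of the high word;
      * addr >> 44 == 0) */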
5994
5995 #define CNIC_ILT_LINES          0
5996
5997 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5998 {
5999         int reg;
6000
6001         if (CHIP_IS_E1H(bp))
6002                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6003         else /* E1 */
6004                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6005
6006         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6007 }
6008
6009 static int bnx2x_init_func(struct bnx2x *bp)
6010 {
6011         int port = BP_PORT(bp);
6012         int func = BP_FUNC(bp);
6013         u32 addr, val;
6014         int i;
6015
6016         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6017
6018         /* set MSI reconfigure capability */
6019         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6020         val = REG_RD(bp, addr);
6021         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6022         REG_WR(bp, addr, val);
6023
6024         i = FUNC_ILT_BASE(func);
6025
6026         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6027         if (CHIP_IS_E1H(bp)) {
6028                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6029                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6030         } else /* E1 */
6031                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6032                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6033
6034
6035         if (CHIP_IS_E1H(bp)) {
6036                 for (i = 0; i < 9; i++)
6037                         bnx2x_init_block(bp,
6038                                          cm_start[func][i], cm_end[func][i]);
6039
6040                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6041                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6042         }
6043
6044         /* HC init per function */
6045         if (CHIP_IS_E1H(bp)) {
6046                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6047
6048                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6049                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6050         }
6051         bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6052
6053         /* Reset PCIE errors for debug */
6054         REG_WR(bp, 0x2114, 0xffffffff);
6055         REG_WR(bp, 0x2120, 0xffffffff);
6056
6057         return 0;
6058 }
6059
6060 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6061 {
6062         int i, rc = 0;
6063
6064         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6065            BP_FUNC(bp), load_code);
6066
6067         bp->dmae_ready = 0;
6068         mutex_init(&bp->dmae_mutex);
6069         bnx2x_gunzip_init(bp);
6070
6071         switch (load_code) {
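             /* the load levels nest: a COMMON load also performs the PORT
              * and FUNCTION init, and a PORT load the FUNCTION init,
              * hence the deliberate fall-through between the cases */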
6072         case FW_MSG_CODE_DRV_LOAD_COMMON:
6073                 rc = bnx2x_init_common(bp);
6074                 if (rc)
6075                         goto init_hw_err;
6076                 /* no break */
6077
6078         case FW_MSG_CODE_DRV_LOAD_PORT:
6079                 bp->dmae_ready = 1;
6080                 rc = bnx2x_init_port(bp);
6081                 if (rc)
6082                         goto init_hw_err;
6083                 /* no break */
6084
6085         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6086                 bp->dmae_ready = 1;
6087                 rc = bnx2x_init_func(bp);
6088                 if (rc)
6089                         goto init_hw_err;
6090                 break;
6091
6092         default:
6093                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6094                 break;
6095         }
6096
6097         if (!BP_NOMCP(bp)) {
6098                 int func = BP_FUNC(bp);
6099
6100                 bp->fw_drv_pulse_wr_seq =
6101                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6102                                  DRV_PULSE_SEQ_MASK);
6103                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6104                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
6105                    bp->fw_drv_pulse_wr_seq, bp->func_stx);
6106         } else
6107                 bp->func_stx = 0;
6108
6109         /* this needs to be done before gunzip end */
6110         bnx2x_zero_def_sb(bp);
6111         for_each_queue(bp, i)
6112                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6113
6114 init_hw_err:
6115         bnx2x_gunzip_end(bp);
6116
6117         return rc;
6118 }
6119
6120 /* send the MCP a request, block until there is a reply */
6121 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6122 {
6123         int func = BP_FUNC(bp);
6124         u32 seq = ++bp->fw_seq;
6125         u32 rc = 0;
6126         u32 cnt = 1;
6127         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6128
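             /* the sequence number is OR'ed into the command word; the FW
              * echoes it back in fw_mb_header (FW_MSG_SEQ_NUMBER_MASK),
              * which is how the reply is matched to the request below */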
6129         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6130         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6131
6132         do {
6133                 /* let the FW do its magic ... */
6134                 msleep(delay);
6135
6136                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6137
6138                 /* Give the FW up to 2 seconds (200 * 10ms; longer on slow chips) */
6139         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6140
6141         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6142            cnt*delay, rc, seq);
6143
6144         /* is this a reply to our command? */
6145         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6146                 rc &= FW_MSG_CODE_MASK;
6147
6148         } else {
6149                 /* FW BUG! */
6150                 BNX2X_ERR("FW failed to respond!\n");
6151                 bnx2x_fw_dump(bp);
6152                 rc = 0;
6153         }
6154
6155         return rc;
6156 }
6157
6158 static void bnx2x_free_mem(struct bnx2x *bp)
6159 {
6160
6161 #define BNX2X_PCI_FREE(x, y, size) \
6162         do { \
6163                 if (x) { \
6164                         pci_free_consistent(bp->pdev, size, x, y); \
6165                         x = NULL; \
6166                         y = 0; \
6167                 } \
6168         } while (0)
6169
6170 #define BNX2X_FREE(x) \
6171         do { \
6172                 if (x) { \
6173                         vfree(x); \
6174                         x = NULL; \
6175                 } \
6176         } while (0)
6177
6178         int i;
6179
6180         /* fastpath */
6181         /* Common */
6182         for_each_queue(bp, i) {
6183
6184                 /* status blocks */
6185                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6186                                bnx2x_fp(bp, i, status_blk_mapping),
6187                                sizeof(struct host_status_block) +
6188                                sizeof(struct eth_tx_db_data));
6189         }
6190         /* Rx */
6191         for_each_rx_queue(bp, i) {
6192
6193                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6194                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6195                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6196                                bnx2x_fp(bp, i, rx_desc_mapping),
6197                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6198
6199                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6200                                bnx2x_fp(bp, i, rx_comp_mapping),
6201                                sizeof(struct eth_fast_path_rx_cqe) *
6202                                NUM_RCQ_BD);
6203
6204                 /* SGE ring */
6205                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6206                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6207                                bnx2x_fp(bp, i, rx_sge_mapping),
6208                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6209         }
6210         /* Tx */
6211         for_each_tx_queue(bp, i) {
6212
6213                 /* fastpath tx rings: tx_buf tx_desc */
6214                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6215                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6216                                bnx2x_fp(bp, i, tx_desc_mapping),
6217                                sizeof(struct eth_tx_bd) * NUM_TX_BD);
6218         }
6219         /* end of fastpath */
6220
6221         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6222                        sizeof(struct host_def_status_block));
6223
6224         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6225                        sizeof(struct bnx2x_slowpath));
6226
6227 #ifdef BCM_ISCSI
6228         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6229         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6230         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6231         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6232 #endif
6233         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6234
6235 #undef BNX2X_PCI_FREE
6236 #undef BNX2X_FREE
6237 }
6238
6239 static int bnx2x_alloc_mem(struct bnx2x *bp)
6240 {
6241
6242 #define BNX2X_PCI_ALLOC(x, y, size) \
6243         do { \
6244                 x = pci_alloc_consistent(bp->pdev, size, y); \
6245                 if (x == NULL) \
6246                         goto alloc_mem_err; \
6247                 memset(x, 0, size); \
6248         } while (0)
6249
6250 #define BNX2X_ALLOC(x, size) \
6251         do { \
6252                 x = vmalloc(size); \
6253                 if (x == NULL) \
6254                         goto alloc_mem_err; \
6255                 memset(x, 0, size); \
6256         } while (0)
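     /* both macros bail out to alloc_mem_err on failure; the unwind there
      * calls bnx2x_free_mem(), which is safe part-way through because the
      * free macros skip NULL pointers */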
6257
6258         int i;
6259
6260         /* fastpath */
6261         /* Common */
6262         for_each_queue(bp, i) {
6263                 bnx2x_fp(bp, i, bp) = bp;
6264
6265                 /* status blocks */
6266                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6267                                 &bnx2x_fp(bp, i, status_blk_mapping),
6268                                 sizeof(struct host_status_block) +
6269                                 sizeof(struct eth_tx_db_data));
6270         }
6271         /* Rx */
6272         for_each_rx_queue(bp, i) {
6273
6274                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6275                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6276                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6277                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6278                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6279                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6280
6281                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6282                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6283                                 sizeof(struct eth_fast_path_rx_cqe) *
6284                                 NUM_RCQ_BD);
6285
6286                 /* SGE ring */
6287                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6288                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6289                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6290                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6291                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6292         }
6293         /* Tx */
6294         for_each_tx_queue(bp, i) {
6295
6296                 bnx2x_fp(bp, i, hw_tx_prods) =
6297                                 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6298
6299                 bnx2x_fp(bp, i, tx_prods_mapping) =
6300                                 bnx2x_fp(bp, i, status_blk_mapping) +
6301                                 sizeof(struct host_status_block);
6302
6303                 /* fastpath tx rings: tx_buf tx_desc */
6304                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6305                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6306                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6307                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6308                                 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6309         }
6310         /* end of fastpath */
6311
6312         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6313                         sizeof(struct host_def_status_block));
6314
6315         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6316                         sizeof(struct bnx2x_slowpath));
6317
6318 #ifdef BCM_ISCSI
6319         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6320
6321         /* Initialize T1 */
6322         for (i = 0; i < 64*1024; i += 64) {
6323                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6324                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6325         }
6326
6327         /* allocate the searcher T2 table;
6328            we allocate 1/4 of the T1 size for T2
6329            (which is not entered into the ILT) */
6330         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6331
6332         /* Initialize T2 */
6333         for (i = 0; i < 16*1024; i += 64)
6334                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6335
6336         /* now fixup the last line in the block to point to the next block */
6337         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6338
6339         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6340         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6341
6342         /* QM queues (128*MAX_CONN) */
6343         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6344 #endif
6345
6346         /* Slow path ring */
6347         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6348
6349         return 0;
6350
6351 alloc_mem_err:
6352         bnx2x_free_mem(bp);
6353         return -ENOMEM;
6354
6355 #undef BNX2X_PCI_ALLOC
6356 #undef BNX2X_ALLOC
6357 }
6358
6359 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6360 {
6361         int i;
6362
6363         for_each_tx_queue(bp, i) {
6364                 struct bnx2x_fastpath *fp = &bp->fp[i];
6365
6366                 u16 bd_cons = fp->tx_bd_cons;
6367                 u16 sw_prod = fp->tx_pkt_prod;
6368                 u16 sw_cons = fp->tx_pkt_cons;
6369
6370                 while (sw_cons != sw_prod) {
6371                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6372                         sw_cons++;
6373                 }
6374         }
6375 }
6376
6377 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6378 {
6379         int i, j;
6380
6381         for_each_rx_queue(bp, j) {
6382                 struct bnx2x_fastpath *fp = &bp->fp[j];
6383
6384                 for (i = 0; i < NUM_RX_BD; i++) {
6385                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6386                         struct sk_buff *skb = rx_buf->skb;
6387
6388                         if (skb == NULL)
6389                                 continue;
6390
6391                         pci_unmap_single(bp->pdev,
6392                                          pci_unmap_addr(rx_buf, mapping),
6393                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6394
6395                         rx_buf->skb = NULL;
6396                         dev_kfree_skb(skb);
6397                 }
6398                 if (!fp->disable_tpa)
6399                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6400                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6401                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6402         }
6403 }
6404
6405 static void bnx2x_free_skbs(struct bnx2x *bp)
6406 {
6407         bnx2x_free_tx_skbs(bp);
6408         bnx2x_free_rx_skbs(bp);
6409 }
6410
6411 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6412 {
6413         int i, offset = 1;
6414
6415         free_irq(bp->msix_table[0].vector, bp->dev);
6416         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6417            bp->msix_table[0].vector);
6418
6419         for_each_queue(bp, i) {
6420                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6421                    "state %x\n", i, bp->msix_table[i + offset].vector,
6422                    bnx2x_fp(bp, i, state));
6423
6424                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6425         }
6426 }
6427
6428 static void bnx2x_free_irq(struct bnx2x *bp)
6429 {
6430         if (bp->flags & USING_MSIX_FLAG) {
6431                 bnx2x_free_msix_irqs(bp);
6432                 pci_disable_msix(bp->pdev);
6433                 bp->flags &= ~USING_MSIX_FLAG;
6434
6435         } else if (bp->flags & USING_MSI_FLAG) {
6436                 free_irq(bp->pdev->irq, bp->dev);
6437                 pci_disable_msi(bp->pdev);
6438                 bp->flags &= ~USING_MSI_FLAG;
6439
6440         } else
6441                 free_irq(bp->pdev->irq, bp->dev);
6442 }
6443
6444 static int bnx2x_enable_msix(struct bnx2x *bp)
6445 {
6446         int i, rc, offset = 1;
6447         int igu_vec = 0;
6448
6449         bp->msix_table[0].entry = igu_vec;
6450         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6451
6452         for_each_queue(bp, i) {
6453                 igu_vec = BP_L_ID(bp) + offset + i;
6454                 bp->msix_table[i + offset].entry = igu_vec;
6455                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6456                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6457         }
6458
6459         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6460                              BNX2X_NUM_QUEUES(bp) + offset);
6461         if (rc) {
6462                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6463                 return rc;
6464         }
6465
6466         bp->flags |= USING_MSIX_FLAG;
6467
6468         return 0;
6469 }
6470
6471 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6472 {
6473         int i, rc, offset = 1;
6474
6475         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6476                          bp->dev->name, bp->dev);
6477         if (rc) {
6478                 BNX2X_ERR("request sp irq failed\n");
6479                 return -EBUSY;
6480         }
6481
6482         for_each_queue(bp, i) {
6483                 struct bnx2x_fastpath *fp = &bp->fp[i];
6484
6485                 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6486                 rc = request_irq(bp->msix_table[i + offset].vector,
6487                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6488                 if (rc) {
6489                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6490                         bnx2x_free_msix_irqs(bp);
6491                         return -EBUSY;
6492                 }
6493
6494                 fp->state = BNX2X_FP_STATE_IRQ;
6495         }
6496
6497         i = BNX2X_NUM_QUEUES(bp);
6498         if (is_multi(bp))
6499                 printk(KERN_INFO PFX
6500                        "%s: using MSI-X  IRQs: sp %d  fp %d - %d\n",
6501                        bp->dev->name, bp->msix_table[0].vector,
6502                        bp->msix_table[offset].vector,
6503                        bp->msix_table[offset + i - 1].vector);
6504         else
6505                 printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp %d\n",
6506                        bp->dev->name, bp->msix_table[0].vector,
6507                        bp->msix_table[offset + i - 1].vector);
6508
6509         return 0;
6510 }
6511
6512 static int bnx2x_enable_msi(struct bnx2x *bp)
6513 {
6514         int rc;
6515
6516         rc = pci_enable_msi(bp->pdev);
6517         if (rc) {
6518                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6519                 return -1;
6520         }
6521         bp->flags |= USING_MSI_FLAG;
6522
6523         return 0;
6524 }
6525
6526 static int bnx2x_req_irq(struct bnx2x *bp)
6527 {
6528         unsigned long flags;
6529         int rc;
6530
6531         if (bp->flags & USING_MSI_FLAG)
6532                 flags = 0;
6533         else
6534                 flags = IRQF_SHARED;
6535
6536         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6537                          bp->dev->name, bp->dev);
6538         if (!rc)
6539                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6540
6541         return rc;
6542 }
6543
6544 static void bnx2x_napi_enable(struct bnx2x *bp)
6545 {
6546         int i;
6547
6548         for_each_rx_queue(bp, i)
6549                 napi_enable(&bnx2x_fp(bp, i, napi));
6550 }
6551
6552 static void bnx2x_napi_disable(struct bnx2x *bp)
6553 {
6554         int i;
6555
6556         for_each_rx_queue(bp, i)
6557                 napi_disable(&bnx2x_fp(bp, i, napi));
6558 }
6559
6560 static void bnx2x_netif_start(struct bnx2x *bp)
6561 {
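             /* intr_sem is raised by the interrupt-disable path (see
              * bnx2x_netif_stop()); only the decrement that brings it back
              * to zero re-enables NAPI and interrupts, so the datapath is
              * restarted exactly once */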
6562         if (atomic_dec_and_test(&bp->intr_sem)) {
6563                 if (netif_running(bp->dev)) {
6564                         bnx2x_napi_enable(bp);
6565                         bnx2x_int_enable(bp);
6566                         if (bp->state == BNX2X_STATE_OPEN)
6567                                 netif_tx_wake_all_queues(bp->dev);
6568                 }
6569         }
6570 }
6571
6572 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6573 {
6574         bnx2x_int_disable_sync(bp, disable_hw);
6575         bnx2x_napi_disable(bp);
6576         netif_tx_disable(bp->dev);
6577         bp->dev->trans_start = jiffies; /* prevent tx timeout */
6578 }
6579
6580 /*
6581  * Init service functions
6582  */
6583
6584 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6585 {
6586         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6587         int port = BP_PORT(bp);
6588
6589         /* CAM allocation
6590          * unicasts 0-31:port0 32-63:port1
6591          * multicast 64-127:port0 128-191:port1
6592          */
6593         config->hdr.length = 2;
6594         config->hdr.offset = port ? 32 : 0;
6595         config->hdr.client_id = bp->fp->cl_id;
6596         config->hdr.reserved1 = 0;
6597
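             /* the CAM takes the MAC as three 16-bit words with the most
              * significant byte first; on little-endian hosts swab16()
              * produces that layout from each pair of dev_addr bytes */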
6598         /* primary MAC */
6599         config->config_table[0].cam_entry.msb_mac_addr =
6600                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6601         config->config_table[0].cam_entry.middle_mac_addr =
6602                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6603         config->config_table[0].cam_entry.lsb_mac_addr =
6604                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6605         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6606         if (set)
6607                 config->config_table[0].target_table_entry.flags = 0;
6608         else
6609                 CAM_INVALIDATE(config->config_table[0]);
6610         config->config_table[0].target_table_entry.client_id = 0;
6611         config->config_table[0].target_table_entry.vlan_id = 0;
6612
6613         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6614            (set ? "setting" : "clearing"),
6615            config->config_table[0].cam_entry.msb_mac_addr,
6616            config->config_table[0].cam_entry.middle_mac_addr,
6617            config->config_table[0].cam_entry.lsb_mac_addr);
6618
6619         /* broadcast */
6620         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6621         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6622         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6623         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6624         if (set)
6625                 config->config_table[1].target_table_entry.flags =
6626                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6627         else
6628                 CAM_INVALIDATE(config->config_table[1]);
6629         config->config_table[1].target_table_entry.client_id = 0;
6630         config->config_table[1].target_table_entry.vlan_id = 0;
6631
6632         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6633                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6634                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6635 }
6636
6637 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6638 {
6639         struct mac_configuration_cmd_e1h *config =
6640                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6641
6642         if (set && (bp->state != BNX2X_STATE_OPEN)) {
6643                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6644                 return;
6645         }
6646
6647         /* CAM allocation for E1H
6648          * unicasts: by func number
6649          * multicast: 20+FUNC*20, 20 each
6650          */
6651         config->hdr.length = 1;
6652         config->hdr.offset = BP_FUNC(bp);
6653         config->hdr.client_id = bp->fp->cl_id;
6654         config->hdr.reserved1 = 0;
6655
6656         /* primary MAC */
6657         config->config_table[0].msb_mac_addr =
6658                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
6659         config->config_table[0].middle_mac_addr =
6660                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
6661         config->config_table[0].lsb_mac_addr =
6662                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
6663         config->config_table[0].client_id = BP_L_ID(bp);
6664         config->config_table[0].vlan_id = 0;
6665         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6666         if (set)
6667                 config->config_table[0].flags = BP_PORT(bp);
6668         else
6669                 config->config_table[0].flags =
6670                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6671
6672         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
6673            (set ? "setting" : "clearing"),
6674            config->config_table[0].msb_mac_addr,
6675            config->config_table[0].middle_mac_addr,
6676            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6677
6678         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6679                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6680                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6681 }
6682
6683 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6684                              int *state_p, int poll)
6685 {
6686         /* can take a while if any port is running */
6687         int cnt = 5000;
6688
6689         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6690            poll ? "polling" : "waiting", state, idx);
6691
6692         might_sleep();
6693         while (cnt--) {
6694                 if (poll) {
6695                         bnx2x_rx_int(bp->fp, 10);
6696                         /* if the index differs from 0,
6697                          * the reply for some commands will
6698                          * be on a non-default queue
6699                          */
6700                         if (idx)
6701                                 bnx2x_rx_int(&bp->fp[idx], 10);
6702                 }
6703
6704                 mb(); /* state is changed by bnx2x_sp_event() */
6705                 if (*state_p == state) {
6706 #ifdef BNX2X_STOP_ON_ERROR
6707                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
6708 #endif
6709                         return 0;
6710                 }
6711
6712                 msleep(1);
6713         }
6714
6715         /* timeout! */
6716         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6717                   poll ? "polling" : "waiting", state, idx);
6718 #ifdef BNX2X_STOP_ON_ERROR
6719         bnx2x_panic();
6720 #endif
6721
6722         return -EBUSY;
6723 }
6724
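bnx2x_wait_ramrod() is bounded polling: re-read a state word that the completion path updates, sleep 1 ms between reads, and give up after a fixed number of tries. A self-contained sketch of the pattern, with volatile standing in for the driver's mb() pairing (all names here are illustrative):

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

static volatile int state;      /* stands in for fp->state / bp->state */

static int wait_for_state(volatile int *state_p, int wanted, int tries)
{
        while (tries--) {
                if (*state_p == wanted)
                        return 0;       /* completion observed */
                usleep(1000);           /* the driver uses msleep(1) */
        }
        return -EBUSY;                  /* timeout */
}

int main(void)
{
        state = 1;      /* pretend the completion already fired */
        printf("rc = %d\n", wait_for_state(&state, 1, 5000));
        return 0;
}
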
6725 static int bnx2x_setup_leading(struct bnx2x *bp)
6726 {
6727         int rc;
6728
6729         /* reset IGU state */
6730         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6731
6732         /* SETUP ramrod */
6733         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6734
6735         /* Wait for completion */
6736         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6737
6738         return rc;
6739 }
6740
6741 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6742 {
6743         struct bnx2x_fastpath *fp = &bp->fp[index];
6744
6745         /* reset IGU state */
6746         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6747
6748         /* SETUP ramrod */
6749         fp->state = BNX2X_FP_STATE_OPENING;
6750         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6751                       fp->cl_id, 0);
6752
6753         /* Wait for completion */
6754         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6755                                  &(fp->state), 0);
6756 }
6757
6758 static int bnx2x_poll(struct napi_struct *napi, int budget);
6759
6760 static void bnx2x_set_int_mode(struct bnx2x *bp)
6761 {
6762         int num_queues;
6763
6764         switch (int_mode) {
6765         case INT_MODE_INTx:
6766         case INT_MODE_MSI:
6767                 num_queues = 1;
6768                 bp->num_rx_queues = num_queues;
6769                 bp->num_tx_queues = num_queues;
6770                 DP(NETIF_MSG_IFUP,
6771                    "set number of queues to %d\n", num_queues);
6772                 break;
6773
6774         case INT_MODE_MSIX:
6775         default:
6776                 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6777                         num_queues = min_t(u32, num_online_cpus(),
6778                                            BNX2X_MAX_QUEUES(bp));
6779                 else
6780                         num_queues = 1;
6781                 bp->num_rx_queues = num_queues;
6782                 bp->num_tx_queues = num_queues;
6783                 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6784                    "  number of tx queues to %d\n",
6785                    bp->num_rx_queues, bp->num_tx_queues);
6786         /* if we can't use MSI-X we only need one fp,
6787          * so try to enable MSI-X with the requested number of fp's
6788          * and fall back to MSI or legacy INTx with one fp
6789          */
6790                 if (bnx2x_enable_msix(bp)) {
6791                         /* failed to enable MSI-X */
6792                         num_queues = 1;
6793                         bp->num_rx_queues = num_queues;
6794                         bp->num_tx_queues = num_queues;
6795                         if (bp->multi_mode)
6796                                 BNX2X_ERR("Multi requested but failed to "
6797                                           "enable MSI-X  set number of "
6798                                           "queues to %d\n", num_queues);
6799                 }
6800                 break;
6801         }
6802         bp->dev->real_num_tx_queues = bp->num_tx_queues;
6803 }
6804
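The queue-count policy above reduces to: with MSI-X and regular RSS, use min(online CPUs, hardware maximum); otherwise, or when MSI-X cannot be enabled, fall back to a single fastpath. A small sketch of that decision (all names are examples, not driver symbols):

#include <stdio.h>

static int pick_num_queues(int rss, int online_cpus, int max_queues,
                           int msix_ok)
{
        int n = rss ? (online_cpus < max_queues ? online_cpus : max_queues)
                    : 1;

        return msix_ok ? n : 1;         /* no MSI-X -> single fastpath */
}

int main(void)
{
        printf("%d\n", pick_num_queues(1, 16, 8, 1));   /* 8 */
        printf("%d\n", pick_num_queues(1, 16, 8, 0));   /* 1 */
        return 0;
}
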
6805 static void bnx2x_set_rx_mode(struct net_device *dev);
6806
6807 /* must be called with rtnl_lock */
6808 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6809 {
6810         u32 load_code;
6811         int i, rc = 0;
6812 #ifdef BNX2X_STOP_ON_ERROR
6813         DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
6814         if (unlikely(bp->panic))
6815                 return -EPERM;
6816 #endif
6817
6818         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6819
6820         bnx2x_set_int_mode(bp);
6821
6822         if (bnx2x_alloc_mem(bp))
6823                 return -ENOMEM;
6824
6825         for_each_rx_queue(bp, i)
6826                 bnx2x_fp(bp, i, disable_tpa) =
6827                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
6828
6829         for_each_rx_queue(bp, i)
6830                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6831                                bnx2x_poll, 128);
6832
6833 #ifdef BNX2X_STOP_ON_ERROR
6834         for_each_rx_queue(bp, i) {
6835                 struct bnx2x_fastpath *fp = &bp->fp[i];
6836
6837                 fp->poll_no_work = 0;
6838                 fp->poll_calls = 0;
6839                 fp->poll_max_calls = 0;
6840                 fp->poll_complete = 0;
6841                 fp->poll_exit = 0;
6842         }
6843 #endif
6844         bnx2x_napi_enable(bp);
6845
6846         if (bp->flags & USING_MSIX_FLAG) {
6847                 rc = bnx2x_req_msix_irqs(bp);
6848                 if (rc) {
6849                         pci_disable_msix(bp->pdev);
6850                         goto load_error1;
6851                 }
6852         } else {
6853                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6854                         bnx2x_enable_msi(bp);
6855                 bnx2x_ack_int(bp);
6856                 rc = bnx2x_req_irq(bp);
6857                 if (rc) {
6858                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
6859                         if (bp->flags & USING_MSI_FLAG)
6860                                 pci_disable_msi(bp->pdev);
6861                         goto load_error1;
6862                 }
6863                 if (bp->flags & USING_MSI_FLAG) {
6864                         bp->dev->irq = bp->pdev->irq;
6865                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
6866                                bp->dev->name, bp->pdev->irq);
6867                 }
6868         }
6869
6870         /* Send LOAD_REQUEST command to MCP.
6871            The MCP replies with the type of LOAD command:
6872            if this is the first port to be initialized,
6873            common blocks should be initialized, otherwise not
6874         */
6875         if (!BP_NOMCP(bp)) {
6876                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6877                 if (!load_code) {
6878                         BNX2X_ERR("MCP response failure, aborting\n");
6879                         rc = -EBUSY;
6880                         goto load_error2;
6881                 }
6882                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6883                         rc = -EBUSY; /* other port in diagnostic mode */
6884                         goto load_error2;
6885                 }
6886
6887         } else {
6888                 int port = BP_PORT(bp);
6889
6890                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
6891                    load_count[0], load_count[1], load_count[2]);
6892                 load_count[0]++;
6893                 load_count[1 + port]++;
6894                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
6895                    load_count[0], load_count[1], load_count[2]);
6896                 if (load_count[0] == 1)
6897                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6898                 else if (load_count[1 + port] == 1)
6899                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6900                 else
6901                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6902         }
6903
6904         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6905             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6906                 bp->port.pmf = 1;
6907         else
6908                 bp->port.pmf = 0;
6909         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6910
6911         /* Initialize HW */
6912         rc = bnx2x_init_hw(bp, load_code);
6913         if (rc) {
6914                 BNX2X_ERR("HW init failed, aborting\n");
6915                 goto load_error2;
6916         }
6917
6918         /* Setup NIC internals and enable interrupts */
6919         bnx2x_nic_init(bp, load_code);
6920
6921         /* Send LOAD_DONE command to MCP */
6922         if (!BP_NOMCP(bp)) {
6923                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6924                 if (!load_code) {
6925                         BNX2X_ERR("MCP response failure, aborting\n");
6926                         rc = -EBUSY;
6927                         goto load_error3;
6928                 }
6929         }
6930
6931         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6932
6933         rc = bnx2x_setup_leading(bp);
6934         if (rc) {
6935                 BNX2X_ERR("Setup leading failed!\n");
6936                 goto load_error3;
6937         }
6938
6939         if (CHIP_IS_E1H(bp))
6940                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6941                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
6942                         bp->state = BNX2X_STATE_DISABLED;
6943                 }
6944
6945         if (bp->state == BNX2X_STATE_OPEN)
6946                 for_each_nondefault_queue(bp, i) {
6947                         rc = bnx2x_setup_multi(bp, i);
6948                         if (rc)
6949                                 goto load_error3;
6950                 }
6951
6952         if (CHIP_IS_E1(bp))
6953                 bnx2x_set_mac_addr_e1(bp, 1);
6954         else
6955                 bnx2x_set_mac_addr_e1h(bp, 1);
6956
6957         if (bp->port.pmf)
6958                 bnx2x_initial_phy_init(bp, load_mode);
6959
6960         /* Start fast path */
6961         switch (load_mode) {
6962         case LOAD_NORMAL:
6963                 /* Tx queues should only be re-enabled */
6964                 netif_tx_wake_all_queues(bp->dev);
6965                 /* Initialize the receive filter. */
6966                 bnx2x_set_rx_mode(bp->dev);
6967                 break;
6968
6969         case LOAD_OPEN:
6970                 netif_tx_start_all_queues(bp->dev);
6971                 /* Initialize the receive filter. */
6972                 bnx2x_set_rx_mode(bp->dev);
6973                 break;
6974
6975         case LOAD_DIAG:
6976                 /* Initialize the receive filter. */
6977                 bnx2x_set_rx_mode(bp->dev);
6978                 bp->state = BNX2X_STATE_DIAG;
6979                 break;
6980
6981         default:
6982                 break;
6983         }
6984
6985         if (!bp->port.pmf)
6986                 bnx2x__link_status_update(bp);
6987
6988         /* start the timer */
6989         mod_timer(&bp->timer, jiffies + bp->current_interval);
6990
6991
6992         return 0;
6993
6994 load_error3:
6995         bnx2x_int_disable_sync(bp, 1);
6996         if (!BP_NOMCP(bp)) {
6997                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6998                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6999         }
7000         bp->port.pmf = 0;
7001         /* Free SKBs, SGEs, TPA pool and driver internals */
7002         bnx2x_free_skbs(bp);
7003         for_each_rx_queue(bp, i)
7004                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7005 load_error2:
7006         /* Release IRQs */
7007         bnx2x_free_irq(bp);
7008 load_error1:
7009         bnx2x_napi_disable(bp);
7010         for_each_rx_queue(bp, i)
7011                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7012         bnx2x_free_mem(bp);
7013
7014         return rc;
7015 }
7016
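In the NO-MCP path of bnx2x_nic_load(), load_count[] decides how much initialization this function must do: the first load overall initializes the common blocks, the first load on a port initializes that port, and anything later initializes only the function. A standalone sketch of that bookkeeping (register_load is hypothetical):

#include <stdio.h>

enum { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };

/* Index 0 counts every function; 1 + port counts functions per port. */
static int load_counts[3];

static int register_load(int port)
{
        load_counts[0]++;
        load_counts[1 + port]++;
        if (load_counts[0] == 1)
                return LOAD_COMMON;
        if (load_counts[1 + port] == 1)
                return LOAD_PORT;
        return LOAD_FUNCTION;
}

int main(void)
{
        printf("%d %d %d\n", register_load(0), register_load(1),
               register_load(0));       /* prints 0 1 2 */
        return 0;
}
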
7017 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7018 {
7019         struct bnx2x_fastpath *fp = &bp->fp[index];
7020         int rc;
7021
7022         /* halt the connection */
7023         fp->state = BNX2X_FP_STATE_HALTING;
7024         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7025
7026         /* Wait for completion */
7027         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7028                                &(fp->state), 1);
7029         if (rc) /* timeout */
7030                 return rc;
7031
7032         /* delete cfc entry */
7033         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7034
7035         /* Wait for completion */
7036         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7037                                &(fp->state), 1);
7038         return rc;
7039 }
7040
7041 static int bnx2x_stop_leading(struct bnx2x *bp)
7042 {
7043         __le16 dsb_sp_prod_idx;
7044         /* if the other port is handling traffic,
7045            this can take a lot of time */
7046         int cnt = 500;
7047         int rc;
7048
7049         might_sleep();
7050
7051         /* Send HALT ramrod */
7052         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7053         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7054
7055         /* Wait for completion */
7056         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7057                                &(bp->fp[0].state), 1);
7058         if (rc) /* timeout */
7059                 return rc;
7060
7061         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7062
7063         /* Send PORT_DELETE ramrod */
7064         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7065
7066         /* Wait for the completion to arrive on the default status block;
7067            we are going to reset the chip anyway,
7068            so there is not much to do if this times out
7069          */
7070         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7071                 if (!cnt) {
7072                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7073                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7074                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7075 #ifdef BNX2X_STOP_ON_ERROR
7076                         bnx2x_panic();
7077 #endif
7078                         rc = -EBUSY;
7079                         break;
7080                 }
7081                 cnt--;
7082                 msleep(1);
7083                 rmb(); /* Refresh the dsb_sp_prod */
7084         }
7085         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7086         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7087
7088         return rc;
7089 }
7090
7091 static void bnx2x_reset_func(struct bnx2x *bp)
7092 {
7093         int port = BP_PORT(bp);
7094         int func = BP_FUNC(bp);
7095         int base, i;
7096
7097         /* Configure IGU */
7098         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7099         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7100
7101         /* Clear ILT */
7102         base = FUNC_ILT_BASE(func);
7103         for (i = base; i < base + ILT_PER_FUNC; i++)
7104                 bnx2x_ilt_wr(bp, i, 0);
7105 }
7106
7107 static void bnx2x_reset_port(struct bnx2x *bp)
7108 {
7109         int port = BP_PORT(bp);
7110         u32 val;
7111
7112         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7113
7114         /* Do not rcv packets to BRB */
7115         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7116         /* Do not direct rcv packets that are not for MCP to the BRB */
7117         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7118                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7119
7120         /* Configure AEU */
7121         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7122
7123         msleep(100);
7124         /* Check for BRB port occupancy */
7125         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7126         if (val)
7127                 DP(NETIF_MSG_IFDOWN,
7128                    "BRB1 is not empty  %d blocks are occupied\n", val);
7129
7130         /* TODO: Close Doorbell port? */
7131 }
7132
7133 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7134 {
7135         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7136            BP_FUNC(bp), reset_code);
7137
7138         switch (reset_code) {
7139         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7140                 bnx2x_reset_port(bp);
7141                 bnx2x_reset_func(bp);
7142                 bnx2x_reset_common(bp);
7143                 break;
7144
7145         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7146                 bnx2x_reset_port(bp);
7147                 bnx2x_reset_func(bp);
7148                 break;
7149
7150         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7151                 bnx2x_reset_func(bp);
7152                 break;
7153
7154         default:
7155                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7156                 break;
7157         }
7158 }
7159
7160 /* must be called with rtnl_lock */
7161 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7162 {
7163         int port = BP_PORT(bp);
7164         u32 reset_code = 0;
7165         int i, cnt, rc;
7166
7167         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7168
7169         bp->rx_mode = BNX2X_RX_MODE_NONE;
7170         bnx2x_set_storm_rx_mode(bp);
7171
7172         bnx2x_netif_stop(bp, 1);
7173
7174         del_timer_sync(&bp->timer);
7175         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7176                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7177         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7178
7179         /* Release IRQs */
7180         bnx2x_free_irq(bp);
7181
7182         /* Wait until tx fastpath tasks complete */
7183         for_each_tx_queue(bp, i) {
7184                 struct bnx2x_fastpath *fp = &bp->fp[i];
7185
7186                 cnt = 1000;
7187                 while (bnx2x_has_tx_work_unload(fp)) {
7188
7189                         bnx2x_tx_int(fp);
7190                         if (!cnt) {
7191                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7192                                           i);
7193 #ifdef BNX2X_STOP_ON_ERROR
7194                                 bnx2x_panic();
7195                                 return -EBUSY;
7196 #else
7197                                 break;
7198 #endif
7199                         }
7200                         cnt--;
7201                         msleep(1);
7202                 }
7203         }
7204         /* Give HW time to discard old tx messages */
7205         msleep(1);
7206
7207         if (CHIP_IS_E1(bp)) {
7208                 struct mac_configuration_cmd *config =
7209                                                 bnx2x_sp(bp, mcast_config);
7210
7211                 bnx2x_set_mac_addr_e1(bp, 0);
7212
7213                 for (i = 0; i < config->hdr.length; i++)
7214                         CAM_INVALIDATE(config->config_table[i]);
7215
7216                 config->hdr.length = i;
7217                 if (CHIP_REV_IS_SLOW(bp))
7218                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7219                 else
7220                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7221                 config->hdr.client_id = bp->fp->cl_id;
7222                 config->hdr.reserved1 = 0;
7223
7224                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7225                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7226                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7227
7228         } else { /* E1H */
7229                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7230
7231                 bnx2x_set_mac_addr_e1h(bp, 0);
7232
7233                 for (i = 0; i < MC_HASH_SIZE; i++)
7234                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7235         }
7236
7237         if (unload_mode == UNLOAD_NORMAL)
7238                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7239
7240         else if (bp->flags & NO_WOL_FLAG) {
7241                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7242                 if (CHIP_IS_E1H(bp))
7243                         REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7244
7245         } else if (bp->wol) {
7246                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7247                 u8 *mac_addr = bp->dev->dev_addr;
7248                 u32 val;
7249                 /* The MAC address is written to entries 1-4 to
7250                    preserve entry 0, which is used by the PMF */
7251                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7252
7253                 val = (mac_addr[0] << 8) | mac_addr[1];
7254                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7255
7256                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7257                       (mac_addr[4] << 8) | mac_addr[5];
7258                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7259
7260                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7261
7262         } else
7263                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7264
7265         /* Close multi and leading connections
7266            Completions for ramrods are collected in a synchronous way */
7267         for_each_nondefault_queue(bp, i)
7268                 if (bnx2x_stop_multi(bp, i))
7269                         goto unload_error;
7270
7271         rc = bnx2x_stop_leading(bp);
7272         if (rc) {
7273                 BNX2X_ERR("Stop leading failed!\n");
7274 #ifdef BNX2X_STOP_ON_ERROR
7275                 return -EBUSY;
7276 #else
7277                 goto unload_error;
7278 #endif
7279         }
7280
7281 unload_error:
7282         if (!BP_NOMCP(bp))
7283                 reset_code = bnx2x_fw_command(bp, reset_code);
7284         else {
7285                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7286                    load_count[0], load_count[1], load_count[2]);
7287                 load_count[0]--;
7288                 load_count[1 + port]--;
7289                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7290                    load_count[0], load_count[1], load_count[2]);
7291                 if (load_count[0] == 0)
7292                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7293                 else if (load_count[1 + port] == 0)
7294                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7295                 else
7296                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7297         }
7298
7299         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7300             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7301                 bnx2x__link_reset(bp);
7302
7303         /* Reset the chip */
7304         bnx2x_reset_chip(bp, reset_code);
7305
7306         /* Report UNLOAD_DONE to MCP */
7307         if (!BP_NOMCP(bp))
7308                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7309
7310         bp->port.pmf = 0;
7311
7312         /* Free SKBs, SGEs, TPA pool and driver internals */
7313         bnx2x_free_skbs(bp);
7314         for_each_rx_queue(bp, i)
7315                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7316         for_each_rx_queue(bp, i)
7317                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7318         bnx2x_free_mem(bp);
7319
7320         bp->state = BNX2X_STATE_CLOSED;
7321
7322         netif_carrier_off(bp->dev);
7323
7324         return 0;
7325 }
7326
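For WoL, bnx2x_nic_unload() programs the EMAC MAC-match entry at byte offset (vn + 1) * 8 with the address split into a 16-bit high word and a 32-bit low word. A worked example of just that packing (the values are made up; the register itself is device memory):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t m[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
        int vn = 1;                             /* BP_E1HVN(bp), example */
        unsigned entry = (vn + 1) * 8;          /* entries 1-4, 8 bytes each */
        uint32_t hi = m[0] << 8 | m[1];
        uint32_t lo = (uint32_t)m[2] << 24 | m[3] << 16 | m[4] << 8 | m[5];

        printf("entry +%u: hi=0x%04x lo=0x%08x\n", entry, hi, lo);
        return 0;       /* prints entry +16: hi=0x0010 lo=0x18abcdef */
}
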
7327 static void bnx2x_reset_task(struct work_struct *work)
7328 {
7329         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7330
7331 #ifdef BNX2X_STOP_ON_ERROR
7332         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7333                   " so reset not done to allow debug dump,\n"
7334          KERN_ERR " you will need to reboot when done\n");
7335         return;
7336 #endif
7337
7338         rtnl_lock();
7339
7340         if (!netif_running(bp->dev))
7341                 goto reset_task_exit;
7342
7343         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7344         bnx2x_nic_load(bp, LOAD_NORMAL);
7345
7346 reset_task_exit:
7347         rtnl_unlock();
7348 }
7349
7350 /* end of nic load/unload */
7351
7352 /* ethtool_ops */
7353
7354 /*
7355  * Init service functions
7356  */
7357
7358 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7359 {
7360         switch (func) {
7361         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7362         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7363         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7364         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7365         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7366         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7367         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7368         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7369         default:
7370                 BNX2X_ERR("Unsupported function index: %d\n", func);
7371                 return (u32)(-1);
7372         }
7373 }
7374
7375 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7376 {
7377         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7378
7379         /* Flush all outstanding writes */
7380         mmiowb();
7381
7382         /* Pretend to be function 0 */
7383         REG_WR(bp, reg, 0);
7384         /* Flush the GRC transaction (in the chip) */
7385         new_val = REG_RD(bp, reg);
7386         if (new_val != 0) {
7387                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7388                           new_val);
7389                 BUG();
7390         }
7391
7392         /* From now on we are in "like-E1" mode */
7393         bnx2x_int_disable(bp);
7394
7395         /* Flush all outstanding writes */
7396         mmiowb();
7397
7398         /* Restore the original function settings */
7399         REG_WR(bp, reg, orig_func);
7400         new_val = REG_RD(bp, reg);
7401         if (new_val != orig_func) {
7402                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7403                           orig_func, new_val);
7404                 BUG();
7405         }
7406 }
7407
7408 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7409 {
7410         if (CHIP_IS_E1H(bp))
7411                 bnx2x_undi_int_disable_e1h(bp, func);
7412         else
7413                 bnx2x_int_disable(bp);
7414 }
7415
7416 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7417 {
7418         u32 val;
7419
7420         /* Check if there is any driver already loaded */
7421         val = REG_RD(bp, MISC_REG_UNPREPARED);
7422         if (val == 0x1) {
7423                 /* Check if it is the UNDI driver:
7424                  * UNDI initializes the CID offset for the normal doorbell to 0x7
7425                  */
7426                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7427                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7428                 if (val == 0x7) {
7429                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7430                         /* save our func */
7431                         int func = BP_FUNC(bp);
7432                         u32 swap_en;
7433                         u32 swap_val;
7434
7435                         /* clear the UNDI indication */
7436                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7437
7438                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7439
7440                         /* try to unload UNDI on port 0 */
7441                         bp->func = 0;
7442                         bp->fw_seq =
7443                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7444                                 DRV_MSG_SEQ_NUMBER_MASK);
7445                         reset_code = bnx2x_fw_command(bp, reset_code);
7446
7447                         /* if UNDI is loaded on the other port */
7448                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7449
7450                                 /* send "DONE" for previous unload */
7451                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7452
7453                                 /* unload UNDI on port 1 */
7454                                 bp->func = 1;
7455                                 bp->fw_seq =
7456                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7457                                         DRV_MSG_SEQ_NUMBER_MASK);
7458                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7459
7460                                 bnx2x_fw_command(bp, reset_code);
7461                         }
7462
7463                         /* now it's safe to release the lock */
7464                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7465
7466                         bnx2x_undi_int_disable(bp, func);
7467
7468                         /* close input traffic and wait for it to stop */
7469                         /* Do not rcv packets to BRB */
7470                         REG_WR(bp,
7471                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7472                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7473                         /* Do not direct rcv packets that are not for MCP to
7474                          * the BRB */
7475                         REG_WR(bp,
7476                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7477                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7478                         /* clear AEU */
7479                         REG_WR(bp,
7480                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7481                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7482                         msleep(10);
7483
7484                         /* save NIG port swap info */
7485                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7486                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7487                         /* reset device */
7488                         REG_WR(bp,
7489                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7490                                0xd3ffffff);
7491                         REG_WR(bp,
7492                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7493                                0x1403);
7494                         /* take the NIG out of reset and restore swap values */
7495                         REG_WR(bp,
7496                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7497                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7498                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7499                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7500
7501                         /* send unload done to the MCP */
7502                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7503
7504                         /* restore our func and fw_seq */
7505                         bp->func = func;
7506                         bp->fw_seq =
7507                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7508                                 DRV_MSG_SEQ_NUMBER_MASK);
7509
7510                 } else
7511                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7512         }
7513 }
7514
7515 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7516 {
7517         u32 val, val2, val3, val4, id;
7518         u16 pmc;
7519
7520         /* Get the chip revision id and number. */
7521         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7522         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7523         id = ((val & 0xffff) << 16);
7524         val = REG_RD(bp, MISC_REG_CHIP_REV);
7525         id |= ((val & 0xf) << 12);
7526         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7527         id |= ((val & 0xff) << 4);
7528         val = REG_RD(bp, MISC_REG_BOND_ID);
7529         id |= (val & 0xf);
7530         bp->common.chip_id = id;
7531         bp->link_params.chip_id = bp->common.chip_id;
7532         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7533
7534         val = (REG_RD(bp, 0x2874) & 0x55);
7535         if ((bp->common.chip_id & 0x1) ||
7536             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7537                 bp->flags |= ONE_PORT_FLAG;
7538                 BNX2X_DEV_INFO("single port device\n");
7539         }
7540
7541         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7542         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7543                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7544         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7545                        bp->common.flash_size, bp->common.flash_size);
7546
7547         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7548         bp->link_params.shmem_base = bp->common.shmem_base;
7549         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7550
7551         if (!bp->common.shmem_base ||
7552             (bp->common.shmem_base < 0xA0000) ||
7553             (bp->common.shmem_base >= 0xC0000)) {
7554                 BNX2X_DEV_INFO("MCP not active\n");
7555                 bp->flags |= NO_MCP_FLAG;
7556                 return;
7557         }
7558
7559         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7560         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7561                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7562                 BNX2X_ERR("BAD MCP validity signature\n");
7563
7564         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7565         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7566
7567         bp->link_params.hw_led_mode = ((bp->common.hw_config &
7568                                         SHARED_HW_CFG_LED_MODE_MASK) >>
7569                                        SHARED_HW_CFG_LED_MODE_SHIFT);
7570
7571         bp->link_params.feature_config_flags = 0;
7572         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7573         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7574                 bp->link_params.feature_config_flags |=
7575                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7576         else
7577                 bp->link_params.feature_config_flags &=
7578                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7579
7580         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7581         bp->common.bc_ver = val;
7582         BNX2X_DEV_INFO("bc_ver %X\n", val);
7583         if (val < BNX2X_BC_VER) {
7584                 /* for now only warn
7585                  * later we might need to enforce this */
7586                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7587                           " please upgrade BC\n", BNX2X_BC_VER, val);
7588         }
7589
7590         if (BP_E1HVN(bp) == 0) {
7591                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7592                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7593         } else {
7594                 /* no WOL capability for E1HVN != 0 */
7595                 bp->flags |= NO_WOL_FLAG;
7596         }
7597         BNX2X_DEV_INFO("%sWoL capable\n",
7598                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
7599
7600         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7601         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7602         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7603         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7604
7605         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7606                val, val2, val3, val4);
7607 }
7608
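The chip id assembled above follows the layout in the comment: chip num in bits 16-31, rev in 12-15, metal in 4-11, bond_id in 0-3. A worked example with made-up register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t num = 0x164e, rev = 0x1, metal = 0x00, bond = 0x0;
        uint32_t id = (num & 0xffff) << 16 | (rev & 0xf) << 12 |
                      (metal & 0xff) << 4 | (bond & 0xf);

        printf("chip ID is 0x%x\n", id);        /* prints 0x164e1000 */
        return 0;
}
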
7609 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7610                                                     u32 switch_cfg)
7611 {
7612         int port = BP_PORT(bp);
7613         u32 ext_phy_type;
7614
7615         switch (switch_cfg) {
7616         case SWITCH_CFG_1G:
7617                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7618
7619                 ext_phy_type =
7620                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7621                 switch (ext_phy_type) {
7622                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7623                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7624                                        ext_phy_type);
7625
7626                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7627                                                SUPPORTED_10baseT_Full |
7628                                                SUPPORTED_100baseT_Half |
7629                                                SUPPORTED_100baseT_Full |
7630                                                SUPPORTED_1000baseT_Full |
7631                                                SUPPORTED_2500baseX_Full |
7632                                                SUPPORTED_TP |
7633                                                SUPPORTED_FIBRE |
7634                                                SUPPORTED_Autoneg |
7635                                                SUPPORTED_Pause |
7636                                                SUPPORTED_Asym_Pause);
7637                         break;
7638
7639                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7640                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7641                                        ext_phy_type);
7642
7643                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7644                                                SUPPORTED_10baseT_Full |
7645                                                SUPPORTED_100baseT_Half |
7646                                                SUPPORTED_100baseT_Full |
7647                                                SUPPORTED_1000baseT_Full |
7648                                                SUPPORTED_TP |
7649                                                SUPPORTED_FIBRE |
7650                                                SUPPORTED_Autoneg |
7651                                                SUPPORTED_Pause |
7652                                                SUPPORTED_Asym_Pause);
7653                         break;
7654
7655                 default:
7656                         BNX2X_ERR("NVRAM config error. "
7657                                   "BAD SerDes ext_phy_config 0x%x\n",
7658                                   bp->link_params.ext_phy_config);
7659                         return;
7660                 }
7661
7662                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7663                                            port*0x10);
7664                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7665                 break;
7666
7667         case SWITCH_CFG_10G:
7668                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7669
7670                 ext_phy_type =
7671                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7672                 switch (ext_phy_type) {
7673                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7674                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7675                                        ext_phy_type);
7676
7677                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7678                                                SUPPORTED_10baseT_Full |
7679                                                SUPPORTED_100baseT_Half |
7680                                                SUPPORTED_100baseT_Full |
7681                                                SUPPORTED_1000baseT_Full |
7682                                                SUPPORTED_2500baseX_Full |
7683                                                SUPPORTED_10000baseT_Full |
7684                                                SUPPORTED_TP |
7685                                                SUPPORTED_FIBRE |
7686                                                SUPPORTED_Autoneg |
7687                                                SUPPORTED_Pause |
7688                                                SUPPORTED_Asym_Pause);
7689                         break;
7690
7691                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7692                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7693                                        ext_phy_type);
7694
7695                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7696                                                SUPPORTED_1000baseT_Full |
7697                                                SUPPORTED_FIBRE |
7698                                                SUPPORTED_Autoneg |
7699                                                SUPPORTED_Pause |
7700                                                SUPPORTED_Asym_Pause);
7701                         break;
7702
7703                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7704                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7705                                        ext_phy_type);
7706
7707                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7708                                                SUPPORTED_2500baseX_Full |
7709                                                SUPPORTED_1000baseT_Full |
7710                                                SUPPORTED_FIBRE |
7711                                                SUPPORTED_Autoneg |
7712                                                SUPPORTED_Pause |
7713                                                SUPPORTED_Asym_Pause);
7714                         break;
7715
7716                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7717                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7718                                        ext_phy_type);
7719
7720                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7721                                                SUPPORTED_FIBRE |
7722                                                SUPPORTED_Pause |
7723                                                SUPPORTED_Asym_Pause);
7724                         break;
7725
7726                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7727                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7728                                        ext_phy_type);
7729
7730                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7731                                                SUPPORTED_1000baseT_Full |
7732                                                SUPPORTED_FIBRE |
7733                                                SUPPORTED_Pause |
7734                                                SUPPORTED_Asym_Pause);
7735                         break;
7736
7737                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7738                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7739                                        ext_phy_type);
7740
7741                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7742                                                SUPPORTED_1000baseT_Full |
7743                                                SUPPORTED_Autoneg |
7744                                                SUPPORTED_FIBRE |
7745                                                SUPPORTED_Pause |
7746                                                SUPPORTED_Asym_Pause);
7747                         break;
7748
7749                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7750                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7751                                        ext_phy_type);
7752
7753                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
7754                                                SUPPORTED_TP |
7755                                                SUPPORTED_Autoneg |
7756                                                SUPPORTED_Pause |
7757                                                SUPPORTED_Asym_Pause);
7758                         break;
7759
7760                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7761                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7762                                        ext_phy_type);
7763
7764                         bp->port.supported |= (SUPPORTED_10baseT_Half |
7765                                                SUPPORTED_10baseT_Full |
7766                                                SUPPORTED_100baseT_Half |
7767                                                SUPPORTED_100baseT_Full |
7768                                                SUPPORTED_1000baseT_Full |
7769                                                SUPPORTED_10000baseT_Full |
7770                                                SUPPORTED_TP |
7771                                                SUPPORTED_Autoneg |
7772                                                SUPPORTED_Pause |
7773                                                SUPPORTED_Asym_Pause);
7774                         break;
7775
7776                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7777                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7778                                   bp->link_params.ext_phy_config);
7779                         break;
7780
7781                 default:
7782                         BNX2X_ERR("NVRAM config error. "
7783                                   "BAD XGXS ext_phy_config 0x%x\n",
7784                                   bp->link_params.ext_phy_config);
7785                         return;
7786                 }
7787
7788                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7789                                            port*0x18);
7790                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7791
7792                 break;
7793
7794         default:
7795                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7796                           bp->port.link_config);
7797                 return;
7798         }
7799         bp->link_params.phy_addr = bp->port.phy_addr;
7800
7801         /* mask what we support according to speed_cap_mask */
7802         if (!(bp->link_params.speed_cap_mask &
7803                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7804                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7805
7806         if (!(bp->link_params.speed_cap_mask &
7807                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7808                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7809
7810         if (!(bp->link_params.speed_cap_mask &
7811                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7812                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7813
7814         if (!(bp->link_params.speed_cap_mask &
7815                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7816                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7817
7818         if (!(bp->link_params.speed_cap_mask &
7819                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7820                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7821                                         SUPPORTED_1000baseT_Full);
7822
7823         if (!(bp->link_params.speed_cap_mask &
7824                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7825                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7826
7827         if (!(bp->link_params.speed_cap_mask &
7828                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7829                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7830
7831         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7832 }
7833
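The tail of bnx2x_link_settings_supported() is a straight mask-out: a supported bit survives only if the NVRAM speed_cap_mask allows that speed. A reduced sketch with two illustrative bits (the bit values are examples, not the hardware's):

#include <stdio.h>

#define SUP_10G         (1u << 0)       /* illustrative bit values only */
#define SUP_2500        (1u << 1)
#define CAP_10G         (1u << 0)
#define CAP_2500        (1u << 1)

static unsigned mask_supported(unsigned supported, unsigned cap_mask)
{
        if (!(cap_mask & CAP_10G))
                supported &= ~SUP_10G;
        if (!(cap_mask & CAP_2500))
                supported &= ~SUP_2500;
        return supported;
}

int main(void)
{
        /* 2.5G capability missing, so only the 10G bit survives */
        printf("0x%x\n", mask_supported(SUP_10G | SUP_2500, CAP_10G));
        return 0;       /* prints 0x1 */
}
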
7834 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7835 {
7836         bp->link_params.req_duplex = DUPLEX_FULL;
7837
7838         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7839         case PORT_FEATURE_LINK_SPEED_AUTO:
7840                 if (bp->port.supported & SUPPORTED_Autoneg) {
7841                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7842                         bp->port.advertising = bp->port.supported;
7843                 } else {
7844                         u32 ext_phy_type =
7845                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7846
7847                         if ((ext_phy_type ==
7848                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7849                             (ext_phy_type ==
7850                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7851                                 /* force 10G, no AN */
7852                                 bp->link_params.req_line_speed = SPEED_10000;
7853                                 bp->port.advertising =
7854                                                 (ADVERTISED_10000baseT_Full |
7855                                                  ADVERTISED_FIBRE);
7856                                 break;
7857                         }
7858                         BNX2X_ERR("NVRAM config error. "
7859                                   "Invalid link_config 0x%x"
7860                                   "  Autoneg not supported\n",
7861                                   bp->port.link_config);
7862                         return;
7863                 }
7864                 break;
7865
7866         case PORT_FEATURE_LINK_SPEED_10M_FULL:
7867                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7868                         bp->link_params.req_line_speed = SPEED_10;
7869                         bp->port.advertising = (ADVERTISED_10baseT_Full |
7870                                                 ADVERTISED_TP);
7871                 } else {
7872                         BNX2X_ERR("NVRAM config error. "
7873                                   "Invalid link_config 0x%x"
7874                                   "  speed_cap_mask 0x%x\n",
7875                                   bp->port.link_config,
7876                                   bp->link_params.speed_cap_mask);
7877                         return;
7878                 }
7879                 break;
7880
7881         case PORT_FEATURE_LINK_SPEED_10M_HALF:
7882                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7883                         bp->link_params.req_line_speed = SPEED_10;
7884                         bp->link_params.req_duplex = DUPLEX_HALF;
7885                         bp->port.advertising = (ADVERTISED_10baseT_Half |
7886                                                 ADVERTISED_TP);
7887                 } else {
7888                         BNX2X_ERR("NVRAM config error. "
7889                                   "Invalid link_config 0x%x"
7890                                   "  speed_cap_mask 0x%x\n",
7891                                   bp->port.link_config,
7892                                   bp->link_params.speed_cap_mask);
7893                         return;
7894                 }
7895                 break;
7896
7897         case PORT_FEATURE_LINK_SPEED_100M_FULL:
7898                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7899                         bp->link_params.req_line_speed = SPEED_100;
7900                         bp->port.advertising = (ADVERTISED_100baseT_Full |
7901                                                 ADVERTISED_TP);
7902                 } else {
7903                         BNX2X_ERR("NVRAM config error. "
7904                                   "Invalid link_config 0x%x"
7905                                   "  speed_cap_mask 0x%x\n",
7906                                   bp->port.link_config,
7907                                   bp->link_params.speed_cap_mask);
7908                         return;
7909                 }
7910                 break;
7911
7912         case PORT_FEATURE_LINK_SPEED_100M_HALF:
7913                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7914                         bp->link_params.req_line_speed = SPEED_100;
7915                         bp->link_params.req_duplex = DUPLEX_HALF;
7916                         bp->port.advertising = (ADVERTISED_100baseT_Half |
7917                                                 ADVERTISED_TP);
7918                 } else {
7919                         BNX2X_ERR("NVRAM config error. "
7920                                   "Invalid link_config 0x%x"
7921                                   "  speed_cap_mask 0x%x\n",
7922                                   bp->port.link_config,
7923                                   bp->link_params.speed_cap_mask);
7924                         return;
7925                 }
7926                 break;
7927
7928         case PORT_FEATURE_LINK_SPEED_1G:
7929                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7930                         bp->link_params.req_line_speed = SPEED_1000;
7931                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
7932                                                 ADVERTISED_TP);
7933                 } else {
7934                         BNX2X_ERR("NVRAM config error. "
7935                                   "Invalid link_config 0x%x"
7936                                   "  speed_cap_mask 0x%x\n",
7937                                   bp->port.link_config,
7938                                   bp->link_params.speed_cap_mask);
7939                         return;
7940                 }
7941                 break;
7942
7943         case PORT_FEATURE_LINK_SPEED_2_5G:
7944                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7945                         bp->link_params.req_line_speed = SPEED_2500;
7946                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
7947                                                 ADVERTISED_TP);
7948                 } else {
7949                         BNX2X_ERR("NVRAM config error. "
7950                                   "Invalid link_config 0x%x"
7951                                   "  speed_cap_mask 0x%x\n",
7952                                   bp->port.link_config,
7953                                   bp->link_params.speed_cap_mask);
7954                         return;
7955                 }
7956                 break;
7957
7958         case PORT_FEATURE_LINK_SPEED_10G_CX4:
7959         case PORT_FEATURE_LINK_SPEED_10G_KX4:
7960         case PORT_FEATURE_LINK_SPEED_10G_KR:
7961                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7962                         bp->link_params.req_line_speed = SPEED_10000;
7963                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
7964                                                 ADVERTISED_FIBRE);
7965                 } else {
7966                         BNX2X_ERR("NVRAM config error. "
7967                                   "Invalid link_config 0x%x"
7968                                   "  speed_cap_mask 0x%x\n",
7969                                   bp->port.link_config,
7970                                   bp->link_params.speed_cap_mask);
7971                         return;
7972                 }
7973                 break;
7974
7975         default:
7976                 BNX2X_ERR("NVRAM config error. "
7977                           "BAD link speed link_config 0x%x\n",
7978                           bp->port.link_config);
7979                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7980                 bp->port.advertising = bp->port.supported;
7981                 break;
7982         }
7983
7984         bp->link_params.req_flow_ctrl = (bp->port.link_config &
7985                                          PORT_FEATURE_FLOW_CONTROL_MASK);
7986         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7987             !(bp->port.supported & SUPPORTED_Autoneg))
7988                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7989
7990         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
7991                        "  advertising 0x%x\n",
7992                        bp->link_params.req_line_speed,
7993                        bp->link_params.req_duplex,
7994                        bp->link_params.req_flow_ctrl, bp->port.advertising);
7995 }
7996
7997 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7998 {
7999         int port = BP_PORT(bp);
8000         u32 val, val2;
8001         u32 config;
8002         u16 i;
8003
8004         bp->link_params.bp = bp;
8005         bp->link_params.port = port;
8006
8007         bp->link_params.lane_config =
8008                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8009         bp->link_params.ext_phy_config =
8010                 SHMEM_RD(bp,
8011                          dev_info.port_hw_config[port].external_phy_config);
8012         bp->link_params.speed_cap_mask =
8013                 SHMEM_RD(bp,
8014                          dev_info.port_hw_config[port].speed_capability_mask);
8015
8016         bp->port.link_config =
8017                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8018
8019         /* Get the XGXS RX and TX config for the 4 lanes */
8020         for (i = 0; i < 2; i++) {
8021                 val = SHMEM_RD(bp,
8022                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8023                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8024                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8025
8026                 val = SHMEM_RD(bp,
8027                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8028                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8029                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8030         }
8031
8032         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8033         if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8034                 bp->link_params.feature_config_flags |=
8035                                 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8036         else
8037                 bp->link_params.feature_config_flags &=
8038                                 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8039
8040         /* If the device is capable of WoL, set the default state according
8041          * to the HW
8042          */
8043         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8044                    (config & PORT_FEATURE_WOL_ENABLED));
8045
8046         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8047                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8048                        bp->link_params.lane_config,
8049                        bp->link_params.ext_phy_config,
8050                        bp->link_params.speed_cap_mask, bp->port.link_config);
8051
8052         bp->link_params.switch_cfg = (bp->port.link_config &
8053                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
8054         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8055
8056         bnx2x_link_settings_requested(bp);
8057
8058         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8059         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8060         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8061         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8062         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8063         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8064         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8065         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8066         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8067         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8068 }
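
The MAC assembly at the end of bnx2x_get_port_hwinfo() is a plain big-endian unpack of six address bytes from two 32-bit shared-memory words: mac_upper carries bytes 0-1 in its low half, mac_lower carries bytes 2-5. A self-contained sketch with hypothetical word values:

#include <stdio.h>
#include <stdint.h>

static void unpack_mac(uint32_t upper, uint32_t lower, uint8_t mac[6])
{
	mac[0] = (upper >> 8) & 0xff;	/* mac_upper: bytes 0..1 */
	mac[1] = upper & 0xff;
	mac[2] = (lower >> 24) & 0xff;	/* mac_lower: bytes 2..5 */
	mac[3] = (lower >> 16) & 0xff;
	mac[4] = (lower >> 8) & 0xff;
	mac[5] = lower & 0xff;
}

int main(void)
{
	uint8_t mac[6];

	unpack_mac(0x0010, 0x18c0ffee, mac);	/* hypothetical NVRAM words */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;	/* prints 00:10:18:c0:ff:ee */
}
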
8069
8070 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8071 {
8072         int func = BP_FUNC(bp);
8073         u32 val, val2;
8074         int rc = 0;
8075
8076         bnx2x_get_common_hwinfo(bp);
8077
8078         bp->e1hov = 0;
8079         bp->e1hmf = 0;
8080         if (CHIP_IS_E1H(bp)) {
8081                 bp->mf_config =
8082                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8083
8084                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8085                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8086                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8087
8088                         bp->e1hov = val;
8089                         bp->e1hmf = 1;
8090                         BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
8091                                        "(0x%04x)\n",
8092                                        func, bp->e1hov, bp->e1hov);
8093                 } else {
8094                         BNX2X_DEV_INFO("single function mode\n");
8095                         if (BP_E1HVN(bp)) {
8096                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8097                                           "  aborting\n", func);
8098                                 rc = -EPERM;
8099                         }
8100                 }
8101         }
8102
8103         if (!BP_NOMCP(bp)) {
8104                 bnx2x_get_port_hwinfo(bp);
8105
8106                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8107                               DRV_MSG_SEQ_NUMBER_MASK);
8108                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8109         }
8110
8111         if (IS_E1HMF(bp)) {
8112                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8113                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8114                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8115                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8116                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8117                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8118                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8119                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8120                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8121                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8122                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8123                                ETH_ALEN);
8124                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8125                                ETH_ALEN);
8126                 }
8127
8128                 return rc;
8129         }
8130
8131         if (BP_NOMCP(bp)) {
8132                 /* only supposed to happen on emulation/FPGA */
8133                 BNX2X_ERR("warning: random MAC workaround active\n");
8134                 random_ether_addr(bp->dev->dev_addr);
8135                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8136         }
8137
8138         return rc;
8139 }
8140
8141 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8142 {
8143         int func = BP_FUNC(bp);
8144         int timer_interval;
8145         int rc;
8146
8147         /* Disable interrupt handling until HW is initialized */
8148         atomic_set(&bp->intr_sem, 1);
8149
8150         mutex_init(&bp->port.phy_mutex);
8151
8152         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8153         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8154
8155         rc = bnx2x_get_hwinfo(bp);
8156
8157         /* need to reset chip if undi was active */
8158         if (!BP_NOMCP(bp))
8159                 bnx2x_undi_unload(bp);
8160
8161         if (CHIP_REV_IS_FPGA(bp))
8162                 printk(KERN_ERR PFX "FPGA detected\n");
8163
8164         if (BP_NOMCP(bp) && (func == 0))
8165                 printk(KERN_ERR PFX
8166                        "MCP disabled, must load devices in order!\n");
8167
8168         /* Set multi queue mode */
8169         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8170             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8171                 printk(KERN_ERR PFX
8172                       "Multi disabled since int_mode requested is not MSI-X\n");
8173                 multi_mode = ETH_RSS_MODE_DISABLED;
8174         }
8175         bp->multi_mode = multi_mode;
8176
8177
8178         /* Set TPA flags */
8179         if (disable_tpa) {
8180                 bp->flags &= ~TPA_ENABLE_FLAG;
8181                 bp->dev->features &= ~NETIF_F_LRO;
8182         } else {
8183                 bp->flags |= TPA_ENABLE_FLAG;
8184                 bp->dev->features |= NETIF_F_LRO;
8185         }
8186
8187         bp->mrrs = mrrs;
8188
8189         bp->tx_ring_size = MAX_TX_AVAIL;
8190         bp->rx_ring_size = MAX_RX_AVAIL;
8191
8192         bp->rx_csum = 1;
8193
8194         bp->tx_ticks = 50;
8195         bp->rx_ticks = 25;
8196
8197         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8198         bp->current_interval = (poll ? poll : timer_interval);
8199
8200         init_timer(&bp->timer);
8201         bp->timer.expires = jiffies + bp->current_interval;
8202         bp->timer.data = (unsigned long) bp;
8203         bp->timer.function = bnx2x_timer;
8204
8205         return rc;
8206 }
8207
8208 /*
8209  * ethtool service functions
8210  */
8211
8212 /* All ethtool functions called with rtnl_lock */
8213
8214 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8215 {
8216         struct bnx2x *bp = netdev_priv(dev);
8217
8218         cmd->supported = bp->port.supported;
8219         cmd->advertising = bp->port.advertising;
8220
8221         if (netif_carrier_ok(dev)) {
8222                 cmd->speed = bp->link_vars.line_speed;
8223                 cmd->duplex = bp->link_vars.duplex;
8224         } else {
8225                 cmd->speed = bp->link_params.req_line_speed;
8226                 cmd->duplex = bp->link_params.req_duplex;
8227         }
8228         if (IS_E1HMF(bp)) {
8229                 u16 vn_max_rate;
8230
8231                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8232                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8233                 if (vn_max_rate < cmd->speed)
8234                         cmd->speed = vn_max_rate;
8235         }
8236
8237         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8238                 u32 ext_phy_type =
8239                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8240
8241                 switch (ext_phy_type) {
8242                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8243                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8244                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8245                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8246                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8247                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8248                         cmd->port = PORT_FIBRE;
8249                         break;
8250
8251                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8252                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8253                         cmd->port = PORT_TP;
8254                         break;
8255
8256                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8257                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8258                                   bp->link_params.ext_phy_config);
8259                         break;
8260
8261                 default:
8262                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8263                            bp->link_params.ext_phy_config);
8264                         break;
8265                 }
8266         } else
8267                 cmd->port = PORT_TP;
8268
8269         cmd->phy_address = bp->port.phy_addr;
8270         cmd->transceiver = XCVR_INTERNAL;
8271
8272         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8273                 cmd->autoneg = AUTONEG_ENABLE;
8274         else
8275                 cmd->autoneg = AUTONEG_DISABLE;
8276
8277         cmd->maxtxpkt = 0;
8278         cmd->maxrxpkt = 0;
8279
8280         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8281            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8282            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8283            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8284            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8285            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8286            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8287
8288         return 0;
8289 }
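
In multi-function (E1H MF) mode, get_settings clamps the reported speed to the per-function bandwidth limit, which the MCP encodes in units of 100 Mbps. A toy version of that clamp; the mask and shift here are illustrative stand-ins for FUNC_MF_CFG_MAX_BW_MASK/_SHIFT:

#include <stdio.h>
#include <stdint.h>

#define MAX_BW_MASK	0xffff0000u	/* illustrative encoding */
#define MAX_BW_SHIFT	16

int main(void)
{
	uint32_t mf_config = 25u << MAX_BW_SHIFT; /* cap = 25 * 100 Mbps */
	uint32_t speed = 10000;			  /* negotiated line speed */
	uint32_t vn_max_rate =
		((mf_config & MAX_BW_MASK) >> MAX_BW_SHIFT) * 100;

	if (vn_max_rate < speed)
		speed = vn_max_rate;
	printf("reported speed %u Mbps\n", speed);	/* 2500 */
	return 0;
}
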
8290
8291 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8292 {
8293         struct bnx2x *bp = netdev_priv(dev);
8294         u32 advertising;
8295
8296         if (IS_E1HMF(bp))
8297                 return 0;
8298
8299         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8300            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8301            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8302            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8303            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8304            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8305            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8306
8307         if (cmd->autoneg == AUTONEG_ENABLE) {
8308                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8309                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8310                         return -EINVAL;
8311                 }
8312
8313                 /* advertise the requested speed and duplex if supported */
8314                 cmd->advertising &= bp->port.supported;
8315
8316                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8317                 bp->link_params.req_duplex = DUPLEX_FULL;
8318                 bp->port.advertising |= (ADVERTISED_Autoneg |
8319                                          cmd->advertising);
8320
8321         } else { /* forced speed */
8322                 /* advertise the requested speed and duplex if supported */
8323                 switch (cmd->speed) {
8324                 case SPEED_10:
8325                         if (cmd->duplex == DUPLEX_FULL) {
8326                                 if (!(bp->port.supported &
8327                                       SUPPORTED_10baseT_Full)) {
8328                                         DP(NETIF_MSG_LINK,
8329                                            "10M full not supported\n");
8330                                         return -EINVAL;
8331                                 }
8332
8333                                 advertising = (ADVERTISED_10baseT_Full |
8334                                                ADVERTISED_TP);
8335                         } else {
8336                                 if (!(bp->port.supported &
8337                                       SUPPORTED_10baseT_Half)) {
8338                                         DP(NETIF_MSG_LINK,
8339                                            "10M half not supported\n");
8340                                         return -EINVAL;
8341                                 }
8342
8343                                 advertising = (ADVERTISED_10baseT_Half |
8344                                                ADVERTISED_TP);
8345                         }
8346                         break;
8347
8348                 case SPEED_100:
8349                         if (cmd->duplex == DUPLEX_FULL) {
8350                                 if (!(bp->port.supported &
8351                                                 SUPPORTED_100baseT_Full)) {
8352                                         DP(NETIF_MSG_LINK,
8353                                            "100M full not supported\n");
8354                                         return -EINVAL;
8355                                 }
8356
8357                                 advertising = (ADVERTISED_100baseT_Full |
8358                                                ADVERTISED_TP);
8359                         } else {
8360                                 if (!(bp->port.supported &
8361                                                 SUPPORTED_100baseT_Half)) {
8362                                         DP(NETIF_MSG_LINK,
8363                                            "100M half not supported\n");
8364                                         return -EINVAL;
8365                                 }
8366
8367                                 advertising = (ADVERTISED_100baseT_Half |
8368                                                ADVERTISED_TP);
8369                         }
8370                         break;
8371
8372                 case SPEED_1000:
8373                         if (cmd->duplex != DUPLEX_FULL) {
8374                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8375                                 return -EINVAL;
8376                         }
8377
8378                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8379                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8380                                 return -EINVAL;
8381                         }
8382
8383                         advertising = (ADVERTISED_1000baseT_Full |
8384                                        ADVERTISED_TP);
8385                         break;
8386
8387                 case SPEED_2500:
8388                         if (cmd->duplex != DUPLEX_FULL) {
8389                                 DP(NETIF_MSG_LINK,
8390                                    "2.5G half not supported\n");
8391                                 return -EINVAL;
8392                         }
8393
8394                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8395                                 DP(NETIF_MSG_LINK,
8396                                    "2.5G full not supported\n");
8397                                 return -EINVAL;
8398                         }
8399
8400                         advertising = (ADVERTISED_2500baseX_Full |
8401                                        ADVERTISED_TP);
8402                         break;
8403
8404                 case SPEED_10000:
8405                         if (cmd->duplex != DUPLEX_FULL) {
8406                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8407                                 return -EINVAL;
8408                         }
8409
8410                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8411                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8412                                 return -EINVAL;
8413                         }
8414
8415                         advertising = (ADVERTISED_10000baseT_Full |
8416                                        ADVERTISED_FIBRE);
8417                         break;
8418
8419                 default:
8420                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8421                         return -EINVAL;
8422                 }
8423
8424                 bp->link_params.req_line_speed = cmd->speed;
8425                 bp->link_params.req_duplex = cmd->duplex;
8426                 bp->port.advertising = advertising;
8427         }
8428
8429         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8430            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8431            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8432            bp->port.advertising);
8433
8434         if (netif_running(dev)) {
8435                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8436                 bnx2x_link_set(bp);
8437         }
8438
8439         return 0;
8440 }
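
The forced-speed switch above is one validation per (speed, duplex) pair, each checking a SUPPORTED_ bit and picking the matching ADVERTISED_ bits. The same mapping can be expressed table-driven; a sketch under the assumption that each pair maps to a single capability bit (the bit values below are placeholders, not the real ethtool constants):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-ins for the SUPPORTED_ / ADVERTISED_ bits */
enum { FULL_10 = 1 << 0, HALF_10 = 1 << 1, FULL_100 = 1 << 2,
       HALF_100 = 1 << 3, FULL_1000 = 1 << 4 };

static const struct {
	uint32_t speed;
	bool full_duplex;
	uint32_t cap_bit;
} speed_map[] = {
	{   10, true,  FULL_10   }, {  10, false, HALF_10  },
	{  100, true,  FULL_100  }, { 100, false, HALF_100 },
	{ 1000, true,  FULL_1000 },
};

/* Return the advertising bit for a forced (speed, duplex) request,
 * or 0 when the port does not support that combination. */
static uint32_t forced_advertising(uint32_t supported, uint32_t speed,
				   bool full)
{
	size_t i;

	for (i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++)
		if (speed_map[i].speed == speed &&
		    speed_map[i].full_duplex == full)
			return (supported & speed_map[i].cap_bit) ?
				speed_map[i].cap_bit : 0;
	return 0;
}
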
8441
8442 #define PHY_FW_VER_LEN                  10
8443
8444 static void bnx2x_get_drvinfo(struct net_device *dev,
8445                               struct ethtool_drvinfo *info)
8446 {
8447         struct bnx2x *bp = netdev_priv(dev);
8448         u8 phy_fw_ver[PHY_FW_VER_LEN];
8449
8450         strcpy(info->driver, DRV_MODULE_NAME);
8451         strcpy(info->version, DRV_MODULE_VERSION);
8452
8453         phy_fw_ver[0] = '\0';
8454         if (bp->port.pmf) {
8455                 bnx2x_acquire_phy_lock(bp);
8456                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8457                                              (bp->state != BNX2X_STATE_CLOSED),
8458                                              phy_fw_ver, PHY_FW_VER_LEN);
8459                 bnx2x_release_phy_lock(bp);
8460         }
8461
8462         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8463                  (bp->common.bc_ver & 0xff0000) >> 16,
8464                  (bp->common.bc_ver & 0xff00) >> 8,
8465                  (bp->common.bc_ver & 0xff),
8466                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8467         strcpy(info->bus_info, pci_name(bp->pdev));
8468         info->n_stats = BNX2X_NUM_STATS;
8469         info->testinfo_len = BNX2X_NUM_TESTS;
8470         info->eedump_len = bp->common.flash_size;
8471         info->regdump_len = 0;
8472 }
8473
8474 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8475 {
8476         struct bnx2x *bp = netdev_priv(dev);
8477
8478         if (bp->flags & NO_WOL_FLAG) {
8479                 wol->supported = 0;
8480                 wol->wolopts = 0;
8481         } else {
8482                 wol->supported = WAKE_MAGIC;
8483                 if (bp->wol)
8484                         wol->wolopts = WAKE_MAGIC;
8485                 else
8486                         wol->wolopts = 0;
8487         }
8488         memset(&wol->sopass, 0, sizeof(wol->sopass));
8489 }
8490
8491 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8492 {
8493         struct bnx2x *bp = netdev_priv(dev);
8494
8495         if (wol->wolopts & ~WAKE_MAGIC)
8496                 return -EINVAL;
8497
8498         if (wol->wolopts & WAKE_MAGIC) {
8499                 if (bp->flags & NO_WOL_FLAG)
8500                         return -EINVAL;
8501
8502                 bp->wol = 1;
8503         } else
8504                 bp->wol = 0;
8505
8506         return 0;
8507 }
8508
8509 static u32 bnx2x_get_msglevel(struct net_device *dev)
8510 {
8511         struct bnx2x *bp = netdev_priv(dev);
8512
8513         return bp->msglevel;
8514 }
8515
8516 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8517 {
8518         struct bnx2x *bp = netdev_priv(dev);
8519
8520         if (capable(CAP_NET_ADMIN))
8521                 bp->msglevel = level;
8522 }
8523
8524 static int bnx2x_nway_reset(struct net_device *dev)
8525 {
8526         struct bnx2x *bp = netdev_priv(dev);
8527
8528         if (!bp->port.pmf)
8529                 return 0;
8530
8531         if (netif_running(dev)) {
8532                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8533                 bnx2x_link_set(bp);
8534         }
8535
8536         return 0;
8537 }
8538
8539 static int bnx2x_get_eeprom_len(struct net_device *dev)
8540 {
8541         struct bnx2x *bp = netdev_priv(dev);
8542
8543         return bp->common.flash_size;
8544 }
8545
8546 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8547 {
8548         int port = BP_PORT(bp);
8549         int count, i;
8550         u32 val = 0;
8551
8552         /* adjust timeout for emulation/FPGA */
8553         count = NVRAM_TIMEOUT_COUNT;
8554         if (CHIP_REV_IS_SLOW(bp))
8555                 count *= 100;
8556
8557         /* request access to nvram interface */
8558         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8559                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8560
8561         for (i = 0; i < count*10; i++) {
8562                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8563                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8564                         break;
8565
8566                 udelay(5);
8567         }
8568
8569         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8570                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8571                 return -EBUSY;
8572         }
8573
8574         return 0;
8575 }
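
Both the acquire and release paths poll MCP_REG_MCPR_NVM_SW_ARB until the per-port arbitration bit appears (or clears), stretching the budget 100x on emulation where everything runs slower. The shared idiom, extracted as a standalone helper; reg_rd() and udelay() here are stand-ins, not driver APIs:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t reg_rd(uint32_t addr);		/* mocked register read */
extern void udelay(unsigned long usecs);	/* mocked busy-wait */

/* Poll @addr until (value & mask) == want, for at most @count polls
 * spaced 5us apart; returns false on timeout. */
static bool poll_reg_bits(uint32_t addr, uint32_t mask, uint32_t want,
			  int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if ((reg_rd(addr) & mask) == want)
			return true;
		udelay(5);
	}
	return false;
}
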
8576
8577 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8578 {
8579         int port = BP_PORT(bp);
8580         int count, i;
8581         u32 val = 0;
8582
8583         /* adjust timeout for emulation/FPGA */
8584         count = NVRAM_TIMEOUT_COUNT;
8585         if (CHIP_REV_IS_SLOW(bp))
8586                 count *= 100;
8587
8588         /* relinquish nvram interface */
8589         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8590                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8591
8592         for (i = 0; i < count*10; i++) {
8593                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8594                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8595                         break;
8596
8597                 udelay(5);
8598         }
8599
8600         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8601                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8602                 return -EBUSY;
8603         }
8604
8605         return 0;
8606 }
8607
8608 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8609 {
8610         u32 val;
8611
8612         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8613
8614         /* enable both bits, even on read */
8615         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8616                (val | MCPR_NVM_ACCESS_ENABLE_EN |
8617                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
8618 }
8619
8620 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8621 {
8622         u32 val;
8623
8624         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8625
8626         /* disable both bits, even after read */
8627         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8628                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8629                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8630 }
8631
8632 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8633                                   u32 cmd_flags)
8634 {
8635         int count, i, rc;
8636         u32 val;
8637
8638         /* build the command word */
8639         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8640
8641         /* need to clear DONE bit separately */
8642         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8643
8644         /* address of the NVRAM to read from */
8645         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8646                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8647
8648         /* issue a read command */
8649         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8650
8651         /* adjust timeout for emulation/FPGA */
8652         count = NVRAM_TIMEOUT_COUNT;
8653         if (CHIP_REV_IS_SLOW(bp))
8654                 count *= 100;
8655
8656         /* wait for completion */
8657         *ret_val = 0;
8658         rc = -EBUSY;
8659         for (i = 0; i < count; i++) {
8660                 udelay(5);
8661                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8662
8663                 if (val & MCPR_NVM_COMMAND_DONE) {
8664                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8665                         /* the NVRAM data was read in CPU order,
8666                          * but ethtool expects an array of bytes;
8667                          * converting to big-endian gives that layout */
8668                         *ret_val = cpu_to_be32(val);
8669                         rc = 0;
8670                         break;
8671                 }
8672         }
8673
8674         return rc;
8675 }
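
The cpu_to_be32() above is what makes the ethtool buffer independent of host endianness: the register is read as a host-order integer, and storing it big-endian reproduces the NVRAM byte order in memory. A userspace demonstration, using htonl() as a stand-in for cpu_to_be32():

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() */

int main(void)
{
	uint32_t reg = 0x11223344;	/* hypothetical MCPR_NVM_READ value */
	uint32_t be = htonl(reg);	/* big-endian == NVRAM byte order */
	uint8_t buf[4];

	memcpy(buf, &be, 4);
	/* prints "11 22 33 44" regardless of host endianness */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
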
8676
8677 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8678                             int buf_size)
8679 {
8680         int rc;
8681         u32 cmd_flags;
8682         __be32 val;
8683
8684         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8685                 DP(BNX2X_MSG_NVM,
8686                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8687                    offset, buf_size);
8688                 return -EINVAL;
8689         }
8690
8691         if (offset + buf_size > bp->common.flash_size) {
8692                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8693                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8694                    offset, buf_size, bp->common.flash_size);
8695                 return -EINVAL;
8696         }
8697
8698         /* request access to nvram interface */
8699         rc = bnx2x_acquire_nvram_lock(bp);
8700         if (rc)
8701                 return rc;
8702
8703         /* enable access to nvram interface */
8704         bnx2x_enable_nvram_access(bp);
8705
8706         /* read the first word(s) */
8707         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8708         while ((buf_size > sizeof(u32)) && (rc == 0)) {
8709                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8710                 memcpy(ret_buf, &val, 4);
8711
8712                 /* advance to the next dword */
8713                 offset += sizeof(u32);
8714                 ret_buf += sizeof(u32);
8715                 buf_size -= sizeof(u32);
8716                 cmd_flags = 0;
8717         }
8718
8719         if (rc == 0) {
8720                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8721                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8722                 memcpy(ret_buf, &val, 4);
8723         }
8724
8725         /* disable access to nvram interface */
8726         bnx2x_disable_nvram_access(bp);
8727         bnx2x_release_nvram_lock(bp);
8728
8729         return rc;
8730 }
8731
8732 static int bnx2x_get_eeprom(struct net_device *dev,
8733                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8734 {
8735         struct bnx2x *bp = netdev_priv(dev);
8736         int rc;
8737
8738         if (!netif_running(dev))
8739                 return -EAGAIN;
8740
8741         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8742            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8743            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8744            eeprom->len, eeprom->len);
8745
8746         /* parameters already validated in ethtool_get_eeprom */
8747
8748         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8749
8750         return rc;
8751 }
8752
8753 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8754                                    u32 cmd_flags)
8755 {
8756         int count, i, rc;
8757
8758         /* build the command word */
8759         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8760
8761         /* need to clear DONE bit separately */
8762         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8763
8764         /* write the data */
8765         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8766
8767         /* address of the NVRAM to write to */
8768         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8769                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8770
8771         /* issue the write command */
8772         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8773
8774         /* adjust timeout for emulation/FPGA */
8775         count = NVRAM_TIMEOUT_COUNT;
8776         if (CHIP_REV_IS_SLOW(bp))
8777                 count *= 100;
8778
8779         /* wait for completion */
8780         rc = -EBUSY;
8781         for (i = 0; i < count; i++) {
8782                 udelay(5);
8783                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8784                 if (val & MCPR_NVM_COMMAND_DONE) {
8785                         rc = 0;
8786                         break;
8787                 }
8788         }
8789
8790         return rc;
8791 }
8792
8793 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
8794
8795 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8796                               int buf_size)
8797 {
8798         int rc;
8799         u32 cmd_flags;
8800         u32 align_offset;
8801         __be32 val;
8802
8803         if (offset + buf_size > bp->common.flash_size) {
8804                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8805                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8806                    offset, buf_size, bp->common.flash_size);
8807                 return -EINVAL;
8808         }
8809
8810         /* request access to nvram interface */
8811         rc = bnx2x_acquire_nvram_lock(bp);
8812         if (rc)
8813                 return rc;
8814
8815         /* enable access to nvram interface */
8816         bnx2x_enable_nvram_access(bp);
8817
8818         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8819         align_offset = (offset & ~0x03);
8820         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8821
8822         if (rc == 0) {
8823                 val &= ~(0xff << BYTE_OFFSET(offset));
8824                 val |= (*data_buf << BYTE_OFFSET(offset));
8825
8826                 /* nvram data is returned as an array of bytes
8827                  * convert it back to cpu order */
8828                 val = be32_to_cpu(val);
8829
8830                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8831                                              cmd_flags);
8832         }
8833
8834         /* disable access to nvram interface */
8835         bnx2x_disable_nvram_access(bp);
8836         bnx2x_release_nvram_lock(bp);
8837
8838         return rc;
8839 }
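
bnx2x_nvram_write1() turns a one-byte write into a read-modify-write of the surrounding aligned dword, with BYTE_OFFSET() selecting the byte lane. The mask/shift idiom on its own, as a sketch over a plain host-order word:

#include <stdint.h>

#define BYTE_OFFSET(offset)	(8 * ((offset) & 0x03))

/* Replace one byte inside a 32-bit word: clear the target lane,
 * then OR in the new byte at the same position. */
static uint32_t patch_byte(uint32_t dword, uint32_t offset, uint8_t b)
{
	dword &= ~(0xffu << BYTE_OFFSET(offset));
	dword |= (uint32_t)b << BYTE_OFFSET(offset);
	return dword;
}
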
8840
8841 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8842                              int buf_size)
8843 {
8844         int rc;
8845         u32 cmd_flags;
8846         u32 val;
8847         u32 written_so_far;
8848
8849         if (buf_size == 1)      /* ethtool */
8850                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8851
8852         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8853                 DP(BNX2X_MSG_NVM,
8854                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
8855                    offset, buf_size);
8856                 return -EINVAL;
8857         }
8858
8859         if (offset + buf_size > bp->common.flash_size) {
8860                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8861                                   " buf_size (0x%x) > flash_size (0x%x)\n",
8862                    offset, buf_size, bp->common.flash_size);
8863                 return -EINVAL;
8864         }
8865
8866         /* request access to nvram interface */
8867         rc = bnx2x_acquire_nvram_lock(bp);
8868         if (rc)
8869                 return rc;
8870
8871         /* enable access to nvram interface */
8872         bnx2x_enable_nvram_access(bp);
8873
8874         written_so_far = 0;
8875         cmd_flags = MCPR_NVM_COMMAND_FIRST;
8876         while ((written_so_far < buf_size) && (rc == 0)) {
8877                 if (written_so_far == (buf_size - sizeof(u32)))
8878                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8879                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8880                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
8881                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8882                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8883
8884                 memcpy(&val, data_buf, 4);
8885
8886                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8887
8888                 /* advance to the next dword */
8889                 offset += sizeof(u32);
8890                 data_buf += sizeof(u32);
8891                 written_so_far += sizeof(u32);
8892                 cmd_flags = 0;
8893         }
8894
8895         /* disable access to nvram interface */
8896         bnx2x_disable_nvram_access(bp);
8897         bnx2x_release_nvram_lock(bp);
8898
8899         return rc;
8900 }
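
The write loop flags each dword FIRST or LAST so a programming burst never crosses an NVRAM page boundary: LAST on the final dword of the buffer or of a page, FIRST again on the first dword of the next page. A standalone model of just that flag computation (PAGE_SZ and the flag values are illustrative; the real NVRAM_PAGE_SIZE and MCPR_NVM_COMMAND_* live in the driver headers):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ		256	/* illustrative page size */
#define CMD_FIRST	0x1
#define CMD_LAST	0x2

int main(void)
{
	uint32_t offset = 0xf8, size = 16, written;
	uint32_t flags = CMD_FIRST;

	for (written = 0; written < size; written += 4, offset += 4) {
		if (written == size - 4 || ((offset + 4) % PAGE_SZ) == 0)
			flags |= CMD_LAST;
		else if ((offset % PAGE_SZ) == 0)
			flags |= CMD_FIRST;
		printf("offset 0x%x flags 0x%x\n", offset, flags);
		flags = 0;	/* recomputed per dword, as in the driver */
	}
	return 0;
}
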
8901
8902 static int bnx2x_set_eeprom(struct net_device *dev,
8903                             struct ethtool_eeprom *eeprom, u8 *eebuf)
8904 {
8905         struct bnx2x *bp = netdev_priv(dev);
8906         int rc;
8907
8908         if (!netif_running(dev))
8909                 return -EAGAIN;
8910
8911         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8912            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
8913            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8914            eeprom->len, eeprom->len);
8915
8916         /* parameters already validated in ethtool_set_eeprom */
8917
8918         /* If the magic number is PHY (0x00504859, ASCII "PHY"), upgrade the PHY FW */
8919         if (eeprom->magic == 0x00504859)
8920                 if (bp->port.pmf) {
8921
8922                         bnx2x_acquire_phy_lock(bp);
8923                         rc = bnx2x_flash_download(bp, BP_PORT(bp),
8924                                              bp->link_params.ext_phy_config,
8925                                              (bp->state != BNX2X_STATE_CLOSED),
8926                                              eebuf, eeprom->len);
8927                         if ((bp->state == BNX2X_STATE_OPEN) ||
8928                             (bp->state == BNX2X_STATE_DISABLED)) {
8929                                 rc |= bnx2x_link_reset(&bp->link_params,
8930                                                        &bp->link_vars, 1);
8931                                 rc |= bnx2x_phy_init(&bp->link_params,
8932                                                      &bp->link_vars);
8933                         }
8934                         bnx2x_release_phy_lock(bp);
8935
8936                 } else /* Only the PMF can access the PHY */
8937                         return -EINVAL;
8938         else
8939                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8940
8941         return rc;
8942 }
8943
8944 static int bnx2x_get_coalesce(struct net_device *dev,
8945                               struct ethtool_coalesce *coal)
8946 {
8947         struct bnx2x *bp = netdev_priv(dev);
8948
8949         memset(coal, 0, sizeof(struct ethtool_coalesce));
8950
8951         coal->rx_coalesce_usecs = bp->rx_ticks;
8952         coal->tx_coalesce_usecs = bp->tx_ticks;
8953
8954         return 0;
8955 }
8956
8957 static int bnx2x_set_coalesce(struct net_device *dev,
8958                               struct ethtool_coalesce *coal)
8959 {
8960         struct bnx2x *bp = netdev_priv(dev);
8961
8962         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8963         if (bp->rx_ticks > 3000)
8964                 bp->rx_ticks = 3000;
8965
8966         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8967         if (bp->tx_ticks > 0x3000)
8968                 bp->tx_ticks = 0x3000;
8969
8970         if (netif_running(dev))
8971                 bnx2x_update_coalesce(bp);
8972
8973         return 0;
8974 }
8975
8976 static void bnx2x_get_ringparam(struct net_device *dev,
8977                                 struct ethtool_ringparam *ering)
8978 {
8979         struct bnx2x *bp = netdev_priv(dev);
8980
8981         ering->rx_max_pending = MAX_RX_AVAIL;
8982         ering->rx_mini_max_pending = 0;
8983         ering->rx_jumbo_max_pending = 0;
8984
8985         ering->rx_pending = bp->rx_ring_size;
8986         ering->rx_mini_pending = 0;
8987         ering->rx_jumbo_pending = 0;
8988
8989         ering->tx_max_pending = MAX_TX_AVAIL;
8990         ering->tx_pending = bp->tx_ring_size;
8991 }
8992
8993 static int bnx2x_set_ringparam(struct net_device *dev,
8994                                struct ethtool_ringparam *ering)
8995 {
8996         struct bnx2x *bp = netdev_priv(dev);
8997         int rc = 0;
8998
8999         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9000             (ering->tx_pending > MAX_TX_AVAIL) ||
9001             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9002                 return -EINVAL;
9003
9004         bp->rx_ring_size = ering->rx_pending;
9005         bp->tx_ring_size = ering->tx_pending;
9006
9007         if (netif_running(dev)) {
9008                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9009                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9010         }
9011
9012         return rc;
9013 }
9014
9015 static void bnx2x_get_pauseparam(struct net_device *dev,
9016                                  struct ethtool_pauseparam *epause)
9017 {
9018         struct bnx2x *bp = netdev_priv(dev);
9019
9020         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9021                            BNX2X_FLOW_CTRL_AUTO) &&
9022                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9023
9024         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9025                             BNX2X_FLOW_CTRL_RX);
9026         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9027                             BNX2X_FLOW_CTRL_TX);
9028
9029         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9030            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9031            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9032 }
9033
9034 static int bnx2x_set_pauseparam(struct net_device *dev,
9035                                 struct ethtool_pauseparam *epause)
9036 {
9037         struct bnx2x *bp = netdev_priv(dev);
9038
9039         if (IS_E1HMF(bp))
9040                 return 0;
9041
9042         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9043            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9044            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9045
9046         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9047
9048         if (epause->rx_pause)
9049                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9050
9051         if (epause->tx_pause)
9052                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9053
9054         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9055                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9056
9057         if (epause->autoneg) {
9058                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9059                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9060                         return -EINVAL;
9061                 }
9062
9063                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9064                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9065         }
9066
9067         DP(NETIF_MSG_LINK,
9068            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9069
9070         if (netif_running(dev)) {
9071                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9072                 bnx2x_link_set(bp);
9073         }
9074
9075         return 0;
9076 }
9077
9078 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9079 {
9080         struct bnx2x *bp = netdev_priv(dev);
9081         int changed = 0;
9082         int rc = 0;
9083
9084         /* TPA requires Rx CSUM offloading */
9085         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9086                 if (!(dev->features & NETIF_F_LRO)) {
9087                         dev->features |= NETIF_F_LRO;
9088                         bp->flags |= TPA_ENABLE_FLAG;
9089                         changed = 1;
9090                 }
9091
9092         } else if (dev->features & NETIF_F_LRO) {
9093                 dev->features &= ~NETIF_F_LRO;
9094                 bp->flags &= ~TPA_ENABLE_FLAG;
9095                 changed = 1;
9096         }
9097
9098         if (changed && netif_running(dev)) {
9099                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9100                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9101         }
9102
9103         return rc;
9104 }
9105
9106 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9107 {
9108         struct bnx2x *bp = netdev_priv(dev);
9109
9110         return bp->rx_csum;
9111 }
9112
9113 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9114 {
9115         struct bnx2x *bp = netdev_priv(dev);
9116         int rc = 0;
9117
9118         bp->rx_csum = data;
9119
9120         /* Disable TPA when Rx CSUM is disabled; otherwise all
9121            TPA'ed packets would be discarded due to a wrong TCP CSUM */
9122         if (!data) {
9123                 u32 flags = ethtool_op_get_flags(dev);
9124
9125                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9126         }
9127
9128         return rc;
9129 }
9130
9131 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9132 {
9133         if (data) {
9134                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9135                 dev->features |= NETIF_F_TSO6;
9136         } else {
9137                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9138                 dev->features &= ~NETIF_F_TSO6;
9139         }
9140
9141         return 0;
9142 }
9143
9144 static const struct {
9145         char string[ETH_GSTRING_LEN];
9146 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9147         { "register_test (offline)" },
9148         { "memory_test (offline)" },
9149         { "loopback_test (offline)" },
9150         { "nvram_test (online)" },
9151         { "interrupt_test (online)" },
9152         { "link_test (online)" },
9153         { "idle check (online)" }
9154 };
9155
9156 static int bnx2x_self_test_count(struct net_device *dev)
9157 {
9158         return BNX2X_NUM_TESTS;
9159 }
9160
9161 static int bnx2x_test_registers(struct bnx2x *bp)
9162 {
9163         int idx, i, rc = -ENODEV;
9164         u32 wr_val = 0;
9165         int port = BP_PORT(bp);
9166         static const struct {
9167                 u32  offset0;
9168                 u32  offset1;
9169                 u32  mask;
9170         } reg_tbl[] = {
9171 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9172                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9173                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9174                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9175                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9176                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9177                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9178                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9179                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9180                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9181 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9182                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9183                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9184                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9185                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9186                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9187                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9188                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9189                 { NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
9190                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9191 /* 20 */        { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9192                 { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9193                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9194                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9195                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9196                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9197                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9198                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9199                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9200                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9201 /* 30 */        { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9202                 { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9203                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9204                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9205                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9206                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9207                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9208                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9209
9210                 { 0xffffffff, 0, 0x00000000 }
9211         };
9212
9213         if (!netif_running(bp->dev))
9214                 return rc;
9215
9216         /* Run the test twice:
9217            first writing 0x00000000, then writing 0xffffffff */
9218         for (idx = 0; idx < 2; idx++) {
9219
9220                 switch (idx) {
9221                 case 0:
9222                         wr_val = 0;
9223                         break;
9224                 case 1:
9225                         wr_val = 0xffffffff;
9226                         break;
9227                 }
9228
9229                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9230                         u32 offset, mask, save_val, val;
9231
9232                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9233                         mask = reg_tbl[i].mask;
9234
9235                         save_val = REG_RD(bp, offset);
9236
9237                         REG_WR(bp, offset, wr_val);
9238                         val = REG_RD(bp, offset);
9239
9240                         /* Restore the original register's value */
9241                         REG_WR(bp, offset, save_val);
9242
9243                         /* verify the value read back is as expected */
9244                         if ((val & mask) != (wr_val & mask))
9245                                 goto test_reg_exit;
9246                 }
9247         }
9248
9249         rc = 0;
9250
9251 test_reg_exit:
9252         return rc;
9253 }
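
register_test runs the table twice, writing all-zeros and then all-ones, and a register passes only if exactly the writable bits in mask took the value; offset1 is the per-port stride. The core write/read-back/restore step as a standalone model, with reg_rd()/reg_wr() as stand-ins:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t reg_rd(uint32_t off);		/* mocked register read */
extern void reg_wr(uint32_t off, uint32_t val);	/* mocked register write */

/* Write a test pattern, read it back, restore the original value,
 * and compare only the bits covered by @mask. */
static bool reg_pattern_ok(uint32_t off, uint32_t mask, uint32_t wr_val)
{
	uint32_t save = reg_rd(off);
	uint32_t val;

	reg_wr(off, wr_val);
	val = reg_rd(off);
	reg_wr(off, save);	/* leave the register as we found it */

	return (val & mask) == (wr_val & mask);
}
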
9254
9255 static int bnx2x_test_memory(struct bnx2x *bp)
9256 {
9257         int i, j, rc = -ENODEV;
9258         u32 val;
9259         static const struct {
9260                 u32 offset;
9261                 int size;
9262         } mem_tbl[] = {
9263                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9264                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9265                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9266                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9267                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9268                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9269                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9270
9271                 { 0xffffffff, 0 }
9272         };
9273         static const struct {
9274                 char *name;
9275                 u32 offset;
9276                 u32 e1_mask;
9277                 u32 e1h_mask;
9278         } prty_tbl[] = {
9279                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9280                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9281                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9282                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9283                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9284                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9285
9286                 { NULL, 0xffffffff, 0, 0 }
9287         };
9288
9289         if (!netif_running(bp->dev))
9290                 return rc;
9291
9292         /* Go through all the memories */
9293         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9294                 for (j = 0; j < mem_tbl[i].size; j++)
9295                         REG_RD(bp, mem_tbl[i].offset + j*4);
9296
9297         /* Check the parity status */
9298         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9299                 val = REG_RD(bp, prty_tbl[i].offset);
9300                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9301                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9302                         DP(NETIF_MSG_HW,
9303                            "%s is 0x%x\n", prty_tbl[i].name, val);
9304                         goto test_mem_exit;
9305                 }
9306         }
9307
9308         rc = 0;
9309
9310 test_mem_exit:
9311         return rc;
9312 }
9313
9314 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9315 {
9316         int cnt = 1000;
9317
9318         if (link_up)
9319                 while (bnx2x_link_test(bp) && cnt--)
9320                         msleep(10);
9321 }
9322
9323 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9324 {
9325         unsigned int pkt_size, num_pkts, i;
9326         struct sk_buff *skb;
9327         unsigned char *packet;
9328         struct bnx2x_fastpath *fp = &bp->fp[0];
9329         u16 tx_start_idx, tx_idx;
9330         u16 rx_start_idx, rx_idx;
9331         u16 pkt_prod;
9332         struct sw_tx_bd *tx_buf;
9333         struct eth_tx_bd *tx_bd;
9334         dma_addr_t mapping;
9335         union eth_rx_cqe *cqe;
9336         u8 cqe_fp_flags;
9337         struct sw_rx_bd *rx_buf;
9338         u16 len;
9339         int rc = -ENODEV;
9340
9341         /* check the loopback mode */
9342         switch (loopback_mode) {
9343         case BNX2X_PHY_LOOPBACK:
9344                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9345                         return -EINVAL;
9346                 break;
9347         case BNX2X_MAC_LOOPBACK:
9348                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9349                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9350                 break;
9351         default:
9352                 return -EINVAL;
9353         }
9354
9355         /* prepare the loopback packet */
9356         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9357                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9358         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9359         if (!skb) {
9360                 rc = -ENOMEM;
9361                 goto test_loopback_exit;
9362         }
9363         packet = skb_put(skb, pkt_size);
9364         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9365         memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9366         for (i = ETH_HLEN; i < pkt_size; i++)
9367                 packet[i] = (unsigned char) (i & 0xff);
9368
9369         /* send the loopback packet */
9370         num_pkts = 0;
9371         tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9372         rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9373
9374         pkt_prod = fp->tx_pkt_prod++;
9375         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9376         tx_buf->first_bd = fp->tx_bd_prod;
9377         tx_buf->skb = skb;
9378
9379         tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9380         mapping = pci_map_single(bp->pdev, skb->data,
9381                                  skb_headlen(skb), PCI_DMA_TODEVICE);
9382         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9383         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9384         tx_bd->nbd = cpu_to_le16(1);
9385         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9386         tx_bd->vlan = cpu_to_le16(pkt_prod);
9387         tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9388                                        ETH_TX_BD_FLAGS_END_BD);
9389         tx_bd->general_data = ((UNICAST_ADDRESS <<
9390                                 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9391
9392         wmb();
9393
9394         le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9395         mb(); /* FW restriction: must not reorder writing nbd and packets */
9396         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9397         DOORBELL(bp, fp->index, 0);
9398
9399         mmiowb();
9400
9401         num_pkts++;
9402         fp->tx_bd_prod++;
9403         bp->dev->trans_start = jiffies;
9404
9405         udelay(100);
9406
9407         tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9408         if (tx_idx != tx_start_idx + num_pkts)
9409                 goto test_loopback_exit;
9410
9411         rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9412         if (rx_idx != rx_start_idx + num_pkts)
9413                 goto test_loopback_exit;
9414
9415         cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9416         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9417         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9418                 goto test_loopback_rx_exit;
9419
9420         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9421         if (len != pkt_size)
9422                 goto test_loopback_rx_exit;
9423
9424         rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9425         skb = rx_buf->skb;
9426         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9427         for (i = ETH_HLEN; i < pkt_size; i++)
9428                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9429                         goto test_loopback_rx_exit;
9430
9431         rc = 0;
9432
9433 test_loopback_rx_exit:
9434
9435         fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9436         fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9437         fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9438         fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9439
9440         /* Update producers */
9441         bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9442                              fp->rx_sge_prod);
9443
9444 test_loopback_exit:
9445         bp->link_params.loopback_mode = LOOPBACK_NONE;
9446
9447         return rc;
9448 }
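
/* Editor's summary of bnx2x_run_loopback(): a single self-addressed
 * frame with a ramp payload (byte i == i & 0xff) is posted on queue 0;
 * after a 100us grace period the test requires that the TX and RX
 * status-block indices each advanced by exactly num_pkts and that the
 * received CQE holds an error-free frame whose payload matches the
 * ramp byte for byte.
 */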
9449
9450 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9451 {
9452         int rc = 0, res;
9453
9454         if (!netif_running(bp->dev))
9455                 return BNX2X_LOOPBACK_FAILED;
9456
9457         bnx2x_netif_stop(bp, 1);
9458         bnx2x_acquire_phy_lock(bp);
9459
9460         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9461         if (res) {
9462                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
9463                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9464         }
9465
9466         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9467         if (res) {
9468                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
9469                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9470         }
9471
9472         bnx2x_release_phy_lock(bp);
9473         bnx2x_netif_start(bp);
9474
9475         return rc;
9476 }
9477
9478 #define CRC32_RESIDUAL                  0xdebb20e3
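
/* Editor's note: CRC32_RESIDUAL is the standard residue of the
 * reflected CRC-32 (poly 0xedb88320, init ~0, no final xor - the same
 * convention ether_crc_le() uses): running that CRC over a region which
 * already carries its own little-endian CRC at the end yields this
 * constant for any data.  A minimal sketch of the equivalent check
 * (editor's illustration only, not driver code):
 */
#if 0
static u32 crc32_le_sketch(const u8 *p, int len)
{
        u32 crc = ~0;
        int i;

        while (len--) {
                crc ^= *p++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
        return crc;     /* == CRC32_RESIDUAL when the region is intact */
}
#endif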
9479
9480 static int bnx2x_test_nvram(struct bnx2x *bp)
9481 {
9482         static const struct {
9483                 int offset;
9484                 int size;
9485         } nvram_tbl[] = {
9486                 {     0,  0x14 }, /* bootstrap */
9487                 {  0x14,  0xec }, /* dir */
9488                 { 0x100, 0x350 }, /* manuf_info */
9489                 { 0x450,  0xf0 }, /* feature_info */
9490                 { 0x640,  0x64 }, /* upgrade_key_info */
9491                 { 0x6a4,  0x64 },
9492                 { 0x708,  0x70 }, /* manuf_key_info */
9493                 { 0x778,  0x70 },
9494                 {     0,     0 }
9495         };
9496         __be32 buf[0x350 / 4];
9497         u8 *data = (u8 *)buf;
9498         int i, rc;
9499         u32 magic, csum;
9500
9501         rc = bnx2x_nvram_read(bp, 0, data, 4);
9502         if (rc) {
9503                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9504                 goto test_nvram_exit;
9505         }
9506
9507         magic = be32_to_cpu(buf[0]);
9508         if (magic != 0x669955aa) {
9509                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9510                 rc = -ENODEV;
9511                 goto test_nvram_exit;
9512         }
9513
9514         for (i = 0; nvram_tbl[i].size; i++) {
9515
9516                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9517                                       nvram_tbl[i].size);
9518                 if (rc) {
9519                         DP(NETIF_MSG_PROBE,
9520                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9521                         goto test_nvram_exit;
9522                 }
9523
9524                 csum = ether_crc_le(nvram_tbl[i].size, data);
9525                 if (csum != CRC32_RESIDUAL) {
9526                         DP(NETIF_MSG_PROBE,
9527                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9528                         rc = -ENODEV;
9529                         goto test_nvram_exit;
9530                 }
9531         }
9532
9533 test_nvram_exit:
9534         return rc;
9535 }
9536
9537 static int bnx2x_test_intr(struct bnx2x *bp)
9538 {
9539         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9540         int i, rc;
9541
9542         if (!netif_running(bp->dev))
9543                 return -ENODEV;
9544
9545         config->hdr.length = 0;
9546         if (CHIP_IS_E1(bp))
9547                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9548         else
9549                 config->hdr.offset = BP_FUNC(bp);
9550         config->hdr.client_id = bp->fp->cl_id;
9551         config->hdr.reserved1 = 0;
9552
9553         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9554                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9555                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9556         if (rc == 0) {
9557                 bp->set_mac_pending++;
9558                 for (i = 0; i < 10; i++) {
9559                         if (!bp->set_mac_pending)
9560                                 break;
9561                         msleep_interruptible(10);
9562                 }
9563                 if (i == 10)
9564                         rc = -ENODEV;
9565         }
9566
9567         return rc;
9568 }
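
/* Editor's note: this "interrupt test" is indirect - it posts a benign
 * SET_MAC ramrod and polls set_mac_pending for up to ~100ms.  The flag
 * is only cleared on the slowpath completion path, which is driven by
 * an interrupt, so a timeout indicates interrupts are not arriving.
 */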
9569
9570 static void bnx2x_self_test(struct net_device *dev,
9571                             struct ethtool_test *etest, u64 *buf)
9572 {
9573         struct bnx2x *bp = netdev_priv(dev);
9574
9575         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9576
9577         if (!netif_running(dev))
9578                 return;
9579
9580         /* offline tests are not supported in MF mode */
9581         if (IS_E1HMF(bp))
9582                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9583
9584         if (etest->flags & ETH_TEST_FL_OFFLINE) {
9585                 u8 link_up;
9586
9587                 link_up = bp->link_vars.link_up;
9588                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9589                 bnx2x_nic_load(bp, LOAD_DIAG);
9590                 /* wait until link state is restored */
9591                 bnx2x_wait_for_link(bp, link_up);
9592
9593                 if (bnx2x_test_registers(bp) != 0) {
9594                         buf[0] = 1;
9595                         etest->flags |= ETH_TEST_FL_FAILED;
9596                 }
9597                 if (bnx2x_test_memory(bp) != 0) {
9598                         buf[1] = 1;
9599                         etest->flags |= ETH_TEST_FL_FAILED;
9600                 }
9601                 buf[2] = bnx2x_test_loopback(bp, link_up);
9602                 if (buf[2] != 0)
9603                         etest->flags |= ETH_TEST_FL_FAILED;
9604
9605                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9606                 bnx2x_nic_load(bp, LOAD_NORMAL);
9607                 /* wait until link state is restored */
9608                 bnx2x_wait_for_link(bp, link_up);
9609         }
9610         if (bnx2x_test_nvram(bp) != 0) {
9611                 buf[3] = 1;
9612                 etest->flags |= ETH_TEST_FL_FAILED;
9613         }
9614         if (bnx2x_test_intr(bp) != 0) {
9615                 buf[4] = 1;
9616                 etest->flags |= ETH_TEST_FL_FAILED;
9617         }
9618         if (bp->port.pmf)
9619                 if (bnx2x_link_test(bp) != 0) {
9620                         buf[5] = 1;
9621                         etest->flags |= ETH_TEST_FL_FAILED;
9622                 }
9623
9624 #ifdef BNX2X_EXTRA_DEBUG
9625         bnx2x_panic_dump(bp);
9626 #endif
9627 }
9628
9629 static const struct {
9630         long offset;
9631         int size;
9632         u8 string[ETH_GSTRING_LEN];
9633 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9634 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9635         { Q_STATS_OFFSET32(error_bytes_received_hi),
9636                                                 8, "[%d]: rx_error_bytes" },
9637         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9638                                                 8, "[%d]: rx_ucast_packets" },
9639         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9640                                                 8, "[%d]: rx_mcast_packets" },
9641         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9642                                                 8, "[%d]: rx_bcast_packets" },
9643         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9644         { Q_STATS_OFFSET32(rx_err_discard_pkt),
9645                                          4, "[%d]: rx_phy_ip_err_discards"},
9646         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9647                                          4, "[%d]: rx_skb_alloc_discard" },
9648         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9649
9650 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9651         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9652                                                         8, "[%d]: tx_packets" }
9653 };
9654
9655 static const struct {
9656         long offset;
9657         int size;
9658         u32 flags;
9659 #define STATS_FLAGS_PORT                1
9660 #define STATS_FLAGS_FUNC                2
9661 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9662         u8 string[ETH_GSTRING_LEN];
9663 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9664 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9665                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
9666         { STATS_OFFSET32(error_bytes_received_hi),
9667                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9668         { STATS_OFFSET32(total_unicast_packets_received_hi),
9669                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9670         { STATS_OFFSET32(total_multicast_packets_received_hi),
9671                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9672         { STATS_OFFSET32(total_broadcast_packets_received_hi),
9673                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9674         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9675                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9676         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9677                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
9678         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9679                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9680         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9681                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9682 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9683                                 8, STATS_FLAGS_PORT, "rx_fragments" },
9684         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9685                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
9686         { STATS_OFFSET32(no_buff_discard_hi),
9687                                 8, STATS_FLAGS_BOTH, "rx_discards" },
9688         { STATS_OFFSET32(mac_filter_discard),
9689                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9690         { STATS_OFFSET32(xxoverflow_discard),
9691                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9692         { STATS_OFFSET32(brb_drop_hi),
9693                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9694         { STATS_OFFSET32(brb_truncate_hi),
9695                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9696         { STATS_OFFSET32(pause_frames_received_hi),
9697                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9698         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9699                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9700         { STATS_OFFSET32(nig_timer_max),
9701                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9702 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9703                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9704         { STATS_OFFSET32(rx_skb_alloc_failed),
9705                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9706         { STATS_OFFSET32(hw_csum_err),
9707                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9708
9709         { STATS_OFFSET32(total_bytes_transmitted_hi),
9710                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
9711         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9712                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9713         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9714                                 8, STATS_FLAGS_BOTH, "tx_packets" },
9715         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9716                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9717         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9718                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9719         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9720                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9721         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9722                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9723 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9724                                 8, STATS_FLAGS_PORT, "tx_deferred" },
9725         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9726                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9727         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9728                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9729         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9730                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9731         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9732                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9733         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9734                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9735         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9736                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9737         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9738                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9739         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9740                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9741         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9742                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9743 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9744                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9745         { STATS_OFFSET32(pause_frames_sent_hi),
9746                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9747 };
9748
9749 #define IS_PORT_STAT(i) \
9750         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9751 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9752 #define IS_E1HMF_MODE_STAT(bp) \
9753                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9754
9755 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9756 {
9757         struct bnx2x *bp = netdev_priv(dev);
9758         int i, j, k;
9759
9760         switch (stringset) {
9761         case ETH_SS_STATS:
9762                 if (is_multi(bp)) {
9763                         k = 0;
9764                         for_each_queue(bp, i) {
9765                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9766                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9767                                                 bnx2x_q_stats_arr[j].string, i);
9768                                 k += BNX2X_NUM_Q_STATS;
9769                         }
9770                         if (IS_E1HMF_MODE_STAT(bp))
9771                                 break;
9772                         for (j = 0; j < BNX2X_NUM_STATS; j++)
9773                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9774                                        bnx2x_stats_arr[j].string);
9775                 } else {
9776                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9777                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9778                                         continue;
9779                                 strcpy(buf + j*ETH_GSTRING_LEN,
9780                                        bnx2x_stats_arr[i].string);
9781                                 j++;
9782                         }
9783                 }
9784                 break;
9785
9786         case ETH_SS_TEST:
9787                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9788                 break;
9789         }
9790 }
9791
9792 static int bnx2x_get_stats_count(struct net_device *dev)
9793 {
9794         struct bnx2x *bp = netdev_priv(dev);
9795         int i, num_stats;
9796
9797         if (is_multi(bp)) {
9798                 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9799                 if (!IS_E1HMF_MODE_STAT(bp))
9800                         num_stats += BNX2X_NUM_STATS;
9801         } else {
9802                 if (IS_E1HMF_MODE_STAT(bp)) {
9803                         num_stats = 0;
9804                         for (i = 0; i < BNX2X_NUM_STATS; i++)
9805                                 if (IS_FUNC_STAT(i))
9806                                         num_stats++;
9807                 } else
9808                         num_stats = BNX2X_NUM_STATS;
9809         }
9810
9811         return num_stats;
9812 }
9813
9814 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9815                                     struct ethtool_stats *stats, u64 *buf)
9816 {
9817         struct bnx2x *bp = netdev_priv(dev);
9818         u32 *hw_stats, *offset;
9819         int i, j, k;
9820
9821         if (is_multi(bp)) {
9822                 k = 0;
9823                 for_each_queue(bp, i) {
9824                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9825                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9826                                 if (bnx2x_q_stats_arr[j].size == 0) {
9827                                         /* skip this counter */
9828                                         buf[k + j] = 0;
9829                                         continue;
9830                                 }
9831                                 offset = (hw_stats +
9832                                           bnx2x_q_stats_arr[j].offset);
9833                                 if (bnx2x_q_stats_arr[j].size == 4) {
9834                                         /* 4-byte counter */
9835                                         buf[k + j] = (u64) *offset;
9836                                         continue;
9837                                 }
9838                                 /* 8-byte counter */
9839                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9840                         }
9841                         k += BNX2X_NUM_Q_STATS;
9842                 }
9843                 if (IS_E1HMF_MODE_STAT(bp))
9844                         return;
9845                 hw_stats = (u32 *)&bp->eth_stats;
9846                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9847                         if (bnx2x_stats_arr[j].size == 0) {
9848                                 /* skip this counter */
9849                                 buf[k + j] = 0;
9850                                 continue;
9851                         }
9852                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
9853                         if (bnx2x_stats_arr[j].size == 4) {
9854                                 /* 4-byte counter */
9855                                 buf[k + j] = (u64) *offset;
9856                                 continue;
9857                         }
9858                         /* 8-byte counter */
9859                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
9860                 }
9861         } else {
9862                 hw_stats = (u32 *)&bp->eth_stats;
9863                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9864                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9865                                 continue;
9866                         if (bnx2x_stats_arr[i].size == 0) {
9867                                 /* skip this counter */
9868                                 buf[j] = 0;
9869                                 j++;
9870                                 continue;
9871                         }
9872                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
9873                         if (bnx2x_stats_arr[i].size == 4) {
9874                                 /* 4-byte counter */
9875                                 buf[j] = (u64) *offset;
9876                                 j++;
9877                                 continue;
9878                         }
9879                         /* 8-byte counter */
9880                         buf[j] = HILO_U64(*offset, *(offset + 1));
9881                         j++;
9882                 }
9883         }
9884 }
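
/* Editor's illustration: 8-byte counters live in the stats block as two
 * consecutive 32-bit words, high word first, which is why the loops
 * above read *offset and *(offset + 1).  HILO_U64 is assumed to combine
 * them roughly as follows (sketch based on its use here):
 */
#if 0
#define HILO_U64_SKETCH(hi, lo)         ((((u64)(hi)) << 32) + (lo))
#endif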
9885
9886 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9887 {
9888         struct bnx2x *bp = netdev_priv(dev);
9889         int port = BP_PORT(bp);
9890         int i;
9891
9892         if (!netif_running(dev))
9893                 return 0;
9894
9895         if (!bp->port.pmf)
9896                 return 0;
9897
9898         if (data == 0)
9899                 data = 2;
9900
9901         for (i = 0; i < (data * 2); i++) {
9902                 if ((i % 2) == 0)
9903                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9904                                       bp->link_params.hw_led_mode,
9905                                       bp->link_params.chip_id);
9906                 else
9907                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9908                                       bp->link_params.hw_led_mode,
9909                                       bp->link_params.chip_id);
9910
9911                 msleep_interruptible(500);
9912                 if (signal_pending(current))
9913                         break;
9914         }
9915
9916         if (bp->link_vars.link_up)
9917                 bnx2x_set_led(bp, port, LED_MODE_OPER,
9918                               bp->link_vars.line_speed,
9919                               bp->link_params.hw_led_mode,
9920                               bp->link_params.chip_id);
9921
9922         return 0;
9923 }
9924
9925 static struct ethtool_ops bnx2x_ethtool_ops = {
9926         .get_settings           = bnx2x_get_settings,
9927         .set_settings           = bnx2x_set_settings,
9928         .get_drvinfo            = bnx2x_get_drvinfo,
9929         .get_wol                = bnx2x_get_wol,
9930         .set_wol                = bnx2x_set_wol,
9931         .get_msglevel           = bnx2x_get_msglevel,
9932         .set_msglevel           = bnx2x_set_msglevel,
9933         .nway_reset             = bnx2x_nway_reset,
9934         .get_link               = ethtool_op_get_link,
9935         .get_eeprom_len         = bnx2x_get_eeprom_len,
9936         .get_eeprom             = bnx2x_get_eeprom,
9937         .set_eeprom             = bnx2x_set_eeprom,
9938         .get_coalesce           = bnx2x_get_coalesce,
9939         .set_coalesce           = bnx2x_set_coalesce,
9940         .get_ringparam          = bnx2x_get_ringparam,
9941         .set_ringparam          = bnx2x_set_ringparam,
9942         .get_pauseparam         = bnx2x_get_pauseparam,
9943         .set_pauseparam         = bnx2x_set_pauseparam,
9944         .get_rx_csum            = bnx2x_get_rx_csum,
9945         .set_rx_csum            = bnx2x_set_rx_csum,
9946         .get_tx_csum            = ethtool_op_get_tx_csum,
9947         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
9948         .set_flags              = bnx2x_set_flags,
9949         .get_flags              = ethtool_op_get_flags,
9950         .get_sg                 = ethtool_op_get_sg,
9951         .set_sg                 = ethtool_op_set_sg,
9952         .get_tso                = ethtool_op_get_tso,
9953         .set_tso                = bnx2x_set_tso,
9954         .self_test_count        = bnx2x_self_test_count,
9955         .self_test              = bnx2x_self_test,
9956         .get_strings            = bnx2x_get_strings,
9957         .phys_id                = bnx2x_phys_id,
9958         .get_stats_count        = bnx2x_get_stats_count,
9959         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
9960 };
9961
9962 /* end of ethtool_ops */
9963
9964 /****************************************************************************
9965 * General service functions
9966 ****************************************************************************/
9967
9968 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9969 {
9970         u16 pmcsr;
9971
9972         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9973
9974         switch (state) {
9975         case PCI_D0:
9976                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9977                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9978                                        PCI_PM_CTRL_PME_STATUS));
9979
9980                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9981                         /* delay required during transition out of D3hot */
9982                         msleep(20);
9983                 break;
9984
9985         case PCI_D3hot:
9986                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
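                /* 3 is the D3hot encoding of the 2-bit power-state
                 * field masked above (00 = D0 ... 11 = D3hot) */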
9987                 pmcsr |= 3;
9988
9989                 if (bp->wol)
9990                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9991
9992                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9993                                       pmcsr);
9994
9995                 /* No more memory access after this point until
9996                  * device is brought back to D0.
9997                  */
9998                 break;
9999
10000         default:
10001                 return -EINVAL;
10002         }
10003         return 0;
10004 }
10005
10006 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10007 {
10008         u16 rx_cons_sb;
10009
10010         /* Tell compiler that status block fields can change */
10011         barrier();
10012         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
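        /* editor's note: the last entry of each RCQ page is a "next
         * page" pointer, not a real completion, so if the status-block
         * index lands on it we step past it before comparing with our
         * own consumer (which never rests on such entries). */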
10013         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10014                 rx_cons_sb++;
10015         return (fp->rx_comp_cons != rx_cons_sb);
10016 }
10017
10018 /*
10019  * net_device service functions
10020  */
10021
10022 static int bnx2x_poll(struct napi_struct *napi, int budget)
10023 {
10024         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10025                                                  napi);
10026         struct bnx2x *bp = fp->bp;
10027         int work_done = 0;
10028
10029 #ifdef BNX2X_STOP_ON_ERROR
10030         if (unlikely(bp->panic))
10031                 goto poll_panic;
10032 #endif
10033
10034         prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10035         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10036         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10037
10038         bnx2x_update_fpsb_idx(fp);
10039
10040         if (bnx2x_has_tx_work(fp))
10041                 bnx2x_tx_int(fp);
10042
10043         if (bnx2x_has_rx_work(fp)) {
10044                 work_done = bnx2x_rx_int(fp, budget);
10045
10046                 /* must not complete if we consumed full budget */
10047                 if (work_done >= budget)
10048                         goto poll_again;
10049         }
10050
10051         /* BNX2X_HAS_WORK() reads the status block, thus we need to
10052          * ensure that status block indices have actually been read
10053          * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10054          * so that we won't write the "newer" value of the status block to IGU
10055          * (if there was a DMA right after BNX2X_HAS_WORK and
10056          * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10057          * may be postponed to right before bnx2x_ack_sb). In this case
10058          * there will never be another interrupt until there is another update
10059          * of the status block, while there is still unhandled work.
10060          */
10061         rmb();
10062
10063         if (!BNX2X_HAS_WORK(fp)) {
10064 #ifdef BNX2X_STOP_ON_ERROR
10065 poll_panic:
10066 #endif
10067                 napi_complete(napi);
10068
10069                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10070                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10071                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10072                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10073         }
10074
10075 poll_again:
10076         return work_done;
10077 }
10078
10079
10080 /* We split the first BD into a headers BD and a data BD
10081  * to ease the pain of our fellow microcode engineers;
10082  * we use one mapping for both BDs.
10083  * So far this has only been observed to happen
10084  * in Other Operating Systems(TM).
10085  */
10086 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10087                                    struct bnx2x_fastpath *fp,
10088                                    struct eth_tx_bd **tx_bd, u16 hlen,
10089                                    u16 bd_prod, int nbd)
10090 {
10091         struct eth_tx_bd *h_tx_bd = *tx_bd;
10092         struct eth_tx_bd *d_tx_bd;
10093         dma_addr_t mapping;
10094         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10095
10096         /* first fix first BD */
10097         h_tx_bd->nbd = cpu_to_le16(nbd);
10098         h_tx_bd->nbytes = cpu_to_le16(hlen);
10099
10100         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10101            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10102            h_tx_bd->addr_lo, h_tx_bd->nbd);
10103
10104         /* now get a new data BD
10105          * (after the pbd) and fill it */
10106         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10107         d_tx_bd = &fp->tx_desc_ring[bd_prod];
10108
10109         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10110                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10111
10112         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10113         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10114         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10115         d_tx_bd->vlan = 0;
10116         /* this marks the BD as one that has no individual mapping;
10117          * the FW ignores this flag in a BD not marked start
10118          */
10119         d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10120         DP(NETIF_MSG_TX_QUEUED,
10121            "TSO split data size is %d (%x:%x)\n",
10122            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10123
10124         /* update tx_bd for marking the last BD flag */
10125         *tx_bd = d_tx_bd;
10126
10127         return bd_prod;
10128 }
10129
10130 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10131 {
10132         if (fix > 0)
10133                 csum = (u16) ~csum_fold(csum_sub(csum,
10134                                 csum_partial(t_header - fix, fix, 0)));
10135
10136         else if (fix < 0)
10137                 csum = (u16) ~csum_fold(csum_add(csum,
10138                                 csum_partial(t_header, -fix, 0)));
10139
10140         return swab16(csum);
10141 }
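
/* Editor's note: 'fix' (from SKB_CS_OFF()) is the signed byte distance
 * between where the stack started the checksum and where the transport
 * header begins.  fix > 0 means extra leading bytes were summed, so
 * their partial sum is subtracted; fix < 0 means bytes were missed, so
 * they are added back.  swab16() returns the result in the byte order
 * the parsing BD expects.
 */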
10142
10143 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10144 {
10145         u32 rc;
10146
10147         if (skb->ip_summed != CHECKSUM_PARTIAL)
10148                 rc = XMIT_PLAIN;
10149
10150         else {
10151                 if (skb->protocol == htons(ETH_P_IPV6)) {
10152                         rc = XMIT_CSUM_V6;
10153                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10154                                 rc |= XMIT_CSUM_TCP;
10155
10156                 } else {
10157                         rc = XMIT_CSUM_V4;
10158                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10159                                 rc |= XMIT_CSUM_TCP;
10160                 }
10161         }
10162
10163         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10164                 rc |= XMIT_GSO_V4;
10165
10166         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10167                 rc |= XMIT_GSO_V6;
10168
10169         return rc;
10170 }
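
/* Editor's note: xmit_type is a bitmask - e.g. a TSO IPv4/TCP skb with
 * CHECKSUM_PARTIAL yields XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4,
 * while a plain, non-offloaded frame is just XMIT_PLAIN.
 */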
10171
10172 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10173 /* check if the packet requires linearization (it is too fragmented);
10174    no need to check fragmentation if page size > 8K (there will be no
10175    violation of FW restrictions) */
10176 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10177                              u32 xmit_type)
10178 {
10179         int to_copy = 0;
10180         int hlen = 0;
10181         int first_bd_sz = 0;
10182
10183         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10184         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10185
10186                 if (xmit_type & XMIT_GSO) {
10187                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10188                         /* Check if LSO packet needs to be copied:
10189                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10190                         int wnd_size = MAX_FETCH_BD - 3;
10191                         /* Number of windows to check */
10192                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10193                         int wnd_idx = 0;
10194                         int frag_idx = 0;
10195                         u32 wnd_sum = 0;
10196
10197                         /* Headers length */
10198                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10199                                 tcp_hdrlen(skb);
10200
10201                         /* Amount of data (w/o headers) on the linear part of the SKB */
10202                         first_bd_sz = skb_headlen(skb) - hlen;
10203
10204                         wnd_sum  = first_bd_sz;
10205
10206                         /* Calculate the first sum - it's special */
10207                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10208                                 wnd_sum +=
10209                                         skb_shinfo(skb)->frags[frag_idx].size;
10210
10211                         /* If there was data on linear skb data - check it */
10212                         if (first_bd_sz > 0) {
10213                                 if (unlikely(wnd_sum < lso_mss)) {
10214                                         to_copy = 1;
10215                                         goto exit_lbl;
10216                                 }
10217
10218                                 wnd_sum -= first_bd_sz;
10219                         }
10220
10221                         /* Others are easier: run through the frag list and
10222                            check all windows */
10223                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10224                                 wnd_sum +=
10225                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10226
10227                                 if (unlikely(wnd_sum < lso_mss)) {
10228                                         to_copy = 1;
10229                                         break;
10230                                 }
10231                                 wnd_sum -=
10232                                         skb_shinfo(skb)->frags[wnd_idx].size;
10233                         }
10234                 } else {
10235                         /* a non-LSO packet this fragmented should
10236                            always be linearized */
10237                         to_copy = 1;
10238                 }
10239         }
10240
10241 exit_lbl:
10242         if (unlikely(to_copy))
10243                 DP(NETIF_MSG_TX_QUEUED,
10244                    "Linearization IS REQUIRED for %s packet. "
10245                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10246                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10247                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10248
10249         return to_copy;
10250 }
10251 #endif
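
/* Editor's reading of the check above: the FW can fetch only
 * MAX_FETCH_BD BDs at a time, so no single MSS may be spread over more
 * than (MAX_FETCH_BD - 3) data BDs - hence every window of wnd_size
 * consecutive frags must carry at least lso_mss bytes.  wnd_sum slides
 * that window one frag at a time, adding the entering frag and
 * subtracting the leaving one; a failing packet gets linearized.
 */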
10252
10253 /* called with netif_tx_lock
10254  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10255  * netif_wake_queue()
10256  */
10257 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10258 {
10259         struct bnx2x *bp = netdev_priv(dev);
10260         struct bnx2x_fastpath *fp;
10261         struct netdev_queue *txq;
10262         struct sw_tx_bd *tx_buf;
10263         struct eth_tx_bd *tx_bd;
10264         struct eth_tx_parse_bd *pbd = NULL;
10265         u16 pkt_prod, bd_prod;
10266         int nbd, fp_index;
10267         dma_addr_t mapping;
10268         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10269         int vlan_off = (bp->e1hov ? 4 : 0);
10270         int i;
10271         u8 hlen = 0;
10272
10273 #ifdef BNX2X_STOP_ON_ERROR
10274         if (unlikely(bp->panic))
10275                 return NETDEV_TX_BUSY;
10276 #endif
10277
10278         fp_index = skb_get_queue_mapping(skb);
10279         txq = netdev_get_tx_queue(dev, fp_index);
10280
10281         fp = &bp->fp[fp_index];
10282
10283         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10284                 fp->eth_q_stats.driver_xoff++;
10285                 netif_tx_stop_queue(txq);
10286                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10287                 return NETDEV_TX_BUSY;
10288         }
10289
10290         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10291            "  gso type %x  xmit_type %x\n",
10292            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10293            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10294
10295 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10296         /* First, check if we need to linearize the skb (due to FW
10297            restrictions). No need to check fragmentation if page size > 8K
10298            (there will be no violation of FW restrictions) */
10299         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10300                 /* Statistics of linearization */
10301                 bp->lin_cnt++;
10302                 if (skb_linearize(skb) != 0) {
10303                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10304                            "silently dropping this SKB\n");
10305                         dev_kfree_skb_any(skb);
10306                         return NETDEV_TX_OK;
10307                 }
10308         }
10309 #endif
10310
10311         /*
10312          * Please read carefully. First we use one BD which we mark as start,
10313          * then for TSO or xsum we have a parsing info BD,
10314          * and only then we have the rest of the TSO BDs.
10315          * (don't forget to mark the last one as last,
10316          * and to unmap only AFTER you write to the BD ...)
10317          * And above all, all PBD sizes are in words - NOT DWORDS!
10318          */
10319
10320         pkt_prod = fp->tx_pkt_prod++;
10321         bd_prod = TX_BD(fp->tx_bd_prod);
10322
10323         /* get a tx_buf and first BD */
10324         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10325         tx_bd = &fp->tx_desc_ring[bd_prod];
10326
10327         tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10328         tx_bd->general_data = (UNICAST_ADDRESS <<
10329                                ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10330         /* header nbd */
10331         tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10332
10333         /* remember the first BD of the packet */
10334         tx_buf->first_bd = fp->tx_bd_prod;
10335         tx_buf->skb = skb;
10336
10337         DP(NETIF_MSG_TX_QUEUED,
10338            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10339            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10340
10341 #ifdef BCM_VLAN
10342         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10343             (bp->flags & HW_VLAN_TX_FLAG)) {
10344                 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10345                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10346                 vlan_off += 4;
10347         } else
10348 #endif
10349                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10350
10351         if (xmit_type) {
10352                 /* turn on parsing and get a BD */
10353                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10354                 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10355
10356                 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10357         }
10358
10359         if (xmit_type & XMIT_CSUM) {
10360                 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10361
10362                 /* for now NS flag is not used in Linux */
10363                 pbd->global_data =
10364                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10365                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10366
10367                 pbd->ip_hlen = (skb_transport_header(skb) -
10368                                 skb_network_header(skb)) / 2;
10369
10370                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10371
10372                 pbd->total_hlen = cpu_to_le16(hlen);
10373                 hlen = hlen*2 - vlan_off;
10374
10375                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10376
10377                 if (xmit_type & XMIT_CSUM_V4)
10378                         tx_bd->bd_flags.as_bitfield |=
10379                                                 ETH_TX_BD_FLAGS_IP_CSUM;
10380                 else
10381                         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10382
10383                 if (xmit_type & XMIT_CSUM_TCP) {
10384                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10385
10386                 } else {
10387                         s8 fix = SKB_CS_OFF(skb); /* signed! */
10388
10389                         pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10390                         pbd->cs_offset = fix / 2;
10391
10392                         DP(NETIF_MSG_TX_QUEUED,
10393                            "hlen %d  offset %d  fix %d  csum before fix %x\n",
10394                            le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10395                            SKB_CS(skb));
10396
10397                         /* HW bug: fixup the CSUM */
10398                         pbd->tcp_pseudo_csum =
10399                                 bnx2x_csum_fix(skb_transport_header(skb),
10400                                                SKB_CS(skb), fix);
10401
10402                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10403                            pbd->tcp_pseudo_csum);
10404                 }
10405         }
10406
10407         mapping = pci_map_single(bp->pdev, skb->data,
10408                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10409
10410         tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10411         tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10412         nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10413         tx_bd->nbd = cpu_to_le16(nbd);
10414         tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10415
10416         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
10417            "  nbytes %d  flags %x  vlan %x\n",
10418            tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10419            le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10420            le16_to_cpu(tx_bd->vlan));
10421
10422         if (xmit_type & XMIT_GSO) {
10423
10424                 DP(NETIF_MSG_TX_QUEUED,
10425                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
10426                    skb->len, hlen, skb_headlen(skb),
10427                    skb_shinfo(skb)->gso_size);
10428
10429                 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10430
10431                 if (unlikely(skb_headlen(skb) > hlen))
10432                         bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10433                                                  bd_prod, ++nbd);
10434
10435                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10436                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10437                 pbd->tcp_flags = pbd_tcp_flags(skb);
10438
10439                 if (xmit_type & XMIT_GSO_V4) {
10440                         pbd->ip_id = swab16(ip_hdr(skb)->id);
10441                         pbd->tcp_pseudo_csum =
10442                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10443                                                           ip_hdr(skb)->daddr,
10444                                                           0, IPPROTO_TCP, 0));
10445
10446                 } else
10447                         pbd->tcp_pseudo_csum =
10448                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10449                                                         &ipv6_hdr(skb)->daddr,
10450                                                         0, IPPROTO_TCP, 0));
10451
10452                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10453         }
10454
10455         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10456                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10457
10458                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10459                 tx_bd = &fp->tx_desc_ring[bd_prod];
10460
10461                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10462                                        frag->size, PCI_DMA_TODEVICE);
10463
10464                 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10465                 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10466                 tx_bd->nbytes = cpu_to_le16(frag->size);
10467                 tx_bd->vlan = cpu_to_le16(pkt_prod);
10468                 tx_bd->bd_flags.as_bitfield = 0;
10469
10470                 DP(NETIF_MSG_TX_QUEUED,
10471                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
10472                    i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10473                    le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10474         }
10475
10476         /* now at last mark the BD as the last BD */
10477         tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10478
10479         DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
10480            tx_bd, tx_bd->bd_flags.as_bitfield);
10481
10482         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10483
10484         /* now send a tx doorbell, counting the next BD
10485          * if the packet contains or ends with it
10486          */
10487         if (TX_BD_POFF(bd_prod) < nbd)
10488                 nbd++;
10489
10490         if (pbd)
10491                 DP(NETIF_MSG_TX_QUEUED,
10492                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
10493                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
10494                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10495                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10496                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10497
10498         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
10499
10500         /*
10501          * Make sure that the BD data is updated before updating the producer
10502          * since FW might read the BD right after the producer is updated.
10503          * This is only applicable for weak-ordered memory model archs such
10504          * as IA-64. The following barrier is also mandatory since the FW
10505          * assumes packets always have BDs.
10506          */
10507         wmb();
10508
10509         le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10510         mb(); /* FW restriction: must not reorder writing nbd and packets */
10511         le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10512         DOORBELL(bp, fp->index, 0);
10513
10514         mmiowb();
10515
10516         fp->tx_bd_prod += nbd;
10517         dev->trans_start = jiffies;
10518
10519         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10520                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10521                    if we put Tx into XOFF state. */
10522                 smp_mb();
10523                 netif_tx_stop_queue(txq);
10524                 fp->eth_q_stats.driver_xoff++;
10525                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10526                         netif_tx_wake_queue(txq);
10527         }
10528         fp->tx_pkt++;
10529
10530         return NETDEV_TX_OK;
10531 }
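
/* Editor's reading of the BD accounting above: nbd starts at
 * nr_frags + 1 (first BD) or nr_frags + 2 (first BD plus parsing BD),
 * grows by one when bnx2x_tx_split() adds a data BD, and by one more
 * when TX_BD_POFF(bd_prod) < nbd, i.e. when the chain wrapped through a
 * page boundary and thus consumed a "next page" BD as well.
 */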
10532
10533 /* called with rtnl_lock */
10534 static int bnx2x_open(struct net_device *dev)
10535 {
10536         struct bnx2x *bp = netdev_priv(dev);
10537
10538         netif_carrier_off(dev);
10539
10540         bnx2x_set_power_state(bp, PCI_D0);
10541
10542         return bnx2x_nic_load(bp, LOAD_OPEN);
10543 }
10544
10545 /* called with rtnl_lock */
10546 static int bnx2x_close(struct net_device *dev)
10547 {
10548         struct bnx2x *bp = netdev_priv(dev);
10549
10550         /* Unload the driver, release IRQs */
10551         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10552         if (atomic_read(&bp->pdev->enable_cnt) == 1)
10553                 if (!CHIP_REV_IS_SLOW(bp))
10554                         bnx2x_set_power_state(bp, PCI_D3hot);
10555
10556         return 0;
10557 }
10558
10559 /* called with netif_tx_lock from dev_mcast.c */
10560 static void bnx2x_set_rx_mode(struct net_device *dev)
10561 {
10562         struct bnx2x *bp = netdev_priv(dev);
10563         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10564         int port = BP_PORT(bp);
10565
10566         if (bp->state != BNX2X_STATE_OPEN) {
10567                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10568                 return;
10569         }
10570
10571         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10572
10573         if (dev->flags & IFF_PROMISC)
10574                 rx_mode = BNX2X_RX_MODE_PROMISC;
10575
10576         else if ((dev->flags & IFF_ALLMULTI) ||
10577                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10578                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10579
10580         else { /* some multicasts */
10581                 if (CHIP_IS_E1(bp)) {
10582                         int i, old, offset;
10583                         struct dev_mc_list *mclist;
10584                         struct mac_configuration_cmd *config =
10585                                                 bnx2x_sp(bp, mcast_config);
10586
10587                         for (i = 0, mclist = dev->mc_list;
10588                              mclist && (i < dev->mc_count);
10589                              i++, mclist = mclist->next) {
10590
10591                                 config->config_table[i].
10592                                         cam_entry.msb_mac_addr =
10593                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
10594                                 config->config_table[i].
10595                                         cam_entry.middle_mac_addr =
10596                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
10597                                 config->config_table[i].
10598                                         cam_entry.lsb_mac_addr =
10599                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
10600                                 config->config_table[i].cam_entry.flags =
10601                                                         cpu_to_le16(port);
10602                                 config->config_table[i].
10603                                         target_table_entry.flags = 0;
10604                                 config->config_table[i].
10605                                         target_table_entry.client_id = 0;
10606                                 config->config_table[i].
10607                                         target_table_entry.vlan_id = 0;
10608
10609                                 DP(NETIF_MSG_IFUP,
10610                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10611                                    config->config_table[i].
10612                                                 cam_entry.msb_mac_addr,
10613                                    config->config_table[i].
10614                                                 cam_entry.middle_mac_addr,
10615                                    config->config_table[i].
10616                                                 cam_entry.lsb_mac_addr);
10617                         }
10618                         old = config->hdr.length;
10619                         if (old > i) {
10620                                 for (; i < old; i++) {
10621                                         if (CAM_IS_INVALID(config->
10622                                                            config_table[i])) {
10623                                                 /* already invalidated */
10624                                                 break;
10625                                         }
10626                                         /* invalidate */
10627                                         CAM_INVALIDATE(config->
10628                                                        config_table[i]);
10629                                 }
10630                         }
10631
10632                         if (CHIP_REV_IS_SLOW(bp))
10633                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10634                         else
10635                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
10636
10637                         config->hdr.length = i;
10638                         config->hdr.offset = offset;
10639                         config->hdr.client_id = bp->fp->cl_id;
10640                         config->hdr.reserved1 = 0;
10641
10642                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10643                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10644                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10645                                       0);
10646                 } else { /* E1H */
10647                         /* Accept one or more multicasts */
10648                         struct dev_mc_list *mclist;
10649                         u32 mc_filter[MC_HASH_SIZE];
10650                         u32 crc, bit, regidx;
10651                         int i;
10652
10653                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10654
10655                         for (i = 0, mclist = dev->mc_list;
10656                              mclist && (i < dev->mc_count);
10657                              i++, mclist = mclist->next) {
10658
10659                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10660                                    mclist->dmi_addr);
10661
10662                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10663                                 bit = (crc >> 24) & 0xff;
10664                                 regidx = bit >> 5;
10665                                 bit &= 0x1f;
10666                                 mc_filter[regidx] |= (1 << bit);
10667                         }
10668
10669                         for (i = 0; i < MC_HASH_SIZE; i++)
10670                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10671                                        mc_filter[i]);
10672                 }
10673         }
10674
10675         bp->rx_mode = rx_mode;
10676         bnx2x_set_storm_rx_mode(bp);
10677 }
10678
10679 /* called with rtnl_lock */
10680 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10681 {
10682         struct sockaddr *addr = p;
10683         struct bnx2x *bp = netdev_priv(dev);
10684
10685         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10686                 return -EINVAL;
10687
10688         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10689         if (netif_running(dev)) {
10690                 if (CHIP_IS_E1(bp))
10691                         bnx2x_set_mac_addr_e1(bp, 1);
10692                 else
10693                         bnx2x_set_mac_addr_e1h(bp, 1);
10694         }
10695
10696         return 0;
10697 }
10698
10699 /* called with rtnl_lock */
10700 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10701 {
10702         struct mii_ioctl_data *data = if_mii(ifr);
10703         struct bnx2x *bp = netdev_priv(dev);
10704         int port = BP_PORT(bp);
10705         int err;
10706
10707         switch (cmd) {
10708         case SIOCGMIIPHY:
10709                 data->phy_id = bp->port.phy_addr;
10710
10711                 /* fallthrough */
10712
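        /* The MII register access goes through the clause-45 helpers;
         * only the low five bits of reg_num are used and the access is
         * directed at DEFAULT_PHY_DEV_ADDR.
         */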
10713         case SIOCGMIIREG: {
10714                 u16 mii_regval;
10715
10716                 if (!netif_running(dev))
10717                         return -EAGAIN;
10718
10719                 mutex_lock(&bp->port.phy_mutex);
10720                 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10721                                       DEFAULT_PHY_DEV_ADDR,
10722                                       (data->reg_num & 0x1f), &mii_regval);
10723                 data->val_out = mii_regval;
10724                 mutex_unlock(&bp->port.phy_mutex);
10725                 return err;
10726         }
10727
10728         case SIOCSMIIREG:
10729                 if (!capable(CAP_NET_ADMIN))
10730                         return -EPERM;
10731
10732                 if (!netif_running(dev))
10733                         return -EAGAIN;
10734
10735                 mutex_lock(&bp->port.phy_mutex);
10736                 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10737                                        DEFAULT_PHY_DEV_ADDR,
10738                                        (data->reg_num & 0x1f), data->val_in);
10739                 mutex_unlock(&bp->port.phy_mutex);
10740                 return err;
10741
10742         default:
10743                 /* do nothing */
10744                 break;
10745         }
10746
10747         return -EOPNOTSUPP;
10748 }
10749
10750 /* called with rtnl_lock */
10751 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10752 {
10753         struct bnx2x *bp = netdev_priv(dev);
10754         int rc = 0;
10755
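        /* Reject MTUs above the jumbo-frame limit, or so small that the
         * resulting frame would be below the minimum Ethernet packet size.
         */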
10756         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10757             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10758                 return -EINVAL;
10759
10760         /* This does not race with packet allocation
10761          * because the actual alloc size is
10762          * only updated as part of load
10763          */
10764         dev->mtu = new_mtu;
10765
10766         if (netif_running(dev)) {
10767                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10768                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10769         }
10770
10771         return rc;
10772 }
10773
10774 static void bnx2x_tx_timeout(struct net_device *dev)
10775 {
10776         struct bnx2x *bp = netdev_priv(dev);
10777
10778 #ifdef BNX2X_STOP_ON_ERROR
10779         if (!bp->panic)
10780                 bnx2x_panic();
10781 #endif
10782         /* This allows the netif to be shut down gracefully before resetting */
10783         schedule_work(&bp->reset_task);
10784 }
10785
10786 #ifdef BCM_VLAN
10787 /* called with rtnl_lock */
10788 static void bnx2x_vlan_rx_register(struct net_device *dev,
10789                                    struct vlan_group *vlgrp)
10790 {
10791         struct bnx2x *bp = netdev_priv(dev);
10792
10793         bp->vlgrp = vlgrp;
10794
10795         /* Set flags according to the required capabilities */
10796         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10797
10798         if (dev->features & NETIF_F_HW_VLAN_TX)
10799                 bp->flags |= HW_VLAN_TX_FLAG;
10800
10801         if (dev->features & NETIF_F_HW_VLAN_RX)
10802                 bp->flags |= HW_VLAN_RX_FLAG;
10803
10804         if (netif_running(dev))
10805                 bnx2x_set_client_config(bp);
10806 }
10807
10808 #endif
10809
10810 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
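/* netpoll entry point: with the device IRQ masked, run the interrupt
 * handler directly so pending completions are serviced even when normal
 * interrupt delivery is unavailable (e.g. under netconsole).
 */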
10811 static void poll_bnx2x(struct net_device *dev)
10812 {
10813         struct bnx2x *bp = netdev_priv(dev);
10814
10815         disable_irq(bp->pdev->irq);
10816         bnx2x_interrupt(bp->pdev->irq, dev);
10817         enable_irq(bp->pdev->irq);
10818 }
10819 #endif
10820
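/* Standard net_device callbacks; the VLAN and netpoll hooks are only
 * wired up when the corresponding support is compiled in.
 */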
10821 static const struct net_device_ops bnx2x_netdev_ops = {
10822         .ndo_open               = bnx2x_open,
10823         .ndo_stop               = bnx2x_close,
10824         .ndo_start_xmit         = bnx2x_start_xmit,
10825         .ndo_set_multicast_list = bnx2x_set_rx_mode,
10826         .ndo_set_mac_address    = bnx2x_change_mac_addr,
10827         .ndo_validate_addr      = eth_validate_addr,
10828         .ndo_do_ioctl           = bnx2x_ioctl,
10829         .ndo_change_mtu         = bnx2x_change_mtu,
10830         .ndo_tx_timeout         = bnx2x_tx_timeout,
10831 #ifdef BCM_VLAN
10832         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
10833 #endif
10834 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10835         .ndo_poll_controller    = poll_bnx2x,
10836 #endif
10837 };
10838
10839 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10840                                     struct net_device *dev)
10841 {
10842         struct bnx2x *bp;
10843         int rc;
10844
10845         SET_NETDEV_DEV(dev, &pdev->dev);
10846         bp = netdev_priv(dev);
10847
10848         bp->dev = dev;
10849         bp->pdev = pdev;
10850         bp->flags = 0;
10851         bp->func = PCI_FUNC(pdev->devfn);
10852
10853         rc = pci_enable_device(pdev);
10854         if (rc) {
10855                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10856                 goto err_out;
10857         }
10858
10859         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10860                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10861                        " aborting\n");
10862                 rc = -ENODEV;
10863                 goto err_out_disable;
10864         }
10865
10866         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10867                 printk(KERN_ERR PFX "Cannot find second PCI device"
10868                        " base address, aborting\n");
10869                 rc = -ENODEV;
10870                 goto err_out_disable;
10871         }
10872
10873         if (atomic_read(&pdev->enable_cnt) == 1) {
10874                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10875                 if (rc) {
10876                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10877                                " aborting\n");
10878                         goto err_out_disable;
10879                 }
10880
10881                 pci_set_master(pdev);
10882                 pci_save_state(pdev);
10883         }
10884
10885         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10886         if (bp->pm_cap == 0) {
10887                 printk(KERN_ERR PFX "Cannot find power management"
10888                        " capability, aborting\n");
10889                 rc = -EIO;
10890                 goto err_out_release;
10891         }
10892
10893         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10894         if (bp->pcie_cap == 0) {
10895                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10896                        " aborting\n");
10897                 rc = -EIO;
10898                 goto err_out_release;
10899         }
10900
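        /* Prefer 64-bit DMA addressing (DAC); if the 64-bit streaming mask
         * is accepted the coherent mask must match, otherwise fall back to
         * 32-bit addressing or fail the probe.
         */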
10901         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10902                 bp->flags |= USING_DAC_FLAG;
10903                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10904                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10905                                " failed, aborting\n");
10906                         rc = -EIO;
10907                         goto err_out_release;
10908                 }
10909
10910         } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10911                 printk(KERN_ERR PFX "System does not support DMA,"
10912                        " aborting\n");
10913                 rc = -EIO;
10914                 goto err_out_release;
10915         }
10916
10917         dev->mem_start = pci_resource_start(pdev, 0);
10918         dev->base_addr = dev->mem_start;
10919         dev->mem_end = pci_resource_end(pdev, 0);
10920
10921         dev->irq = pdev->irq;
10922
10923         bp->regview = pci_ioremap_bar(pdev, 0);
10924         if (!bp->regview) {
10925                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10926                 rc = -ENOMEM;
10927                 goto err_out_release;
10928         }
10929
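        /* BAR 2 holds the doorbell space; map no more than BNX2X_DB_SIZE
         * of it, since the BAR itself may be larger than what the driver
         * uses.
         */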
10930         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10931                                         min_t(u64, BNX2X_DB_SIZE,
10932                                               pci_resource_len(pdev, 2)));
10933         if (!bp->doorbells) {
10934                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10935                 rc = -ENOMEM;
10936                 goto err_out_unmap;
10937         }
10938
10939         bnx2x_set_power_state(bp, PCI_D0);
10940
10941         /* clean indirect addresses */
10942         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10943                                PCICFG_VENDOR_ID_OFFSET);
10944         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10945         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10946         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10947         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10948
10949         dev->watchdog_timeo = TX_TIMEOUT;
10950
10951         dev->netdev_ops = &bnx2x_netdev_ops;
10952         dev->ethtool_ops = &bnx2x_ethtool_ops;
10953         dev->features |= NETIF_F_SG;
10954         dev->features |= NETIF_F_HW_CSUM;
10955         if (bp->flags & USING_DAC_FLAG)
10956                 dev->features |= NETIF_F_HIGHDMA;
10957 #ifdef BCM_VLAN
10958         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10959         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10960 #endif
10961         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10962         dev->features |= NETIF_F_TSO6;
10963
10964         return 0;
10965
10966 err_out_unmap:
10967         if (bp->regview) {
10968                 iounmap(bp->regview);
10969                 bp->regview = NULL;
10970         }
10971         if (bp->doorbells) {
10972                 iounmap(bp->doorbells);
10973                 bp->doorbells = NULL;
10974         }
10975
10976 err_out_release:
10977         if (atomic_read(&pdev->enable_cnt) == 1)
10978                 pci_release_regions(pdev);
10979
10980 err_out_disable:
10981         pci_disable_device(pdev);
10982         pci_set_drvdata(pdev, NULL);
10983
10984 err_out:
10985         return rc;
10986 }
10987
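/* Negotiated PCIe link width (lane count), read from the device's copy of
 * the PCIe link control/status registers via the PCICFG window.
 */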
10988 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10989 {
10990         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10991
10992         val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10993         return val;
10994 }
10995
10996 /* return value: 1 = 2.5GHz, 2 = 5GHz */
10997 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10998 {
10999         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11000
11001         val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11002         return val;
11003 }
11004
11005 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11006                                     const struct pci_device_id *ent)
11007 {
11008         static int version_printed;
11009         struct net_device *dev = NULL;
11010         struct bnx2x *bp;
11011         int rc;
11012
11013         if (version_printed++ == 0)
11014                 printk(KERN_INFO "%s", version);
11015
11016         /* dev zeroed in alloc_etherdev_mq() */
11017         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11018         if (!dev) {
11019                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11020                 return -ENOMEM;
11021         }
11022
11023         bp = netdev_priv(dev);
11024         bp->msglevel = debug;
11025
11026         rc = bnx2x_init_dev(pdev, dev);
11027         if (rc < 0) {
11028                 free_netdev(dev);
11029                 return rc;
11030         }
11031
11032         pci_set_drvdata(pdev, dev);
11033
11034         rc = bnx2x_init_bp(bp);
11035         if (rc)
11036                 goto init_one_exit;
11037
11038         rc = register_netdev(dev);
11039         if (rc) {
11040                 dev_err(&pdev->dev, "Cannot register net device\n");
11041                 goto init_one_exit;
11042         }
11043
11044         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11045                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11046                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11047                bnx2x_get_pcie_width(bp),
11048                (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11049                dev->base_addr, bp->pdev->irq);
11050         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11051
11052         return 0;
11053
11054 init_one_exit:
11055         if (bp->regview)
11056                 iounmap(bp->regview);
11057
11058         if (bp->doorbells)
11059                 iounmap(bp->doorbells);
11060
11061         free_netdev(dev);
11062
11063         if (atomic_read(&pdev->enable_cnt) == 1)
11064                 pci_release_regions(pdev);
11065
11066         pci_disable_device(pdev);
11067         pci_set_drvdata(pdev, NULL);
11068
11069         return rc;
11070 }
11071
11072 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11073 {
11074         struct net_device *dev = pci_get_drvdata(pdev);
11075         struct bnx2x *bp;
11076
11077         if (!dev) {
11078                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11079                 return;
11080         }
11081         bp = netdev_priv(dev);
11082
11083         unregister_netdev(dev);
11084
11085         if (bp->regview)
11086                 iounmap(bp->regview);
11087
11088         if (bp->doorbells)
11089                 iounmap(bp->doorbells);
11090
11091         free_netdev(dev);
11092
11093         if (atomic_read(&pdev->enable_cnt) == 1)
11094                 pci_release_regions(pdev);
11095
11096         pci_disable_device(pdev);
11097         pci_set_drvdata(pdev, NULL);
11098 }
11099
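/* Legacy PCI power-management hooks: detach and unload the NIC if it is
 * running, then enter the PCI power state chosen for this system sleep
 * state; resume reverses the sequence.
 */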
11100 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11101 {
11102         struct net_device *dev = pci_get_drvdata(pdev);
11103         struct bnx2x *bp;
11104
11105         if (!dev) {
11106                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11107                 return -ENODEV;
11108         }
11109         bp = netdev_priv(dev);
11110
11111         rtnl_lock();
11112
11113         pci_save_state(pdev);
11114
11115         if (!netif_running(dev)) {
11116                 rtnl_unlock();
11117                 return 0;
11118         }
11119
11120         netif_device_detach(dev);
11121
11122         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11123
11124         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11125
11126         rtnl_unlock();
11127
11128         return 0;
11129 }
11130
11131 static int bnx2x_resume(struct pci_dev *pdev)
11132 {
11133         struct net_device *dev = pci_get_drvdata(pdev);
11134         struct bnx2x *bp;
11135         int rc;
11136
11137         if (!dev) {
11138                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11139                 return -ENODEV;
11140         }
11141         bp = netdev_priv(dev);
11142
11143         rtnl_lock();
11144
11145         pci_restore_state(pdev);
11146
11147         if (!netif_running(dev)) {
11148                 rtnl_unlock();
11149                 return 0;
11150         }
11151
11152         bnx2x_set_power_state(bp, PCI_D0);
11153         netif_device_attach(dev);
11154
11155         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11156
11157         rtnl_unlock();
11158
11159         return rc;
11160 }
11161
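
/* Lightweight teardown used by the PCI error handlers: the hardware may
 * no longer be accessible, so only host-side resources are released and
 * the host copy of the E1 CAM table is marked invalid.
 */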
11162 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11163 {
11164         int i;
11165
11166         bp->state = BNX2X_STATE_ERROR;
11167
11168         bp->rx_mode = BNX2X_RX_MODE_NONE;
11169
11170         bnx2x_netif_stop(bp, 0);
11171
11172         del_timer_sync(&bp->timer);
11173         bp->stats_state = STATS_STATE_DISABLED;
11174         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11175
11176         /* Release IRQs */
11177         bnx2x_free_irq(bp);
11178
11179         if (CHIP_IS_E1(bp)) {
11180                 struct mac_configuration_cmd *config =
11181                                                 bnx2x_sp(bp, mcast_config);
11182
11183                 for (i = 0; i < config->hdr.length; i++)
11184                         CAM_INVALIDATE(config->config_table[i]);
11185         }
11186
11187         /* Free SKBs, SGEs, TPA pool and driver internals */
11188         bnx2x_free_skbs(bp);
11189         for_each_rx_queue(bp, i)
11190                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
11191         for_each_rx_queue(bp, i)
11192                 netif_napi_del(&bnx2x_fp(bp, i, napi));
11193         bnx2x_free_mem(bp);
11194
11195         bp->state = BNX2X_STATE_CLOSED;
11196
11197         netif_carrier_off(bp->dev);
11198
11199         return 0;
11200 }
11201
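/* Re-read the shared-memory (MCP) state after a slot reset; if the shmem
 * base looks invalid, the management firmware is treated as absent and
 * NO_MCP_FLAG is set.
 */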
11202 static void bnx2x_eeh_recover(struct bnx2x *bp)
11203 {
11204         u32 val;
11205
11206         mutex_init(&bp->port.phy_mutex);
11207
11208         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11209         bp->link_params.shmem_base = bp->common.shmem_base;
11210         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11211
11212         if (!bp->common.shmem_base ||
11213             (bp->common.shmem_base < 0xA0000) ||
11214             (bp->common.shmem_base >= 0xC0000)) {
11215                 BNX2X_DEV_INFO("MCP not active\n");
11216                 bp->flags |= NO_MCP_FLAG;
11217                 return;
11218         }
11219
11220         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11221         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11222                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11223                 BNX2X_ERR("BAD MCP validity signature\n");
11224
11225         if (!BP_NOMCP(bp)) {
11226                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11227                               & DRV_MSG_SEQ_NUMBER_MASK);
11228                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11229         }
11230 }
11231
11232 /**
11233  * bnx2x_io_error_detected - called when PCI error is detected
11234  * @pdev: Pointer to PCI device
11235  * @state: The current pci connection state
11236  *
11237  * This function is called after a PCI bus error affecting
11238  * this device has been detected.
11239  */
11240 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11241                                                 pci_channel_state_t state)
11242 {
11243         struct net_device *dev = pci_get_drvdata(pdev);
11244         struct bnx2x *bp = netdev_priv(dev);
11245
11246         rtnl_lock();
11247
11248         netif_device_detach(dev);
11249
11250         if (netif_running(dev))
11251                 bnx2x_eeh_nic_unload(bp);
11252
11253         pci_disable_device(pdev);
11254
11255         rtnl_unlock();
11256
11257         /* Request a slot reset */
11258         return PCI_ERS_RESULT_NEED_RESET;
11259 }
11260
11261 /**
11262  * bnx2x_io_slot_reset - called after the PCI bus has been reset
11263  * @pdev: Pointer to PCI device
11264  *
11265  * Restart the card from scratch, as if from a cold boot.
11266  */
11267 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11268 {
11269         struct net_device *dev = pci_get_drvdata(pdev);
11270         struct bnx2x *bp = netdev_priv(dev);
11271
11272         rtnl_lock();
11273
11274         if (pci_enable_device(pdev)) {
11275                 dev_err(&pdev->dev,
11276                         "Cannot re-enable PCI device after reset\n");
11277                 rtnl_unlock();
11278                 return PCI_ERS_RESULT_DISCONNECT;
11279         }
11280
11281         pci_set_master(pdev);
11282         pci_restore_state(pdev);
11283
11284         if (netif_running(dev))
11285                 bnx2x_set_power_state(bp, PCI_D0);
11286
11287         rtnl_unlock();
11288
11289         return PCI_ERS_RESULT_RECOVERED;
11290 }
11291
11292 /**
11293  * bnx2x_io_resume - called when traffic can start flowing again
11294  * @pdev: Pointer to PCI device
11295  *
11296  * This callback is called when the error recovery driver tells us that
11297  * it is OK to resume normal operation.
11298  */
11299 static void bnx2x_io_resume(struct pci_dev *pdev)
11300 {
11301         struct net_device *dev = pci_get_drvdata(pdev);
11302         struct bnx2x *bp = netdev_priv(dev);
11303
11304         rtnl_lock();
11305
11306         bnx2x_eeh_recover(bp);
11307
11308         if (netif_running(dev))
11309                 bnx2x_nic_load(bp, LOAD_NORMAL);
11310
11311         netif_device_attach(dev);
11312
11313         rtnl_unlock();
11314 }
11315
11316 static struct pci_error_handlers bnx2x_err_handler = {
11317         .error_detected = bnx2x_io_error_detected,
11318         .slot_reset     = bnx2x_io_slot_reset,
11319         .resume         = bnx2x_io_resume,
11320 };
11321
11322 static struct pci_driver bnx2x_pci_driver = {
11323         .name        = DRV_MODULE_NAME,
11324         .id_table    = bnx2x_pci_tbl,
11325         .probe       = bnx2x_init_one,
11326         .remove      = __devexit_p(bnx2x_remove_one),
11327         .suspend     = bnx2x_suspend,
11328         .resume      = bnx2x_resume,
11329         .err_handler = &bnx2x_err_handler,
11330 };
11331
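/* The driver's deferred work runs on a dedicated single-threaded
 * workqueue, created before the PCI driver registers so that it is
 * available from the first probe.
 */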
11332 static int __init bnx2x_init(void)
11333 {
11334         bnx2x_wq = create_singlethread_workqueue("bnx2x");
11335         if (bnx2x_wq == NULL) {
11336                 printk(KERN_ERR PFX "Cannot create workqueue\n");
11337                 return -ENOMEM;
11338         }
11339
11340         return pci_register_driver(&bnx2x_pci_driver);
11341 }
11342
11343 static void __exit bnx2x_cleanup(void)
11344 {
11345         pci_unregister_driver(&bnx2x_pci_driver);
11346
11347         destroy_workqueue(bnx2x_wq);
11348 }
11349
11350 module_init(bnx2x_init);
11351 module_exit(bnx2x_cleanup);
11352