bnx2x: Combine get_pcie_width and get_pcie_speed
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.48.114-1"
#define DRV_MODULE_RELDATE      "2009/07/29"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1               "bnx2x-e1-"
#define FW_FILE_PREFIX_E1H              "bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

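/* The two helpers above implement indirect register access through the
 * PCI configuration window: the target GRC address is latched via
 * PCICFG_GRC_ADDRESS, the data moves through PCICFG_GRC_DATA, and the
 * window is then parked back at PCICFG_VENDOR_ID_OFFSET so that later
 * config cycles cannot alias device registers.  A minimal usage sketch
 * (SOME_GRC_REG and SOME_BIT are hypothetical names, not driver
 * definitions):
 *
 *	u32 v = bnx2x_reg_rd_ind(bp, SOME_GRC_REG);
 *	bnx2x_reg_wr_ind(bp, SOME_GRC_REG, v | SOME_BIT);
 *
 * As the comment above notes, locking is left to the MCP, so these are
 * only safe at init time.
 */
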
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

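/* Posting a command is thus a two-step affair: the dmae_command image is
 * copied dword by dword into the command memory slot for channel idx
 * (DMAE_REG_CMD_MEM + idx * sizeof(struct dmae_command)), and a write of
 * 1 to the per-channel GO register kicks the engine.  The sixteen GO
 * registers in dmae_reg_go_c[] map one-to-one onto the sixteen DMAE
 * channels.
 */
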
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = U64_LO(dma_addr);
        dmae->src_addr_hi = U64_HI(dma_addr);
        dmae->dst_addr_lo = dst_addr >> 2;
        dmae->dst_addr_hi = 0;
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

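/* bnx2x_write_dmae() moves len32 dwords from a host DMA buffer into GRC
 * space: the opcode selects a PCI source and GRC destination, completion
 * is signalled by the engine writing DMAE_COMP_VAL back into the wb_comp
 * slowpath slot, and the caller polls that slot up to 200 times (5us per
 * iteration, stretched to 100ms on emulation/FPGA).  Before the engine
 * is ready, writes fall back to the indirect path above.
 * bnx2x_read_dmae() below is the mirror image: GRC source, PCI
 * destination into the wb_data slowpath buffer.
 */
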
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command *dmae = &bp->init_dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        memset(dmae, 0, sizeof(struct dmae_command));

        dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                        DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae->src_addr_lo = src_addr >> 2;
        dmae->src_addr_hi = 0;
        dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae->len = len32;
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
           dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

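/* Wide-bus ("wb") registers are 64 bits wide and are transferred as a
 * hi/lo dword pair through the DMAE; HILO_U64() recomposes the pair on
 * the read side.  bnx2x_wb_rd() is compiled out unless USE_WB_RD is
 * defined, presumably because nothing in this file needs a slowpath
 * 64-bit read.
 */
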
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        printk(KERN_ERR PFX);
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_ERR PFX "end of fw dump\n");
}

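/* The firmware log lives in a circular buffer inside the MCP scratchpad;
 * the dword at scratch offset 0xf104 holds the current write mark,
 * apparently as a 0x08000000-based pointer, hence the subtraction above.
 * The two loops print the older half from the mark to the end of the
 * buffer and then wrap around from 0xF108 back up to the mark, eight
 * big-endian dwords at a time, NUL-terminated so each chunk can be
 * emitted as a string.
 */
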
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

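/* Interrupt enable selects one of three host-coalescing modes via the
 * HC_CONFIG bits: MSI-X clears the single-ISR and INT-line enables, MSI
 * keeps the single-ISR path, and legacy INTx turns everything on, with
 * an extra intermediate write whose final version drops the MSI/MSI-X
 * enable bit.  On E1H the leading/trailing edge masks are then
 * programmed per port; in multi-function mode only this function's own
 * vector (plus, for the PMF, the NIG and GPIO3 attentions) is unmasked.
 */
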
static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

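/* Teardown order matters here: intr_sem is bumped first so any ISR that
 * does fire returns immediately, the IGU is optionally masked, every
 * vector (the default vector plus one per queue under MSI-X) is drained
 * with synchronize_irq(), and only then is the slowpath task cancelled
 * and the workqueue flushed, so nothing is left running afterwards.
 */
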
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

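/* A status-block ack is a single dword write to the per-port
 * COMMAND_REG_INT_ACK: the status block index is combined with the
 * sb_id/storm/update/op fields packed by the IGU_ACK_REGISTER_* shifts,
 * and the trailing mmiowb()/barrier() pair makes sure the ack has left
 * the CPU before the caller carries on.
 */
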
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

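/* BD accounting for a completed packet: nbd is taken from the start BD
 * (minus one for the start BD itself) and new_cons is computed before the
 * walk.  The parse BD and the optional TSO split-header BD are skipped
 * without unmapping because, as noted above, neither carries a DMA
 * mapping; everything that remains is a frag BD, each unmapped as a page
 * before the skb is finally freed.
 */
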
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it is used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

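/* Occupancy math: SUB_S16() yields the in-flight BD count modulo the
 * ring size, and the NUM_TX_RINGS "next-page" entries are treated as
 * permanently used so they are never handed out to the stack.  For
 * example, with an empty ring (prod == cons) this returns
 * tx_ring_size - NUM_TX_RINGS rather than tx_ring_size.
 */
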
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);
        }
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;


        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending = 0;
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

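/* SGE bookkeeping in a nutshell: every ring entry owns one bit in
 * fp->sge_mask, cleared as the CQE's SGL consumes it.  The producer may
 * only advance over 64-entry mask elements that are fully consumed; each
 * such element is reset to all ones and its width added to delta before
 * rx_sge_prod moves, and the bits belonging to "next page" entries are
 * re-cleared since they never carry buffers.
 */
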
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }

                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

1426 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1427                                         struct bnx2x_fastpath *fp,
1428                                         u16 bd_prod, u16 rx_comp_prod,
1429                                         u16 rx_sge_prod)
1430 {
1431         struct ustorm_eth_rx_producers rx_prods = {0};
1432         int i;
1433
1434         /* Update producers */
1435         rx_prods.bd_prod = bd_prod;
1436         rx_prods.cqe_prod = rx_comp_prod;
1437         rx_prods.sge_prod = rx_sge_prod;
1438
1439         /*
1440          * Make sure that the BD and SGE data is updated before updating the
1441          * producers since FW might read the BD/SGE right after the producer
1442          * is updated.
1443          * This is only applicable for weak-ordered memory model archs such
1444          * as IA-64. The following barrier is also mandatory since FW will
1445          * as IA-64. The following barrier is also mandatory since the FW
1446          * assumes that BDs have buffers.
1447         wmb();
1448
1449         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1450                 REG_WR(bp, BAR_USTRORM_INTMEM +
1451                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1452                        ((u32 *)&rx_prods)[i]);
1453
1454         mmiowb(); /* keep prod updates ordered */
1455
1456         DP(NETIF_MSG_RX_STATUS,
1457            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1458            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1459 }
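/* Editor's note: bnx2x_update_rx_prod() above is an instance of the
 * generic "publish, barrier, doorbell" pattern for device-visible
 * rings.  A minimal sketch of the same ordering rules follows; struct
 * demo_ring, demo_publish() and DEMO_DESC_VALID are hypothetical names,
 * not part of this driver.
 */
#if 0   /* illustrative sketch - not built */
struct demo_ring {
        struct demo_desc *desc;         /* descriptors in host memory */
        void __iomem *doorbell;         /* device producer register */
};

static void demo_publish(struct demo_ring *ring, u32 new_prod)
{
        /* 1. fill the descriptor in normal (cacheable) memory */
        ring->desc[new_prod - 1].flags |= DEMO_DESC_VALID;

        /* 2. order the descriptor writes before the producer update;
         *    on a weakly ordered arch the device could otherwise see
         *    the new producer before the descriptor contents */
        wmb();

        /* 3. a single register write tells the device about the work */
        writel(new_prod, ring->doorbell);

        /* 4. keep MMIO writes ordered across CPUs (as above) */
        mmiowb();
}
#endif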
1460
1461 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1462 {
1463         struct bnx2x *bp = fp->bp;
1464         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1465         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1466         int rx_pkt = 0;
1467
1468 #ifdef BNX2X_STOP_ON_ERROR
1469         if (unlikely(bp->panic))
1470                 return 0;
1471 #endif
1472
1473         /* The CQ "next element" is the same size as a regular element,
1474            which is why the plain increment below is safe */
1475         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1476         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1477                 hw_comp_cons++;
1478
1479         bd_cons = fp->rx_bd_cons;
1480         bd_prod = fp->rx_bd_prod;
1481         bd_prod_fw = bd_prod;
1482         sw_comp_cons = fp->rx_comp_cons;
1483         sw_comp_prod = fp->rx_comp_prod;
1484
1485         /* Memory barrier necessary as speculative reads of the rx
1486          * buffer can be ahead of the index in the status block
1487          */
1488         rmb();
1489
1490         DP(NETIF_MSG_RX_STATUS,
1491            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1492            fp->index, hw_comp_cons, sw_comp_cons);
1493
1494         while (sw_comp_cons != hw_comp_cons) {
1495                 struct sw_rx_bd *rx_buf = NULL;
1496                 struct sk_buff *skb;
1497                 union eth_rx_cqe *cqe;
1498                 u8 cqe_fp_flags;
1499                 u16 len, pad;
1500
1501                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1502                 bd_prod = RX_BD(bd_prod);
1503                 bd_cons = RX_BD(bd_cons);
1504
1505                 /* Prefetch the page containing the BD descriptor
1506                    at the producer's index. It will be needed when a new
1507                    skb is allocated */
1508                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1509                                              (&fp->rx_desc_ring[bd_prod])) -
1510                                   PAGE_SIZE + 1));
1511
1512                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1513                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1514
1515                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1516                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1517                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1518                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1519                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1520                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1521
1522                 /* is this a slowpath msg? */
1523                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1524                         bnx2x_sp_event(fp, cqe);
1525                         goto next_cqe;
1526
1527                 /* this is an rx packet */
1528                 } else {
1529                         rx_buf = &fp->rx_buf_ring[bd_cons];
1530                         skb = rx_buf->skb;
1531                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1532                         pad = cqe->fast_path_cqe.placement_offset;
1533
1534                         /* If CQE is marked both TPA_START and TPA_END
1535                            it is a non-TPA CQE */
1536                         if ((!fp->disable_tpa) &&
1537                             (TPA_TYPE(cqe_fp_flags) !=
1538                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1539                                 u16 queue = cqe->fast_path_cqe.queue_index;
1540
1541                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1542                                         DP(NETIF_MSG_RX_STATUS,
1543                                            "calling tpa_start on queue %d\n",
1544                                            queue);
1545
1546                                         bnx2x_tpa_start(fp, queue, skb,
1547                                                         bd_cons, bd_prod);
1548                                         goto next_rx;
1549                                 }
1550
1551                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1552                                         DP(NETIF_MSG_RX_STATUS,
1553                                            "calling tpa_stop on queue %d\n",
1554                                            queue);
1555
1556                                         if (!BNX2X_RX_SUM_FIX(cqe))
1557                                                 BNX2X_ERR("STOP on non-TCP "
1558                                                           "data\n");
1559
1560                                         /* This is the size of the linear data
1561                                            on this skb */
1562                                         len = le16_to_cpu(cqe->fast_path_cqe.
1563                                                                 len_on_bd);
1564                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1565                                                     len, cqe, comp_ring_cons);
1566 #ifdef BNX2X_STOP_ON_ERROR
1567                                         if (bp->panic)
1568                                                 return 0;
1569 #endif
1570
1571                                         bnx2x_update_sge_prod(fp,
1572                                                         &cqe->fast_path_cqe);
1573                                         goto next_cqe;
1574                                 }
1575                         }
1576
1577                         pci_dma_sync_single_for_device(bp->pdev,
1578                                         pci_unmap_addr(rx_buf, mapping),
1579                                                        pad + RX_COPY_THRESH,
1580                                                        PCI_DMA_FROMDEVICE);
1581                         prefetch(skb);
1582                         prefetch(((char *)(skb)) + 128);
1583
1584                         /* is this an error packet? */
1585                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1586                                 DP(NETIF_MSG_RX_ERR,
1587                                    "ERROR  flags %x  rx packet %u\n",
1588                                    cqe_fp_flags, sw_comp_cons);
1589                                 fp->eth_q_stats.rx_err_discard_pkt++;
1590                                 goto reuse_rx;
1591                         }
1592
1593                         /* Since we don't have a jumbo ring,
1594                          * copy small packets if the MTU > 1500
1595                          */
1596                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1597                             (len <= RX_COPY_THRESH)) {
1598                                 struct sk_buff *new_skb;
1599
1600                                 new_skb = netdev_alloc_skb(bp->dev,
1601                                                            len + pad);
1602                                 if (new_skb == NULL) {
1603                                         DP(NETIF_MSG_RX_ERR,
1604                                            "ERROR  packet dropped "
1605                                            "because of alloc failure\n");
1606                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1607                                         goto reuse_rx;
1608                                 }
1609
1610                                 /* aligned copy */
1611                                 skb_copy_from_linear_data_offset(skb, pad,
1612                                                     new_skb->data + pad, len);
1613                                 skb_reserve(new_skb, pad);
1614                                 skb_put(new_skb, len);
1615
1616                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1617
1618                                 skb = new_skb;
1619
1620                         } else
1621                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1622                                 pci_unmap_single(bp->pdev,
1623                                         pci_unmap_addr(rx_buf, mapping),
1624                                                  bp->rx_buf_size,
1625                                                  PCI_DMA_FROMDEVICE);
1626                                 skb_reserve(skb, pad);
1627                                 skb_put(skb, len);
1628
1629                         } else {
1630                                 DP(NETIF_MSG_RX_ERR,
1631                                    "ERROR  packet dropped because "
1632                                    "of alloc failure\n");
1633                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1634 reuse_rx:
1635                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1636                                 goto next_rx;
1637                         }
1638
1639                         skb->protocol = eth_type_trans(skb, bp->dev);
1640
1641                         skb->ip_summed = CHECKSUM_NONE;
1642                         if (bp->rx_csum) {
1643                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1644                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1645                                 else
1646                                         fp->eth_q_stats.hw_csum_err++;
1647                         }
1648                 }
1649
1650                 skb_record_rx_queue(skb, fp->index);
1651 #ifdef BCM_VLAN
1652                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1653                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1654                      PARSING_FLAGS_VLAN))
1655                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1656                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1657                 else
1658 #endif
1659                         netif_receive_skb(skb);
1660
1661
1662 next_rx:
1663                 rx_buf->skb = NULL;
1664
1665                 bd_cons = NEXT_RX_IDX(bd_cons);
1666                 bd_prod = NEXT_RX_IDX(bd_prod);
1667                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1668                 rx_pkt++;
1669 next_cqe:
1670                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1671                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1672
1673                 if (rx_pkt == budget)
1674                         break;
1675         } /* while */
1676
1677         fp->rx_bd_cons = bd_cons;
1678         fp->rx_bd_prod = bd_prod_fw;
1679         fp->rx_comp_cons = sw_comp_cons;
1680         fp->rx_comp_prod = sw_comp_prod;
1681
1682         /* Update producers */
1683         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1684                              fp->rx_sge_prod);
1685
1686         fp->rx_pkt += rx_pkt;
1687         fp->rx_calls++;
1688
1689         return rx_pkt;
1690 }
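/* Editor's note: bnx2x_rx_int() is budget-driven, which is what makes
 * it suitable as NAPI poll work.  A simplified sketch of the usual
 * caller pattern follows; demo_poll() is hypothetical, and the driver's
 * real poll routine also services Tx completions and re-checks the
 * status block before completing.
 */
#if 0   /* illustrative sketch - not built */
static int demo_poll(struct napi_struct *napi, int budget)
{
        struct bnx2x_fastpath *fp =
                container_of(napi, struct bnx2x_fastpath, napi);
        int work_done = bnx2x_rx_int(fp, budget);

        if (work_done < budget) {
                /* ring is drained: leave polling mode and
                   re-enable the queue's interrupt */
                napi_complete(napi);
        }
        /* if work_done == budget, NAPI keeps us scheduled */
        return work_done;
}
#endif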
1691
1692 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1693 {
1694         struct bnx2x_fastpath *fp = fp_cookie;
1695         struct bnx2x *bp = fp->bp;
1696
1697         /* Return here if interrupt is disabled */
1698         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1699                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1700                 return IRQ_HANDLED;
1701         }
1702
1703         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1704            fp->index, fp->sb_id);
1705         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1706
1707 #ifdef BNX2X_STOP_ON_ERROR
1708         if (unlikely(bp->panic))
1709                 return IRQ_HANDLED;
1710 #endif
1711         /* Handle Rx or Tx according to MSI-X vector */
1712         if (fp->is_rx_queue) {
1713                 prefetch(fp->rx_cons_sb);
1714                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1715
1716                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1717
1718         } else {
1719                 prefetch(fp->tx_cons_sb);
1720                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1721
1722                 bnx2x_update_fpsb_idx(fp);
1723                 rmb();
1724                 bnx2x_tx_int(fp);
1725
1726                 /* Re-enable interrupts */
1727                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1728                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1729                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1730                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1731         }
1732
1733         return IRQ_HANDLED;
1734 }
1735
1736 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1737 {
1738         struct bnx2x *bp = netdev_priv(dev_instance);
1739         u16 status = bnx2x_ack_int(bp);
1740         u16 mask;
1741         int i;
1742
1743         /* Return here if interrupt is shared and it's not for us */
1744         if (unlikely(status == 0)) {
1745                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1746                 return IRQ_NONE;
1747         }
1748         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1749
1750         /* Return here if interrupt is disabled */
1751         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1752                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1753                 return IRQ_HANDLED;
1754         }
1755
1756 #ifdef BNX2X_STOP_ON_ERROR
1757         if (unlikely(bp->panic))
1758                 return IRQ_HANDLED;
1759 #endif
1760
1761         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1762                 struct bnx2x_fastpath *fp = &bp->fp[i];
1763
1764                 mask = 0x2 << fp->sb_id;
1765                 if (status & mask) {
1766                         /* Handle Rx or Tx according to SB id */
1767                         if (fp->is_rx_queue) {
1768                                 prefetch(fp->rx_cons_sb);
1769                                 prefetch(&fp->status_blk->u_status_block.
1770                                                         status_block_index);
1771
1772                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1773
1774                         } else {
1775                                 prefetch(fp->tx_cons_sb);
1776                                 prefetch(&fp->status_blk->c_status_block.
1777                                                         status_block_index);
1778
1779                                 bnx2x_update_fpsb_idx(fp);
1780                                 rmb();
1781                                 bnx2x_tx_int(fp);
1782
1783                                 /* Re-enable interrupts */
1784                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1785                                              le16_to_cpu(fp->fp_u_idx),
1786                                              IGU_INT_NOP, 1);
1787                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1788                                              le16_to_cpu(fp->fp_c_idx),
1789                                              IGU_INT_ENABLE, 1);
1790                         }
1791                         status &= ~mask;
1792                 }
1793         }
1794
1795
1796         if (unlikely(status & 0x1)) {
1797                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1798
1799                 status &= ~0x1;
1800                 if (!status)
1801                         return IRQ_HANDLED;
1802         }
1803
1804         if (status)
1805                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1806                    status);
1807
1808         return IRQ_HANDLED;
1809 }
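/* Editor's note: the INTA status word handled above decodes as one bit
 * per status block - bit 0 is the default (slow path) status block and
 * a fast path SB "sb_id" maps to bit (1 + sb_id), i.e. the
 * 0x2 << sb_id mask.  For example, with two queues using sb_id 0 and 1:
 *
 *      status = 0x2  ->  queue with sb_id 0 only
 *      status = 0x5  ->  slow path (bit 0) + queue with sb_id 1
 */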
1810
1811 /* end of fast path */
1812
1813 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1814
1815 /* Link */
1816
1817 /*
1818  * General service functions
1819  */
1820
1821 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1822 {
1823         u32 lock_status;
1824         u32 resource_bit = (1 << resource);
1825         int func = BP_FUNC(bp);
1826         u32 hw_lock_control_reg;
1827         int cnt;
1828
1829         /* Validating that the resource is within range */
1830         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1831                 DP(NETIF_MSG_HW,
1832                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1833                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1834                 return -EINVAL;
1835         }
1836
1837         if (func <= 5) {
1838                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1839         } else {
1840                 hw_lock_control_reg =
1841                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1842         }
1843
1844         /* Validating that the resource is not already taken */
1845         lock_status = REG_RD(bp, hw_lock_control_reg);
1846         if (lock_status & resource_bit) {
1847                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1848                    lock_status, resource_bit);
1849                 return -EEXIST;
1850         }
1851
1852         /* Try for 5 seconds, polling every 5ms */
1853         for (cnt = 0; cnt < 1000; cnt++) {
1854                 /* Try to acquire the lock */
1855                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1856                 lock_status = REG_RD(bp, hw_lock_control_reg);
1857                 if (lock_status & resource_bit)
1858                         return 0;
1859
1860                 msleep(5);
1861         }
1862         DP(NETIF_MSG_HW, "Timeout\n");
1863         return -EAGAIN;
1864 }
1865
1866 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1867 {
1868         u32 lock_status;
1869         u32 resource_bit = (1 << resource);
1870         int func = BP_FUNC(bp);
1871         u32 hw_lock_control_reg;
1872
1873         /* Validating that the resource is within range */
1874         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1875                 DP(NETIF_MSG_HW,
1876                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1877                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1878                 return -EINVAL;
1879         }
1880
1881         if (func <= 5) {
1882                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1883         } else {
1884                 hw_lock_control_reg =
1885                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1886         }
1887
1888         /* Validating that the resource is currently taken */
1889         lock_status = REG_RD(bp, hw_lock_control_reg);
1890         if (!(lock_status & resource_bit)) {
1891                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1892                    lock_status, resource_bit);
1893                 return -EFAULT;
1894         }
1895
1896         REG_WR(bp, hw_lock_control_reg, resource_bit);
1897         return 0;
1898 }
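/* Editor's note: a minimal sketch of the expected pairing for the two
 * HW lock helpers above; demo_locked_access() is hypothetical, while
 * HW_LOCK_RESOURCE_MDIO is a real resource value used elsewhere in
 * this file.
 */
#if 0   /* illustrative sketch - not built */
static int demo_locked_access(struct bnx2x *bp)
{
        int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

        if (rc)         /* -EINVAL, -EEXIST or -EAGAIN (timeout) */
                return rc;

        /* ... touch the shared resource here ... */

        return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
#endif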
1899
1900 /* HW Lock for shared dual port PHYs */
1901 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1902 {
1903         mutex_lock(&bp->port.phy_mutex);
1904
1905         if (bp->port.need_hw_lock)
1906                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1907 }
1908
1909 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1910 {
1911         if (bp->port.need_hw_lock)
1912                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1913
1914         mutex_unlock(&bp->port.phy_mutex);
1915 }
1916
1917 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1918 {
1919         /* The GPIO should be swapped if swap register is set and active */
1920         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1921                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1922         int gpio_shift = gpio_num +
1923                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1924         u32 gpio_mask = (1 << gpio_shift);
1925         u32 gpio_reg;
1926         int value;
1927
1928         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1929                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1930                 return -EINVAL;
1931         }
1932
1933         /* read GPIO value */
1934         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1935
1936         /* get the requested pin value */
1937         if ((gpio_reg & gpio_mask) == gpio_mask)
1938                 value = 1;
1939         else
1940                 value = 0;
1941
1942         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1943
1944         return value;
1945 }
1946
1947 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1948 {
1949         /* The GPIO should be swapped if swap register is set and active */
1950         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1951                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1952         int gpio_shift = gpio_num +
1953                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1954         u32 gpio_mask = (1 << gpio_shift);
1955         u32 gpio_reg;
1956
1957         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1958                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1959                 return -EINVAL;
1960         }
1961
1962         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1963         /* read GPIO and mask except the float bits */
1964         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1965
1966         switch (mode) {
1967         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1968                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1969                    gpio_num, gpio_shift);
1970                 /* clear FLOAT and set CLR */
1971                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1972                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1973                 break;
1974
1975         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1976                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1977                    gpio_num, gpio_shift);
1978                 /* clear FLOAT and set SET */
1979                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1980                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1981                 break;
1982
1983         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1984                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1985                    gpio_num, gpio_shift);
1986                 /* set FLOAT */
1987                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1988                 break;
1989
1990         default:
1991                 break;
1992         }
1993
1994         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
1995         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1996
1997         return 0;
1998 }
1999
2000 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2001 {
2002         /* The GPIO should be swapped if swap register is set and active */
2003         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2004                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2005         int gpio_shift = gpio_num +
2006                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2007         u32 gpio_mask = (1 << gpio_shift);
2008         u32 gpio_reg;
2009
2010         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2011                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2012                 return -EINVAL;
2013         }
2014
2015         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2016         /* read GPIO int */
2017         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2018
2019         switch (mode) {
2020         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2021                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2022                                    "output low\n", gpio_num, gpio_shift);
2023                 /* clear SET and set CLR */
2024                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2025                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2026                 break;
2027
2028         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2029                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2030                                    "output high\n", gpio_num, gpio_shift);
2031                 /* clear CLR and set SET */
2032                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2033                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2034                 break;
2035
2036         default:
2037                 break;
2038         }
2039
2040         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2041         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2042
2043         return 0;
2044 }
2045
2046 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2047 {
2048         u32 spio_mask = (1 << spio_num);
2049         u32 spio_reg;
2050
2051         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2052             (spio_num > MISC_REGISTERS_SPIO_7)) {
2053                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2054                 return -EINVAL;
2055         }
2056
2057         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2058         /* read SPIO and mask except the float bits */
2059         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2060
2061         switch (mode) {
2062         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2063                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2064                 /* clear FLOAT and set CLR */
2065                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2066                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2067                 break;
2068
2069         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2070                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2071                 /* clear FLOAT and set SET */
2072                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2073                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2074                 break;
2075
2076         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2077                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2078                 /* set FLOAT */
2079                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2080                 break;
2081
2082         default:
2083                 break;
2084         }
2085
2086         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2087         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2088
2089         return 0;
2090 }
2091
2092 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2093 {
2094         switch (bp->link_vars.ieee_fc &
2095                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2096         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2097                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2098                                           ADVERTISED_Pause);
2099                 break;
2100
2101         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2102                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2103                                          ADVERTISED_Pause);
2104                 break;
2105
2106         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2107                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2108                 break;
2109
2110         default:
2111                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2112                                           ADVERTISED_Pause);
2113                 break;
2114         }
2115 }
2116
2117 static void bnx2x_link_report(struct bnx2x *bp)
2118 {
2119         if (bp->state == BNX2X_STATE_DISABLED) {
2120                 netif_carrier_off(bp->dev);
2121                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2122                 return;
2123         }
2124
2125         if (bp->link_vars.link_up) {
2126                 if (bp->state == BNX2X_STATE_OPEN)
2127                         netif_carrier_on(bp->dev);
2128                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2129
2130                 printk("%d Mbps ", bp->link_vars.line_speed);
2131
2132                 if (bp->link_vars.duplex == DUPLEX_FULL)
2133                         printk("full duplex");
2134                 else
2135                         printk("half duplex");
2136
2137                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2138                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2139                                 printk(", receive ");
2140                                 if (bp->link_vars.flow_ctrl &
2141                                     BNX2X_FLOW_CTRL_TX)
2142                                         printk("& transmit ");
2143                         } else {
2144                                 printk(", transmit ");
2145                         }
2146                         printk("flow control ON");
2147                 }
2148                 printk("\n");
2149
2150         } else { /* link_down */
2151                 netif_carrier_off(bp->dev);
2152                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2153         }
2154 }
2155
2156 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2157 {
2158         if (!BP_NOMCP(bp)) {
2159                 u8 rc;
2160
2161                 /* Initialize link parameters structure variables */
2162                 /* It is recommended to turn off RX FC for jumbo frames
2163                    to improve performance */
2164                 if (bp->dev->mtu > 5000)
2165                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2166                 else
2167                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2168
2169                 bnx2x_acquire_phy_lock(bp);
2170
2171                 if (load_mode == LOAD_DIAG)
2172                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2173
2174                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2175
2176                 bnx2x_release_phy_lock(bp);
2177
2178                 bnx2x_calc_fc_adv(bp);
2179
2180                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2181                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2182                         bnx2x_link_report(bp);
2183                 }
2184
2185                 return rc;
2186         }
2187         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2188         return -EINVAL;
2189 }
2190
2191 static void bnx2x_link_set(struct bnx2x *bp)
2192 {
2193         if (!BP_NOMCP(bp)) {
2194                 bnx2x_acquire_phy_lock(bp);
2195                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2196                 bnx2x_release_phy_lock(bp);
2197
2198                 bnx2x_calc_fc_adv(bp);
2199         } else
2200                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2201 }
2202
2203 static void bnx2x__link_reset(struct bnx2x *bp)
2204 {
2205         if (!BP_NOMCP(bp)) {
2206                 bnx2x_acquire_phy_lock(bp);
2207                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2208                 bnx2x_release_phy_lock(bp);
2209         } else
2210                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2211 }
2212
2213 static u8 bnx2x_link_test(struct bnx2x *bp)
2214 {
2215         u8 rc;
2216
2217         bnx2x_acquire_phy_lock(bp);
2218         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2219         bnx2x_release_phy_lock(bp);
2220
2221         return rc;
2222 }
2223
2224 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2225 {
2226         u32 r_param = bp->link_vars.line_speed / 8;
2227         u32 fair_periodic_timeout_usec;
2228         u32 t_fair;
2229
2230         memset(&(bp->cmng.rs_vars), 0,
2231                sizeof(struct rate_shaping_vars_per_port));
2232         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2233
2234         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2235         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2236
2237         /* this is the threshold below which no timer arming will occur
2238            the 1.25 coefficient makes the threshold a little bigger
2239            than the real time, to compensate for timer inaccuracy */
2240         bp->cmng.rs_vars.rs_threshold =
2241                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2242
2243         /* resolution of fairness timer */
2244         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2245         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2246         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2247
2248         /* this is the threshold below which we won't arm the timer anymore */
2249         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2250
2251         /* we multiply by 1e3/8 to get bytes/msec.
2252            We don't want the credits to exceed
2253            t_fair*FAIR_MEM (the algorithm resolution) */
2254         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2255         /* since each tick is 4 usec */
2256         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2257 }
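/* Editor's note: a worked example of the arithmetic above for a 10G
 * link (line_speed = 10000 Mbps), assuming the 100 usec RS period and
 * the 10G t_fair value stated in the comments inside the function:
 *
 *      r_param      = 10000 / 8        = 1250 bytes/usec
 *      rs_threshold = 100 * 1250 * 5/4 = 156250 bytes
 *      t_fair       = 1000 usec
 *      upper_bound  = 1250 * 1000 * FAIR_MEM bytes
 *
 * QM_ARB_BYTES and FAIR_MEM are driver constants not visible in this
 * excerpt, so they are left symbolic.
 */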
2258
2259 /* Calculates the sum of vn_min_rates.
2260    It's needed for further normalizing of the min_rates.
2261    Returns:
2262      sum of vn_min_rates.
2263        or
2264      0 - if all the min_rates are 0.
2265      In the latter case the fairness algorithm should be deactivated.
2266      If not all min_rates are zero, then those that are zero will be set to 1.
2267  */
2268 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2269 {
2270         int all_zero = 1;
2271         int port = BP_PORT(bp);
2272         int vn;
2273
2274         bp->vn_weight_sum = 0;
2275         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2276                 int func = 2*vn + port;
2277                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2278                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2279                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2280
2281                 /* Skip hidden vns */
2282                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2283                         continue;
2284
2285                 /* If min rate is zero - set it to 1 */
2286                 if (!vn_min_rate)
2287                         vn_min_rate = DEF_MIN_RATE;
2288                 else
2289                         all_zero = 0;
2290
2291                 bp->vn_weight_sum += vn_min_rate;
2292         }
2293
2294         /* ... only if all min rates are zeros - disable fairness */
2295         if (all_zero)
2296                 bp->vn_weight_sum = 0;
2297 }
2298
2299 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2300 {
2301         struct rate_shaping_vars_per_vn m_rs_vn;
2302         struct fairness_vars_per_vn m_fair_vn;
2303         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2304         u16 vn_min_rate, vn_max_rate;
2305         int i;
2306
2307         /* If function is hidden - set min and max to zeroes */
2308         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2309                 vn_min_rate = 0;
2310                 vn_max_rate = 0;
2311
2312         } else {
2313                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2314                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2315                 /* If fairness is enabled (not all min rates are zero) and
2316                    the current min rate is zero - set it to 1.
2317                    This is a requirement of the algorithm. */
2318                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2319                         vn_min_rate = DEF_MIN_RATE;
2320                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2321                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2322         }
2323
2324         DP(NETIF_MSG_IFUP,
2325            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2326            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2327
2328         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2329         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2330
2331         /* global vn counter - maximal Mbps for this vn */
2332         m_rs_vn.vn_counter.rate = vn_max_rate;
2333
2334         /* quota - number of bytes transmitted in this period */
2335         m_rs_vn.vn_counter.quota =
2336                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2337
2338         if (bp->vn_weight_sum) {
2339                 /* credit for each period of the fairness algorithm:
2340                    number of bytes in T_FAIR (the vns share the port rate).
2341                    vn_weight_sum should not be larger than 10000, thus
2342                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2343                    than zero */
2344                 m_fair_vn.vn_credit_delta =
2345                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2346                                                  (8 * bp->vn_weight_sum))),
2347                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2348                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2349                    m_fair_vn.vn_credit_delta);
2350         }
2351
2352         /* Store it to internal memory */
2353         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2354                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2355                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2356                        ((u32 *)(&m_rs_vn))[i]);
2357
2358         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2359                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2360                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2361                        ((u32 *)(&m_fair_vn))[i]);
2362 }
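/* Editor's note: a worked example of the decoding above with a
 * hypothetical configuration.  If the MIN_BW field of vn_cfg decodes
 * to 25 and the MAX_BW field to 100, then:
 *
 *      vn_min_rate = 25  * 100 = 2500 Mbps
 *      vn_max_rate = 100 * 100 = 10000 Mbps
 *
 * and, assuming RS_PERIODIC_TIMEOUT_USEC = 100 (see the port min/max
 * code above), the per-period rate-shaping quota becomes:
 *
 *      quota = (10000 * 100) / 8 = 125000 bytes
 */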
2363
2364
2365 /* This function is called upon link interrupt */
2366 static void bnx2x_link_attn(struct bnx2x *bp)
2367 {
2368         /* Make sure that we are synced with the current statistics */
2369         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2370
2371         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2372
2373         if (bp->link_vars.link_up) {
2374
2375                 /* dropless flow control */
2376                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2377                         int port = BP_PORT(bp);
2378                         u32 pause_enabled = 0;
2379
2380                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2381                                 pause_enabled = 1;
2382
2383                         REG_WR(bp, BAR_USTRORM_INTMEM +
2384                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2385                                pause_enabled);
2386                 }
2387
2388                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2389                         struct host_port_stats *pstats;
2390
2391                         pstats = bnx2x_sp(bp, port_stats);
2392                         /* reset old bmac stats */
2393                         memset(&(pstats->mac_stx[0]), 0,
2394                                sizeof(struct mac_stx));
2395                 }
2396                 if ((bp->state == BNX2X_STATE_OPEN) ||
2397                     (bp->state == BNX2X_STATE_DISABLED))
2398                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2399         }
2400
2401         /* indicate link status */
2402         bnx2x_link_report(bp);
2403
2404         if (IS_E1HMF(bp)) {
2405                 int port = BP_PORT(bp);
2406                 int func;
2407                 int vn;
2408
2409                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2410                         if (vn == BP_E1HVN(bp))
2411                                 continue;
2412
2413                         func = ((vn << 1) | port);
2414
2415                         /* Set the attention towards other drivers
2416                            on the same port */
2417                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2418                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2419                 }
2420
2421                 if (bp->link_vars.link_up) {
2422                         int i;
2423
2424                         /* Init rate shaping and fairness contexts */
2425                         bnx2x_init_port_minmax(bp);
2426
2427                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2428                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2429
2430                         /* Store it to internal memory */
2431                         for (i = 0;
2432                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2433                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2434                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2435                                        ((u32 *)(&bp->cmng))[i]);
2436                 }
2437         }
2438 }
2439
2440 static void bnx2x__link_status_update(struct bnx2x *bp)
2441 {
2442         int func = BP_FUNC(bp);
2443
2444         if (bp->state != BNX2X_STATE_OPEN)
2445                 return;
2446
2447         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2448
2449         if (bp->link_vars.link_up)
2450                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2451         else
2452                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2453
2454         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2455         bnx2x_calc_vn_weight_sum(bp);
2456
2457         /* indicate link status */
2458         bnx2x_link_report(bp);
2459 }
2460
2461 static void bnx2x_pmf_update(struct bnx2x *bp)
2462 {
2463         int port = BP_PORT(bp);
2464         u32 val;
2465
2466         bp->port.pmf = 1;
2467         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2468
2469         /* enable nig attention */
2470         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2471         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2472         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2473
2474         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2475 }
2476
2477 /* end of Link */
2478
2479 /* slow path */
2480
2481 /*
2482  * General service functions
2483  */
2484
2485 /* send the MCP a request, block until there is a reply */
2486 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2487 {
2488         int func = BP_FUNC(bp);
2489         u32 seq = ++bp->fw_seq;
2490         u32 rc = 0;
2491         u32 cnt = 1;
2492         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2493
2494         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2495         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2496
2497         do {
2498                 /* let the FW do its magic ... */
2499                 msleep(delay);
2500
2501                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2502
2503                 /* Give the FW up to 2 seconds (200*10ms) */
2504         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2505
2506         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2507            cnt*delay, rc, seq);
2508
2509         /* is this a reply to our command? */
2510         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2511                 rc &= FW_MSG_CODE_MASK;
2512         else {
2513                 /* FW BUG! */
2514                 BNX2X_ERR("FW failed to respond!\n");
2515                 bnx2x_fw_dump(bp);
2516                 rc = 0;
2517         }
2518
2519         return rc;
2520 }
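/* Editor's note: a minimal sketch of the expected caller pattern for
 * the mailbox helper above; demo_mcp_call() is hypothetical, while
 * DRV_MSG_CODE_DCC_OK is a real command used later in this file.  A
 * zero return means the FW never acknowledged the sequence number.
 */
#if 0   /* illustrative sketch - not built */
static void demo_mcp_call(struct bnx2x *bp)
{
        u32 fw_resp = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);

        if (!fw_resp)
                BNX2X_ERR("MCP did not respond\n");
        /* else: fw_resp holds the masked FW_MSG_CODE_* reply */
}
#endif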
2521
2522 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2523 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2524 static void bnx2x_set_rx_mode(struct net_device *dev);
2525
2526 static void bnx2x_e1h_disable(struct bnx2x *bp)
2527 {
2528         int port = BP_PORT(bp);
2529         int i;
2530
2531         bp->rx_mode = BNX2X_RX_MODE_NONE;
2532         bnx2x_set_storm_rx_mode(bp);
2533
2534         netif_tx_disable(bp->dev);
2535         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2536
2537         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2538
2539         bnx2x_set_mac_addr_e1h(bp, 0);
2540
2541         for (i = 0; i < MC_HASH_SIZE; i++)
2542                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2543
2544         netif_carrier_off(bp->dev);
2545 }
2546
2547 static void bnx2x_e1h_enable(struct bnx2x *bp)
2548 {
2549         int port = BP_PORT(bp);
2550
2551         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2552
2553         bnx2x_set_mac_addr_e1h(bp, 1);
2554
2555         /* Only the Tx queues need to be re-enabled */
2556         netif_tx_wake_all_queues(bp->dev);
2557
2558         /* Initialize the receive filter. */
2559         bnx2x_set_rx_mode(bp->dev);
2560 }
2561
2562 static void bnx2x_update_min_max(struct bnx2x *bp)
2563 {
2564         int port = BP_PORT(bp);
2565         int vn, i;
2566
2567         /* Init rate shaping and fairness contexts */
2568         bnx2x_init_port_minmax(bp);
2569
2570         bnx2x_calc_vn_weight_sum(bp);
2571
2572         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2573                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2574
2575         if (bp->port.pmf) {
2576                 int func;
2577
2578                 /* Set the attention towards other drivers on the same port */
2579                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2580                         if (vn == BP_E1HVN(bp))
2581                                 continue;
2582
2583                         func = ((vn << 1) | port);
2584                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2585                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2586                 }
2587
2588                 /* Store it to internal memory */
2589                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2590                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2591                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2592                                ((u32 *)(&bp->cmng))[i]);
2593         }
2594 }
2595
2596 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2597 {
2598         int func = BP_FUNC(bp);
2599
2600         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2601         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2602
2603         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2604
2605                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2606                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2607                         bp->state = BNX2X_STATE_DISABLED;
2608
2609                         bnx2x_e1h_disable(bp);
2610                 } else {
2611                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2612                         bp->state = BNX2X_STATE_OPEN;
2613
2614                         bnx2x_e1h_enable(bp);
2615                 }
2616                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2617         }
2618         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2619
2620                 bnx2x_update_min_max(bp);
2621                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2622         }
2623
2624         /* Report results to MCP */
2625         if (dcc_event)
2626                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2627         else
2628                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2629 }
2630
2631 /* the slow path queue is odd since completions arrive on the fastpath ring */
2632 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2633                          u32 data_hi, u32 data_lo, int common)
2634 {
2635         int func = BP_FUNC(bp);
2636
2637         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2638            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2639            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2640            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2641            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2642
2643 #ifdef BNX2X_STOP_ON_ERROR
2644         if (unlikely(bp->panic))
2645                 return -EIO;
2646 #endif
2647
2648         spin_lock_bh(&bp->spq_lock);
2649
2650         if (!bp->spq_left) {
2651                 BNX2X_ERR("BUG! SPQ ring full!\n");
2652                 spin_unlock_bh(&bp->spq_lock);
2653                 bnx2x_panic();
2654                 return -EBUSY;
2655         }
2656
2657         /* CID needs the port number to be encoded in it */
2658         bp->spq_prod_bd->hdr.conn_and_cmd_data =
2659                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2660                                      HW_CID(bp, cid)));
2661         bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2662         if (common)
2663                 bp->spq_prod_bd->hdr.type |=
2664                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2665
2666         bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2667         bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2668
2669         bp->spq_left--;
2670
2671         if (bp->spq_prod_bd == bp->spq_last_bd) {
2672                 bp->spq_prod_bd = bp->spq;
2673                 bp->spq_prod_idx = 0;
2674                 DP(NETIF_MSG_TIMER, "end of spq\n");
2675
2676         } else {
2677                 bp->spq_prod_bd++;
2678                 bp->spq_prod_idx++;
2679         }
2680
2681         /* Make sure that BD data is updated before writing the producer */
2682         wmb();
2683
2684         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2685                bp->spq_prod_idx);
2686
2687         mmiowb();
2688
2689         spin_unlock_bh(&bp->spq_lock);
2690         return 0;
2691 }
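/* Editor's note: a minimal sketch of a typical bnx2x_sp_post() caller;
 * demo_post_ramrod() and DEMO_RAMROD_CMD are hypothetical stand-ins
 * for a real caller and a real RAMROD_CMD_ID_* value.
 */
#if 0   /* illustrative sketch - not built */
static int demo_post_ramrod(struct bnx2x *bp, u32 addr_hi, u32 addr_lo)
{
        /* cid 0; the last argument 1 marks it as a "common" ramrod
           (not tied to a single connection) */
        return bnx2x_sp_post(bp, DEMO_RAMROD_CMD, 0, addr_hi, addr_lo, 1);
}
#endif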
2692
2693 /* acquire split MCP access lock register */
2694 static int bnx2x_acquire_alr(struct bnx2x *bp)
2695 {
2696         u32 i, j, val;
2697         int rc = 0;
2698
2699         might_sleep();
2700         i = 100;
2701         for (j = 0; j < i*10; j++) {
2702                 val = (1UL << 31);
2703                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2704                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2705                 if (val & (1UL << 31))
2706                         break;
2707
2708                 msleep(5);
2709         }
2710         if (!(val & (1UL << 31))) {
2711                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2712                 rc = -EBUSY;
2713         }
2714
2715         return rc;
2716 }
2717
2718 /* release split MCP access lock register */
2719 static void bnx2x_release_alr(struct bnx2x *bp)
2720 {
2721         u32 val = 0;
2722
2723         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2724 }
2725
2726 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2727 {
2728         struct host_def_status_block *def_sb = bp->def_status_blk;
2729         u16 rc = 0;
2730
2731         barrier(); /* status block is written to by the chip */
2732         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2733                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2734                 rc |= 1;
2735         }
2736         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2737                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2738                 rc |= 2;
2739         }
2740         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2741                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2742                 rc |= 4;
2743         }
2744         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2745                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2746                 rc |= 8;
2747         }
2748         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2749                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2750                 rc |= 16;
2751         }
2752         return rc;
2753 }
2754
2755 /*
2756  * slow path service functions
2757  */
2758
2759 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2760 {
2761         int port = BP_PORT(bp);
2762         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2763                        COMMAND_REG_ATTN_BITS_SET);
2764         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2765                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2766         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2767                                        NIG_REG_MASK_INTERRUPT_PORT0;
2768         u32 aeu_mask;
2769         u32 nig_mask = 0;
2770
2771         if (bp->attn_state & asserted)
2772                 BNX2X_ERR("IGU ERROR\n");
2773
2774         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2775         aeu_mask = REG_RD(bp, aeu_addr);
2776
2777         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2778            aeu_mask, asserted);
2779         aeu_mask &= ~(asserted & 0xff);
2780         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2781
2782         REG_WR(bp, aeu_addr, aeu_mask);
2783         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2784
2785         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2786         bp->attn_state |= asserted;
2787         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2788
2789         if (asserted & ATTN_HARD_WIRED_MASK) {
2790                 if (asserted & ATTN_NIG_FOR_FUNC) {
2791
2792                         bnx2x_acquire_phy_lock(bp);
2793
2794                         /* save nig interrupt mask */
2795                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2796                         REG_WR(bp, nig_int_mask_addr, 0);
2797
2798                         bnx2x_link_attn(bp);
2799
2800                         /* handle unicore attn? */
2801                 }
2802                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2803                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2804
2805                 if (asserted & GPIO_2_FUNC)
2806                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2807
2808                 if (asserted & GPIO_3_FUNC)
2809                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2810
2811                 if (asserted & GPIO_4_FUNC)
2812                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2813
2814                 if (port == 0) {
2815                         if (asserted & ATTN_GENERAL_ATTN_1) {
2816                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2817                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2818                         }
2819                         if (asserted & ATTN_GENERAL_ATTN_2) {
2820                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2821                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2822                         }
2823                         if (asserted & ATTN_GENERAL_ATTN_3) {
2824                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2825                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2826                         }
2827                 } else {
2828                         if (asserted & ATTN_GENERAL_ATTN_4) {
2829                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2830                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2831                         }
2832                         if (asserted & ATTN_GENERAL_ATTN_5) {
2833                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2834                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2835                         }
2836                         if (asserted & ATTN_GENERAL_ATTN_6) {
2837                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2838                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2839                         }
2840                 }
2841
2842         } /* if hardwired */
2843
2844         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2845            asserted, hc_addr);
2846         REG_WR(bp, hc_addr, asserted);
2847
2848         /* now set back the mask */
2849         if (asserted & ATTN_NIG_FOR_FUNC) {
2850                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2851                 bnx2x_release_phy_lock(bp);
2852         }
2853 }
2854
2855 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2856 {
2857         int port = BP_PORT(bp);
2858
2859         /* mark the failure */
2860         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2861         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2862         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2863                  bp->link_params.ext_phy_config);
2864
2865         /* log the failure */
2866         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2867                " the driver to shut down the card to prevent permanent"
2868                " damage.  Please contact Dell Support for assistance\n",
2869                bp->dev->name);
2870 }
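
/*
 * SPIO5 carries the fan-failure attention on boards that wire it up.
 * The handler below masks the bit in the AEU enable register, takes
 * the external PHY down through its GPIOs (which GPIOs depends on the
 * PHY type), and then reports the failure via bnx2x_fan_failure()
 * above.
 */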
2871 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2872 {
2873         int port = BP_PORT(bp);
2874         int reg_offset;
2875         u32 val, swap_val, swap_override;
2876
2877         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2878                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2879
2880         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2881
2882                 val = REG_RD(bp, reg_offset);
2883                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2884                 REG_WR(bp, reg_offset, val);
2885
2886                 BNX2X_ERR("SPIO5 hw attention\n");
2887
2888                 /* Fan failure attention */
2889                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2890                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2891                         /* Low power mode is controlled by GPIO 2 */
2892                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2893                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2894                         /* The PHY reset is controlled by GPIO 1 */
2895                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2896                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2897                         break;
2898
2899                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2900                         /* The PHY reset is controlled by GPIO 1 */
2901                         /* fake the port number to cancel the swap done in
2902                            set_gpio() */
2903                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2904                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2905                         port = (swap_val && swap_override) ^ 1;
2906                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2907                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2908                         break;
2909
2910                 default:
2911                         break;
2912                 }
2913                 bnx2x_fan_failure(bp);
2914         }
2915
2916         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2917                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2918                 bnx2x_acquire_phy_lock(bp);
2919                 bnx2x_handle_module_detect_int(&bp->link_params);
2920                 bnx2x_release_phy_lock(bp);
2921         }
2922
2923         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2924
2925                 val = REG_RD(bp, reg_offset);
2926                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2927                 REG_WR(bp, reg_offset, val);
2928
2929                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2930                           (attn & HW_INTERRUT_ASSERT_SET_0));
2931                 bnx2x_panic();
2932         }
2933 }
2934
2935 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2936 {
2937         u32 val;
2938
2939         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2940
2941                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2942                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2943                 /* DORQ discard attention */
2944                 if (val & 0x2)
2945                         BNX2X_ERR("FATAL error from DORQ\n");
2946         }
2947
2948         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2949
2950                 int port = BP_PORT(bp);
2951                 int reg_offset;
2952
2953                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2954                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2955
2956                 val = REG_RD(bp, reg_offset);
2957                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2958                 REG_WR(bp, reg_offset, val);
2959
2960                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2961                           (attn & HW_INTERRUT_ASSERT_SET_1));
2962                 bnx2x_panic();
2963         }
2964 }
2965
2966 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2967 {
2968         u32 val;
2969
2970         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2971
2972                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2973                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2974                 /* CFC error attention */
2975                 if (val & 0x2)
2976                         BNX2X_ERR("FATAL error from CFC\n");
2977         }
2978
2979         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2980
2981                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2982                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2983                 /* RQ_USDMDP_FIFO_OVERFLOW */
2984                 if (val & 0x18000)
2985                         BNX2X_ERR("FATAL error from PXP\n");
2986         }
2987
2988         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2989
2990                 int port = BP_PORT(bp);
2991                 int reg_offset;
2992
2993                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2994                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2995
2996                 val = REG_RD(bp, reg_offset);
2997                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2998                 REG_WR(bp, reg_offset, val);
2999
3000                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3001                           (attn & HW_INTERRUT_ASSERT_SET_2));
3002                 bnx2x_panic();
3003         }
3004 }
3005
3006 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3007 {
3008         u32 val;
3009
3010         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3011
3012                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3013                         int func = BP_FUNC(bp);
3014
3015                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3016                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3017                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3018                                 bnx2x_dcc_event(bp,
3019                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3020                         bnx2x__link_status_update(bp);
3021                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3022                                 bnx2x_pmf_update(bp);
3023
3024                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3025
3026                         BNX2X_ERR("MC assert!\n");
3027                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3028                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3029                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3030                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3031                         bnx2x_panic();
3032
3033                 } else if (attn & BNX2X_MCP_ASSERT) {
3034
3035                         BNX2X_ERR("MCP assert!\n");
3036                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3037                         bnx2x_fw_dump(bp);
3038
3039                 } else
3040                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3041         }
3042
3043         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3044                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3045                 if (attn & BNX2X_GRC_TIMEOUT) {
3046                         val = CHIP_IS_E1H(bp) ?
3047                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3048                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3049                 }
3050                 if (attn & BNX2X_GRC_RSV) {
3051                         val = CHIP_IS_E1H(bp) ?
3052                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3053                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3054                 }
3055                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3056         }
3057 }
3058
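/*
 * Deassertion handling: with the ALR held (the MCP or the other port
 * may be processing the same event), read the after-invert AEU
 * registers, run every dynamic attention group whose bit deasserted
 * through the four per-register handlers above, acknowledge the lines
 * through the HC ATTN_BITS_CLR command, unmask them again in the AEU
 * and drop them from bp->attn_state.
 */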
3059 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3060 {
3061         struct attn_route attn;
3062         struct attn_route group_mask;
3063         int port = BP_PORT(bp);
3064         int index;
3065         u32 reg_addr;
3066         u32 val;
3067         u32 aeu_mask;
3068
3069         /* need to take HW lock because MCP or other port might also
3070            try to handle this event */
3071         bnx2x_acquire_alr(bp);
3072
3073         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3074         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3075         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3076         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3077         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3078            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3079
3080         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3081                 if (deasserted & (1 << index)) {
3082                         group_mask = bp->attn_group[index];
3083
3084                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3085                            index, group_mask.sig[0], group_mask.sig[1],
3086                            group_mask.sig[2], group_mask.sig[3]);
3087
3088                         bnx2x_attn_int_deasserted3(bp,
3089                                         attn.sig[3] & group_mask.sig[3]);
3090                         bnx2x_attn_int_deasserted1(bp,
3091                                         attn.sig[1] & group_mask.sig[1]);
3092                         bnx2x_attn_int_deasserted2(bp,
3093                                         attn.sig[2] & group_mask.sig[2]);
3094                         bnx2x_attn_int_deasserted0(bp,
3095                                         attn.sig[0] & group_mask.sig[0]);
3096
3097                         if ((attn.sig[0] & group_mask.sig[0] &
3098                                                 HW_PRTY_ASSERT_SET_0) ||
3099                             (attn.sig[1] & group_mask.sig[1] &
3100                                                 HW_PRTY_ASSERT_SET_1) ||
3101                             (attn.sig[2] & group_mask.sig[2] &
3102                                                 HW_PRTY_ASSERT_SET_2))
3103                                 BNX2X_ERR("FATAL HW block parity attention\n");
3104                 }
3105         }
3106
3107         bnx2x_release_alr(bp);
3108
3109         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3110
3111         val = ~deasserted;
3112         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3113            val, reg_addr);
3114         REG_WR(bp, reg_addr, val);
3115
3116         if (~bp->attn_state & deasserted)
3117                 BNX2X_ERR("IGU ERROR\n");
3118
3119         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3120                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3121
3122         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3123         aeu_mask = REG_RD(bp, reg_addr);
3124
3125         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3126            aeu_mask, deasserted);
3127         aeu_mask |= (deasserted & 0xff);
3128         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3129
3130         REG_WR(bp, reg_addr, aeu_mask);
3131         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3132
3133         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3134         bp->attn_state &= ~deasserted;
3135         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3136 }
3137
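/*
 * A line counts as newly asserted when the hardware reports it set
 * (attn_bits) while it is neither acknowledged (attn_bits_ack) nor
 * tracked in attn_state, and as deasserted when it reads clear while
 * still both acknowledged and tracked.  A bit whose hardware value
 * agrees with its acknowledgement but disagrees with the tracked
 * state fits neither pattern and is flagged as a BAD attention state.
 */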
3138 static void bnx2x_attn_int(struct bnx2x *bp)
3139 {
3140         /* read local copy of bits */
3141         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3142                                                                 attn_bits);
3143         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3144                                                                 attn_bits_ack);
3145         u32 attn_state = bp->attn_state;
3146
3147         /* look for changed bits */
3148         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3149         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3150
3151         DP(NETIF_MSG_HW,
3152            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3153            attn_bits, attn_ack, asserted, deasserted);
3154
3155         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3156                 BNX2X_ERR("BAD attention state\n");
3157
3158         /* handle bits that were raised */
3159         if (asserted)
3160                 bnx2x_attn_int_asserted(bp, asserted);
3161
3162         if (deasserted)
3163                 bnx2x_attn_int_deasserted(bp, deasserted);
3164 }
3165
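/*
 * Slowpath work item: runs from bnx2x_wq after bnx2x_msix_sp_int()
 * below has acked the default status block with IGU_INT_DISABLE.  It
 * handles HW attentions if the attention index moved, then
 * acknowledges the attention and each storm index with IGU_INT_NOP
 * and re-enables the interrupt only with the final TSTORM ack.
 */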
3166 static void bnx2x_sp_task(struct work_struct *work)
3167 {
3168         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3169         u16 status;
3170
3172         /* Return here if interrupt is disabled */
3173         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3174                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3175                 return;
3176         }
3177
3178         status = bnx2x_update_dsb_idx(bp);
3179 /*      if (status == 0)                                     */
3180 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3181
3182         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3183
3184         /* HW attentions */
3185         if (status & 0x1)
3186                 bnx2x_attn_int(bp);
3187
3188         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3189                      IGU_INT_NOP, 1);
3190         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3191                      IGU_INT_NOP, 1);
3192         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3193                      IGU_INT_NOP, 1);
3194         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3195                      IGU_INT_NOP, 1);
3196         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3197                      IGU_INT_ENABLE, 1);
3199 }
3200
3201 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3202 {
3203         struct net_device *dev = dev_instance;
3204         struct bnx2x *bp = netdev_priv(dev);
3205
3206         /* Return here if interrupt is disabled */
3207         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3208                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3209                 return IRQ_HANDLED;
3210         }
3211
3212         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3213
3214 #ifdef BNX2X_STOP_ON_ERROR
3215         if (unlikely(bp->panic))
3216                 return IRQ_HANDLED;
3217 #endif
3218
3219         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3220
3221         return IRQ_HANDLED;
3222 }
3223
3224 /* end of slow path */
3225
3226 /* Statistics */
3227
3228 /****************************************************************************
3229 * Macros
3230 ****************************************************************************/
3231
3232 /* sum[hi:lo] += add[hi:lo] */
3233 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3234         do { \
3235                 s_lo += a_lo; \
3236                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3237         } while (0)
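
/*
 * The carry test relies on unsigned wrap-around: after s_lo += a_lo,
 * s_lo < a_lo iff the 32-bit addition overflowed.  E.g. for
 * s = 0x00000001_ffffffff and a = 0x00000000_00000002, s_lo wraps to
 * 1 (< 2), so one carry is added and the sum is 0x00000002_00000001.
 */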
3238
3239 /* difference = minuend - subtrahend */
3240 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3241         do { \
3242                 if (m_lo < s_lo) { \
3243                         /* underflow */ \
3244                         d_hi = m_hi - s_hi; \
3245                         if (d_hi > 0) { \
3246                                 /* we can 'loan' 1 */ \
3247                                 d_hi--; \
3248                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3249                         } else { \
3250                                 /* m_hi <= s_hi */ \
3251                                 d_hi = 0; \
3252                                 d_lo = 0; \
3253                         } \
3254                 } else { \
3255                         /* m_lo >= s_lo */ \
3256                         if (m_hi < s_hi) { \
3257                                 d_hi = 0; \
3258                                 d_lo = 0; \
3259                         } else { \
3260                                 /* m_hi >= s_hi */ \
3261                                 d_hi = m_hi - s_hi; \
3262                                 d_lo = m_lo - s_lo; \
3263                         } \
3264                 } \
3265         } while (0)
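
/*
 * Note the clamp: when the subtrahend is larger than the minuend the
 * difference is forced to 0:0 rather than allowed to wrap.  Since the
 * minuend is presumably a newer reading of the same monotonic
 * counter, a negative difference can only mean the counter was reset,
 * and discarding the sample is the safe choice.
 */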
3266
3267 #define UPDATE_STAT64(s, t) \
3268         do { \
3269                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3270                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3271                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3272                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3273                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3274                        pstats->mac_stx[1].t##_lo, diff.lo); \
3275         } while (0)
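
/*
 * mac_stx[0] holds the previous raw MAC snapshot and mac_stx[1] the
 * running total: the macro accumulates only the delta between the new
 * reading and the snapshot, so a MAC counter that wraps or resets
 * cannot corrupt the total (DIFF_64 clamps such a delta to zero).
 */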
3276
3277 #define UPDATE_STAT64_NIG(s, t) \
3278         do { \
3279                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3280                         diff.lo, new->s##_lo, old->s##_lo); \
3281                 ADD_64(estats->t##_hi, diff.hi, \
3282                        estats->t##_lo, diff.lo); \
3283         } while (0)
3284
3285 /* sum[hi:lo] += add */
3286 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3287         do { \
3288                 s_lo += a; \
3289                 s_hi += (s_lo < a) ? 1 : 0; \
3290         } while (0)
3291
3292 #define UPDATE_EXTEND_STAT(s) \
3293         do { \
3294                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3295                               pstats->mac_stx[1].s##_lo, \
3296                               new->s); \
3297         } while (0)
3298
3299 #define UPDATE_EXTEND_TSTAT(s, t) \
3300         do { \
3301                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3302                 old_tclient->s = tclient->s; \
3303                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3304         } while (0)
3305
3306 #define UPDATE_EXTEND_USTAT(s, t) \
3307         do { \
3308                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3309                 old_uclient->s = uclient->s; \
3310                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3311         } while (0)
3312
3313 #define UPDATE_EXTEND_XSTAT(s, t) \
3314         do { \
3315                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3316                 old_xclient->s = xclient->s; \
3317                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3318         } while (0)
3319
3320 /* minuend -= subtrahend */
3321 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3322         do { \
3323                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3324         } while (0)
3325
3326 /* minuend[hi:lo] -= subtrahend */
3327 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3328         do { \
3329                 SUB_64(m_hi, 0, m_lo, s); \
3330         } while (0)
3331
3332 #define SUB_EXTEND_USTAT(s, t) \
3333         do { \
3334                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3335                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3336         } while (0)
3337
3338 /*
3339  * General service functions
3340  */
3341
3342 static inline long bnx2x_hilo(u32 *hiref)
3343 {
3344         u32 lo = *(hiref + 1);
3345 #if (BITS_PER_LONG == 64)
3346         u32 hi = *hiref;
3347
3348         return HILO_U64(hi, lo);
3349 #else
3350         return lo;
3351 #endif
3352 }
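
/*
 * On 32-bit builds only the low dword is returned: the result feeds
 * unsigned long fields (presumably the net_device_stats counters),
 * which cannot hold the full 64-bit value there anyway.
 */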
3353
3354 /*
3355  * Init service functions
3356  */
3357
3358 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3359 {
3360         if (!bp->stats_pending) {
3361                 struct eth_query_ramrod_data ramrod_data = {0};
3362                 int i, rc;
3363
3364                 ramrod_data.drv_counter = bp->stats_counter++;
3365                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3366                 for_each_queue(bp, i)
3367                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3368
3369                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3370                                    ((u32 *)&ramrod_data)[1],
3371                                    ((u32 *)&ramrod_data)[0], 0);
3372                 if (rc == 0) {
3373                         /* stats ramrod has its own slot on the spq */
3374                         bp->spq_left++;
3375                         bp->stats_pending = 1;
3376                 }
3377         }
3378 }
3379
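/*
 * Kick the DMAE program assembled in the slowpath area.  When several
 * commands were queued (bp->executer_idx != 0) a loader command is
 * posted: it DMAs a queued command from host memory into the DMAE
 * command memory and completes by writing a GO register
 * (dmae_reg_go_c[]), so execution chains from one command to the next
 * without CPU involvement until the final command completes to
 * stats_comp.  On E1 the loader copies one dword less per command.
 * With nothing queued, a lone function-stats command (the func_stx
 * case) is posted directly.
 */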
3380 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3381 {
3382         struct dmae_command *dmae = &bp->stats_dmae;
3383         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3384
3385         *stats_comp = DMAE_COMP_VAL;
3386         if (CHIP_REV_IS_SLOW(bp))
3387                 return;
3388
3389         /* loader */
3390         if (bp->executer_idx) {
3391                 int loader_idx = PMF_DMAE_C(bp);
3392
3393                 memset(dmae, 0, sizeof(struct dmae_command));
3394
3395                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3396                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3397                                 DMAE_CMD_DST_RESET |
3398 #ifdef __BIG_ENDIAN
3399                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3400 #else
3401                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3402 #endif
3403                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3404                                                DMAE_CMD_PORT_0) |
3405                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3406                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3407                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3408                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3409                                      sizeof(struct dmae_command) *
3410                                      (loader_idx + 1)) >> 2;
3411                 dmae->dst_addr_hi = 0;
3412                 dmae->len = sizeof(struct dmae_command) >> 2;
3413                 if (CHIP_IS_E1(bp))
3414                         dmae->len--;
3415                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3416                 dmae->comp_addr_hi = 0;
3417                 dmae->comp_val = 1;
3418
3419                 *stats_comp = 0;
3420                 bnx2x_post_dmae(bp, dmae, loader_idx);
3421
3422         } else if (bp->func_stx) {
3423                 *stats_comp = 0;
3424                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3425         }
3426 }
3427
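/*
 * Wait for the statistics DMAE chain to finish: stats_comp is zeroed
 * before the chain is posted and the final command writes
 * DMAE_COMP_VAL there on completion.  Polls in 1 ms steps for at most
 * ~10 ms and returns 1 whether or not the write was seen (a timeout
 * is only logged).
 */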
3428 static int bnx2x_stats_comp(struct bnx2x *bp)
3429 {
3430         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3431         int cnt = 10;
3432
3433         might_sleep();
3434         while (*stats_comp != DMAE_COMP_VAL) {
3435                 if (!cnt) {
3436                         BNX2X_ERR("timeout waiting for stats to finish\n");
3437                         break;
3438                 }
3439                 cnt--;
3440                 msleep(1);
3441         }
3442         return 1;
3443 }
3444
3445 /*
3446  * Statistics service functions
3447  */
3448
3449 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3450 {
3451         struct dmae_command *dmae;
3452         u32 opcode;
3453         int loader_idx = PMF_DMAE_C(bp);
3454         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3455
3456         /* sanity */
3457         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3458                 BNX2X_ERR("BUG!\n");
3459                 return;
3460         }
3461
3462         bp->executer_idx = 0;
3463
3464         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3465                   DMAE_CMD_C_ENABLE |
3466                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3467 #ifdef __BIG_ENDIAN
3468                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3469 #else
3470                   DMAE_CMD_ENDIANITY_DW_SWAP |
3471 #endif
3472                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3473                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3474
3475         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3476         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3477         dmae->src_addr_lo = bp->port.port_stx >> 2;
3478         dmae->src_addr_hi = 0;
3479         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3480         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3481         dmae->len = DMAE_LEN32_RD_MAX;
3482         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3483         dmae->comp_addr_hi = 0;
3484         dmae->comp_val = 1;
3485
3486         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3487         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3488         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3489         dmae->src_addr_hi = 0;
3490         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3491                                    DMAE_LEN32_RD_MAX * 4);
3492         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3493                                    DMAE_LEN32_RD_MAX * 4);
3494         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3495         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3496         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3497         dmae->comp_val = DMAE_COMP_VAL;
3498
3499         *stats_comp = 0;
3500         bnx2x_hw_stats_post(bp);
3501         bnx2x_stats_comp(bp);
3502 }
3503
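/*
 * Build the per-port statistics DMAE program: push the host port and
 * function stats up to the MCP (port_stx/func_stx), pull the hardware
 * MAC counters (BMAC or EMAC register windows, depending on the
 * active MAC) and the NIG counters into the slowpath buffers, and let
 * only the last command complete to stats_comp.
 */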
3504 static void bnx2x_port_stats_init(struct bnx2x *bp)
3505 {
3506         struct dmae_command *dmae;
3507         int port = BP_PORT(bp);
3508         int vn = BP_E1HVN(bp);
3509         u32 opcode;
3510         int loader_idx = PMF_DMAE_C(bp);
3511         u32 mac_addr;
3512         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3513
3514         /* sanity */
3515         if (!bp->link_vars.link_up || !bp->port.pmf) {
3516                 BNX2X_ERR("BUG!\n");
3517                 return;
3518         }
3519
3520         bp->executer_idx = 0;
3521
3522         /* MCP */
3523         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3524                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3525                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3526 #ifdef __BIG_ENDIAN
3527                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3528 #else
3529                   DMAE_CMD_ENDIANITY_DW_SWAP |
3530 #endif
3531                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3532                   (vn << DMAE_CMD_E1HVN_SHIFT));
3533
3534         if (bp->port.port_stx) {
3535
3536                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3537                 dmae->opcode = opcode;
3538                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3539                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3540                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3541                 dmae->dst_addr_hi = 0;
3542                 dmae->len = sizeof(struct host_port_stats) >> 2;
3543                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3544                 dmae->comp_addr_hi = 0;
3545                 dmae->comp_val = 1;
3546         }
3547
3548         if (bp->func_stx) {
3549
3550                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3551                 dmae->opcode = opcode;
3552                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3553                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3554                 dmae->dst_addr_lo = bp->func_stx >> 2;
3555                 dmae->dst_addr_hi = 0;
3556                 dmae->len = sizeof(struct host_func_stats) >> 2;
3557                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3558                 dmae->comp_addr_hi = 0;
3559                 dmae->comp_val = 1;
3560         }
3561
3562         /* MAC */
3563         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3564                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3565                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3566 #ifdef __BIG_ENDIAN
3567                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3568 #else
3569                   DMAE_CMD_ENDIANITY_DW_SWAP |
3570 #endif
3571                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3572                   (vn << DMAE_CMD_E1HVN_SHIFT));
3573
3574         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3575
3576                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3577                                    NIG_REG_INGRESS_BMAC0_MEM);
3578
3579                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3580                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3581                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3582                 dmae->opcode = opcode;
3583                 dmae->src_addr_lo = (mac_addr +
3584                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3585                 dmae->src_addr_hi = 0;
3586                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3587                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3588                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3589                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3590                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3591                 dmae->comp_addr_hi = 0;
3592                 dmae->comp_val = 1;
3593
3594                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3595                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3596                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3597                 dmae->opcode = opcode;
3598                 dmae->src_addr_lo = (mac_addr +
3599                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3600                 dmae->src_addr_hi = 0;
3601                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3602                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3603                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3604                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3605                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3606                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3607                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3608                 dmae->comp_addr_hi = 0;
3609                 dmae->comp_val = 1;
3610
3611         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3612
3613                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3614
3615                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3616                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3617                 dmae->opcode = opcode;
3618                 dmae->src_addr_lo = (mac_addr +
3619                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3620                 dmae->src_addr_hi = 0;
3621                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3622                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3623                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3624                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3625                 dmae->comp_addr_hi = 0;
3626                 dmae->comp_val = 1;
3627
3628                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3629                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3630                 dmae->opcode = opcode;
3631                 dmae->src_addr_lo = (mac_addr +
3632                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3633                 dmae->src_addr_hi = 0;
3634                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3635                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3636                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3637                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3638                 dmae->len = 1;
3639                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640                 dmae->comp_addr_hi = 0;
3641                 dmae->comp_val = 1;
3642
3643                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3644                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3645                 dmae->opcode = opcode;
3646                 dmae->src_addr_lo = (mac_addr +
3647                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3648                 dmae->src_addr_hi = 0;
3649                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3650                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3651                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3652                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3653                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3654                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3655                 dmae->comp_addr_hi = 0;
3656                 dmae->comp_val = 1;
3657         }
3658
3659         /* NIG */
3660         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3661         dmae->opcode = opcode;
3662         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3663                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3664         dmae->src_addr_hi = 0;
3665         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3666         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3667         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3668         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3669         dmae->comp_addr_hi = 0;
3670         dmae->comp_val = 1;
3671
3672         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3673         dmae->opcode = opcode;
3674         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3675                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3676         dmae->src_addr_hi = 0;
3677         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3678                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3679         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3680                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3681         dmae->len = (2*sizeof(u32)) >> 2;
3682         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3683         dmae->comp_addr_hi = 0;
3684         dmae->comp_val = 1;
3685
3686         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3687         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3688                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3689                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3690 #ifdef __BIG_ENDIAN
3691                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3692 #else
3693                         DMAE_CMD_ENDIANITY_DW_SWAP |
3694 #endif
3695                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3696                         (vn << DMAE_CMD_E1HVN_SHIFT));
3697         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3698                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3699         dmae->src_addr_hi = 0;
3700         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3701                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3702         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3703                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3704         dmae->len = (2*sizeof(u32)) >> 2;
3705         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3706         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3707         dmae->comp_val = DMAE_COMP_VAL;
3708
3709         *stats_comp = 0;
3710 }
3711
3712 static void bnx2x_func_stats_init(struct bnx2x *bp)
3713 {
3714         struct dmae_command *dmae = &bp->stats_dmae;
3715         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3716
3717         /* sanity */
3718         if (!bp->func_stx) {
3719                 BNX2X_ERR("BUG!\n");
3720                 return;
3721         }
3722
3723         bp->executer_idx = 0;
3724         memset(dmae, 0, sizeof(struct dmae_command));
3725
3726         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3727                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3728                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3729 #ifdef __BIG_ENDIAN
3730                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3731 #else
3732                         DMAE_CMD_ENDIANITY_DW_SWAP |
3733 #endif
3734                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3735                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3736         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3737         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3738         dmae->dst_addr_lo = bp->func_stx >> 2;
3739         dmae->dst_addr_hi = 0;
3740         dmae->len = sizeof(struct host_func_stats) >> 2;
3741         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3742         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3743         dmae->comp_val = DMAE_COMP_VAL;
3744
3745         *stats_comp = 0;
3746 }
3747
3748 static void bnx2x_stats_start(struct bnx2x *bp)
3749 {
3750         if (bp->port.pmf)
3751                 bnx2x_port_stats_init(bp);
3752
3753         else if (bp->func_stx)
3754                 bnx2x_func_stats_init(bp);
3755
3756         bnx2x_hw_stats_post(bp);
3757         bnx2x_storm_stats_post(bp);
3758 }
3759
3760 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3761 {
3762         bnx2x_stats_comp(bp);
3763         bnx2x_stats_pmf_update(bp);
3764         bnx2x_stats_start(bp);
3765 }
3766
3767 static void bnx2x_stats_restart(struct bnx2x *bp)
3768 {
3769         bnx2x_stats_comp(bp);
3770         bnx2x_stats_start(bp);
3771 }
3772
3773 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3774 {
3775         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3776         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3777         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3778         struct {
3779                 u32 lo;
3780                 u32 hi;
3781         } diff;
3782
3783         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3784         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3785         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3786         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3787         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3788         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3789         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3790         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3791         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3792         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3793         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3794         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3795         UPDATE_STAT64(tx_stat_gt127,
3796                                 tx_stat_etherstatspkts65octetsto127octets);
3797         UPDATE_STAT64(tx_stat_gt255,
3798                                 tx_stat_etherstatspkts128octetsto255octets);
3799         UPDATE_STAT64(tx_stat_gt511,
3800                                 tx_stat_etherstatspkts256octetsto511octets);
3801         UPDATE_STAT64(tx_stat_gt1023,
3802                                 tx_stat_etherstatspkts512octetsto1023octets);
3803         UPDATE_STAT64(tx_stat_gt1518,
3804                                 tx_stat_etherstatspkts1024octetsto1522octets);
3805         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3806         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3807         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3808         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3809         UPDATE_STAT64(tx_stat_gterr,
3810                                 tx_stat_dot3statsinternalmactransmiterrors);
3811         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3812
3813         estats->pause_frames_received_hi =
3814                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3815         estats->pause_frames_received_lo =
3816                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3817
3818         estats->pause_frames_sent_hi =
3819                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3820         estats->pause_frames_sent_lo =
3821                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3822 }
3823
3824 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3825 {
3826         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3827         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3828         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3829
3830         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3831         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3832         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3833         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3834         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3835         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3836         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3837         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3838         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3839         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3840         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3841         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3842         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3843         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3844         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3845         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3846         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3847         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3848         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3849         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3850         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3851         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3852         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3853         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3854         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3855         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3856         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3857         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3858         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3859         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3860         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3861
3862         estats->pause_frames_received_hi =
3863                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3864         estats->pause_frames_received_lo =
3865                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3866         ADD_64(estats->pause_frames_received_hi,
3867                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3868                estats->pause_frames_received_lo,
3869                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3870
3871         estats->pause_frames_sent_hi =
3872                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3873         estats->pause_frames_sent_lo =
3874                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3875         ADD_64(estats->pause_frames_sent_hi,
3876                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3877                estats->pause_frames_sent_lo,
3878                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3879 }
3880
3881 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3882 {
3883         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3884         struct nig_stats *old = &(bp->port.old_nig_stats);
3885         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3886         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3887         struct {
3888                 u32 lo;
3889                 u32 hi;
3890         } diff;
3891         u32 nig_timer_max;
3892
3893         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3894                 bnx2x_bmac_stats_update(bp);
3895
3896         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3897                 bnx2x_emac_stats_update(bp);
3898
3899         else { /* unreached */
3900                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3901                 return -1;
3902         }
3903
3904         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3905                       new->brb_discard - old->brb_discard);
3906         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3907                       new->brb_truncate - old->brb_truncate);
3908
3909         UPDATE_STAT64_NIG(egress_mac_pkt0,
3910                                         etherstatspkts1024octetsto1522octets);
3911         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3912
3913         memcpy(old, new, sizeof(struct nig_stats));
3914
3915         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3916                sizeof(struct mac_stx));
3917         estats->brb_drop_hi = pstats->brb_drop_hi;
3918         estats->brb_drop_lo = pstats->brb_drop_lo;
3919
3920         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3921
3922         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3923         if (nig_timer_max != estats->nig_timer_max) {
3924                 estats->nig_timer_max = nig_timer_max;
3925                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3926         }
3927
3928         return 0;
3929 }
3930
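/*
 * Fold the firmware (storm) statistics into the function and
 * per-queue counters.  The drv_counter handshake guards against
 * staleness: the driver posts bp->stats_counter in each STAT_QUERY
 * ramrod before incrementing it, and each storm echoes the counter it
 * processed per client, so an echoed value + 1 that does not match
 * means the update has not landed yet and the function bails out with
 * a nonzero code, keeping the old values.
 */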
3931 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3932 {
3933         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3934         struct tstorm_per_port_stats *tport =
3935                                         &stats->tstorm_common.port_statistics;
3936         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3937         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3938         int i;
3939
3940         memcpy(&(fstats->total_bytes_received_hi),
3941                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3942                sizeof(struct host_func_stats) - 2*sizeof(u32));
3943         estats->error_bytes_received_hi = 0;
3944         estats->error_bytes_received_lo = 0;
3945         estats->etherstatsoverrsizepkts_hi = 0;
3946         estats->etherstatsoverrsizepkts_lo = 0;
3947         estats->no_buff_discard_hi = 0;
3948         estats->no_buff_discard_lo = 0;
3949
3950         for_each_rx_queue(bp, i) {
3951                 struct bnx2x_fastpath *fp = &bp->fp[i];
3952                 int cl_id = fp->cl_id;
3953                 struct tstorm_per_client_stats *tclient =
3954                                 &stats->tstorm_common.client_statistics[cl_id];
3955                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3956                 struct ustorm_per_client_stats *uclient =
3957                                 &stats->ustorm_common.client_statistics[cl_id];
3958                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3959                 struct xstorm_per_client_stats *xclient =
3960                                 &stats->xstorm_common.client_statistics[cl_id];
3961                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3962                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3963                 u32 diff;
3964
3965                 /* are storm stats valid? */
3966                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3967                                                         bp->stats_counter) {
3968                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3969                            "  xstorm counter (%d) != stats_counter (%d)\n",
3970                            i, xclient->stats_counter, bp->stats_counter);
3971                         return -1;
3972                 }
3973                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3974                                                         bp->stats_counter) {
3975                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3976                            "  tstorm counter (%d) != stats_counter (%d)\n",
3977                            i, tclient->stats_counter, bp->stats_counter);
3978                         return -2;
3979                 }
3980                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3981                                                         bp->stats_counter) {
3982                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3983                            "  ustorm counter (%d) != stats_counter (%d)\n",
3984                            i, uclient->stats_counter, bp->stats_counter);
3985                         return -4;
3986                 }
3987
3988                 qstats->total_bytes_received_hi =
3989                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3990                 qstats->total_bytes_received_lo =
3991                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3992
3993                 ADD_64(qstats->total_bytes_received_hi,
3994                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3995                        qstats->total_bytes_received_lo,
3996                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3997
3998                 ADD_64(qstats->total_bytes_received_hi,
3999                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4000                        qstats->total_bytes_received_lo,
4001                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4002
4003                 qstats->valid_bytes_received_hi =
4004                                         qstats->total_bytes_received_hi;
4005                 qstats->valid_bytes_received_lo =
4006                                         qstats->total_bytes_received_lo;
4007
4008                 qstats->error_bytes_received_hi =
4009                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4010                 qstats->error_bytes_received_lo =
4011                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4012
4013                 ADD_64(qstats->total_bytes_received_hi,
4014                        qstats->error_bytes_received_hi,
4015                        qstats->total_bytes_received_lo,
4016                        qstats->error_bytes_received_lo);
4017
4018                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4019                                         total_unicast_packets_received);
4020                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4021                                         total_multicast_packets_received);
4022                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4023                                         total_broadcast_packets_received);
4024                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4025                                         etherstatsoverrsizepkts);
4026                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4027
4028                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4029                                         total_unicast_packets_received);
4030                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4031                                         total_multicast_packets_received);
4032                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4033                                         total_broadcast_packets_received);
4034                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4035                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4036                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4037
4038                 qstats->total_bytes_transmitted_hi =
4039                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4040                 qstats->total_bytes_transmitted_lo =
4041                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4042
4043                 ADD_64(qstats->total_bytes_transmitted_hi,
4044                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4045                        qstats->total_bytes_transmitted_lo,
4046                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4047
4048                 ADD_64(qstats->total_bytes_transmitted_hi,
4049                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4050                        qstats->total_bytes_transmitted_lo,
4051                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4052
4053                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4054                                         total_unicast_packets_transmitted);
4055                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4056                                         total_multicast_packets_transmitted);
4057                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4058                                         total_broadcast_packets_transmitted);
4059
4060                 old_tclient->checksum_discard = tclient->checksum_discard;
4061                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4062
4063                 ADD_64(fstats->total_bytes_received_hi,
4064                        qstats->total_bytes_received_hi,
4065                        fstats->total_bytes_received_lo,
4066                        qstats->total_bytes_received_lo);
4067                 ADD_64(fstats->total_bytes_transmitted_hi,
4068                        qstats->total_bytes_transmitted_hi,
4069                        fstats->total_bytes_transmitted_lo,
4070                        qstats->total_bytes_transmitted_lo);
4071                 ADD_64(fstats->total_unicast_packets_received_hi,
4072                        qstats->total_unicast_packets_received_hi,
4073                        fstats->total_unicast_packets_received_lo,
4074                        qstats->total_unicast_packets_received_lo);
4075                 ADD_64(fstats->total_multicast_packets_received_hi,
4076                        qstats->total_multicast_packets_received_hi,
4077                        fstats->total_multicast_packets_received_lo,
4078                        qstats->total_multicast_packets_received_lo);
4079                 ADD_64(fstats->total_broadcast_packets_received_hi,
4080                        qstats->total_broadcast_packets_received_hi,
4081                        fstats->total_broadcast_packets_received_lo,
4082                        qstats->total_broadcast_packets_received_lo);
4083                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4084                        qstats->total_unicast_packets_transmitted_hi,
4085                        fstats->total_unicast_packets_transmitted_lo,
4086                        qstats->total_unicast_packets_transmitted_lo);
4087                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4088                        qstats->total_multicast_packets_transmitted_hi,
4089                        fstats->total_multicast_packets_transmitted_lo,
4090                        qstats->total_multicast_packets_transmitted_lo);
4091                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4092                        qstats->total_broadcast_packets_transmitted_hi,
4093                        fstats->total_broadcast_packets_transmitted_lo,
4094                        qstats->total_broadcast_packets_transmitted_lo);
4095                 ADD_64(fstats->valid_bytes_received_hi,
4096                        qstats->valid_bytes_received_hi,
4097                        fstats->valid_bytes_received_lo,
4098                        qstats->valid_bytes_received_lo);
4099
4100                 ADD_64(estats->error_bytes_received_hi,
4101                        qstats->error_bytes_received_hi,
4102                        estats->error_bytes_received_lo,
4103                        qstats->error_bytes_received_lo);
4104                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4105                        qstats->etherstatsoverrsizepkts_hi,
4106                        estats->etherstatsoverrsizepkts_lo,
4107                        qstats->etherstatsoverrsizepkts_lo);
4108                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4109                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4110         }
4111
4112         ADD_64(fstats->total_bytes_received_hi,
4113                estats->rx_stat_ifhcinbadoctets_hi,
4114                fstats->total_bytes_received_lo,
4115                estats->rx_stat_ifhcinbadoctets_lo);
4116
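	/* the 2*sizeof(u32) trims what are assumed to be the
	 * host_func_stats_start/end marker words of the struct */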
4117         memcpy(estats, &(fstats->total_bytes_received_hi),
4118                sizeof(struct host_func_stats) - 2*sizeof(u32));
4119
4120         ADD_64(estats->etherstatsoverrsizepkts_hi,
4121                estats->rx_stat_dot3statsframestoolong_hi,
4122                estats->etherstatsoverrsizepkts_lo,
4123                estats->rx_stat_dot3statsframestoolong_lo);
4124         ADD_64(estats->error_bytes_received_hi,
4125                estats->rx_stat_ifhcinbadoctets_hi,
4126                estats->error_bytes_received_lo,
4127                estats->rx_stat_ifhcinbadoctets_lo);
4128
4129         if (bp->port.pmf) {
4130                 estats->mac_filter_discard =
4131                                 le32_to_cpu(tport->mac_filter_discard);
4132                 estats->xxoverflow_discard =
4133                                 le32_to_cpu(tport->xxoverflow_discard);
4134                 estats->brb_truncate_discard =
4135                                 le32_to_cpu(tport->brb_truncate_discard);
4136                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4137         }
4138
4139         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4140
4141         bp->stats_pending = 0;
4142
4143         return 0;
4144 }
4145
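/* The storm statistics above are kept as {hi, lo} pairs of u32s rather
 * than native u64s, so they can live in firmware-shared structures.
 * A minimal sketch of the carry-propagating add that ADD_64 is assumed
 * to perform (the real macro is defined in bnx2x.h):
 *
 *	s_lo += a_lo;
 *	if (s_lo < a_lo)	(the u32 sum wrapped: carry into hi)
 *		s_hi++;
 *	s_hi += a_hi;
 *
 * UPDATE_EXTEND_*STAT and SUB_EXTEND_USTAT are assumed to extend the
 * firmware's narrower counters using the same {hi, lo} convention.
 */
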
4146 static void bnx2x_net_stats_update(struct bnx2x *bp)
4147 {
4148         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4149         struct net_device_stats *nstats = &bp->dev->stats;
4150         int i;
4151
4152         nstats->rx_packets =
4153                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4154                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4155                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4156
4157         nstats->tx_packets =
4158                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4159                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4160                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4161
4162         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4163
4164         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4165
4166         nstats->rx_dropped = estats->mac_discard;
4167         for_each_rx_queue(bp, i)
4168                 nstats->rx_dropped +=
4169                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4170
4171         nstats->tx_dropped = 0;
4172
4173         nstats->multicast =
4174                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4175
4176         nstats->collisions =
4177                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4178
4179         nstats->rx_length_errors =
4180                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4181                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4182         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4183                                  bnx2x_hilo(&estats->brb_truncate_hi);
4184         nstats->rx_crc_errors =
4185                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4186         nstats->rx_frame_errors =
4187                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4188         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4189         nstats->rx_missed_errors = estats->xxoverflow_discard;
4190
4191         nstats->rx_errors = nstats->rx_length_errors +
4192                             nstats->rx_over_errors +
4193                             nstats->rx_crc_errors +
4194                             nstats->rx_frame_errors +
4195                             nstats->rx_fifo_errors +
4196                             nstats->rx_missed_errors;
4197
4198         nstats->tx_aborted_errors =
4199                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4200                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4201         nstats->tx_carrier_errors =
4202                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4203         nstats->tx_fifo_errors = 0;
4204         nstats->tx_heartbeat_errors = 0;
4205         nstats->tx_window_errors = 0;
4206
4207         nstats->tx_errors = nstats->tx_aborted_errors +
4208                             nstats->tx_carrier_errors +
4209             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4210 }
4211
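/* bnx2x_hilo() is assumed to collapse one of the {hi, lo} counter pairs
 * back into a single scalar for struct net_device_stats.  A minimal
 * sketch under that assumption (the _hi word is declared immediately
 * before its _lo partner, which is why only the _hi address is passed):
 *
 *	static inline u64 hilo_u64(const u32 *hiref)
 *	{
 *		return ((u64)hiref[0] << 32) | hiref[1];
 *	}
 *
 * The name hilo_u64 is illustrative only; the real helper is defined
 * earlier in this file.
 */
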
4212 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4213 {
4214         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4215         int i;
4216
4217         estats->driver_xoff = 0;
4218         estats->rx_err_discard_pkt = 0;
4219         estats->rx_skb_alloc_failed = 0;
4220         estats->hw_csum_err = 0;
4221         for_each_rx_queue(bp, i) {
4222                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4223
4224                 estats->driver_xoff += qstats->driver_xoff;
4225                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4226                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4227                 estats->hw_csum_err += qstats->hw_csum_err;
4228         }
4229 }
4230
4231 static void bnx2x_stats_update(struct bnx2x *bp)
4232 {
4233         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4234
4235         if (*stats_comp != DMAE_COMP_VAL)
4236                 return;
4237
4238         if (bp->port.pmf)
4239                 bnx2x_hw_stats_update(bp);
4240
4241         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4242                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4243                 bnx2x_panic();
4244                 return;
4245         }
4246
4247         bnx2x_net_stats_update(bp);
4248         bnx2x_drv_stats_update(bp);
4249
4250         if (bp->msglevel & NETIF_MSG_TIMER) {
4251                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4252                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4253                 struct tstorm_per_client_stats *old_tclient =
4254                                                         &bp->fp->old_tclient;
4255                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4256                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4257                 struct net_device_stats *nstats = &bp->dev->stats;
4258                 int i;
4259
4260                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4261                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4262                                   "  tx pkt (%lx)\n",
4263                        bnx2x_tx_avail(fp0_tx),
4264                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4265                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4266                                   "  rx pkt (%lx)\n",
4267                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4268                              fp0_rx->rx_comp_cons),
4269                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4270                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4271                                   "brb truncate %u\n",
4272                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4273                        qstats->driver_xoff,
4274                        estats->brb_drop_lo, estats->brb_truncate_lo);
4275                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4276                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4277                         "mac_discard %u  mac_filter_discard %u  "
4278                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4279                         "ttl0_discard %u\n",
4280                        le32_to_cpu(old_tclient->checksum_discard),
4281                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4282                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4283                        estats->mac_discard, estats->mac_filter_discard,
4284                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4285                        le32_to_cpu(old_tclient->ttl0_discard));
4286
4287                 for_each_queue(bp, i) {
4288                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4289                                bnx2x_fp(bp, i, tx_pkt),
4290                                bnx2x_fp(bp, i, rx_pkt),
4291                                bnx2x_fp(bp, i, rx_calls));
4292                 }
4293         }
4294
4295         bnx2x_hw_stats_post(bp);
4296         bnx2x_storm_stats_post(bp);
4297 }
4298
4299 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4300 {
4301         struct dmae_command *dmae;
4302         u32 opcode;
4303         int loader_idx = PMF_DMAE_C(bp);
4304         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4305
4306         bp->executer_idx = 0;
4307
4308         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4309                   DMAE_CMD_C_ENABLE |
4310                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4311 #ifdef __BIG_ENDIAN
4312                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4313 #else
4314                   DMAE_CMD_ENDIANITY_DW_SWAP |
4315 #endif
4316                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4317                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4318
4319         if (bp->port.port_stx) {
4320
4321                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4322                 if (bp->func_stx)
4323                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4324                 else
4325                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4326                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4327                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4328                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4329                 dmae->dst_addr_hi = 0;
4330                 dmae->len = sizeof(struct host_port_stats) >> 2;
4331                 if (bp->func_stx) {
4332                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4333                         dmae->comp_addr_hi = 0;
4334                         dmae->comp_val = 1;
4335                 } else {
4336                         dmae->comp_addr_lo =
4337                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4338                         dmae->comp_addr_hi =
4339                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4340                         dmae->comp_val = DMAE_COMP_VAL;
4341
4342                         *stats_comp = 0;
4343                 }
4344         }
4345
4346         if (bp->func_stx) {
4347
4348                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4349                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4350                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4351                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4352                 dmae->dst_addr_lo = bp->func_stx >> 2;
4353                 dmae->dst_addr_hi = 0;
4354                 dmae->len = sizeof(struct host_func_stats) >> 2;
4355                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4356                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4357                 dmae->comp_val = DMAE_COMP_VAL;
4358
4359                 *stats_comp = 0;
4360         }
4361 }
4362
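/* Two DMAE completion flavours are used above.  When a function-stats
 * copy follows (bp->func_stx set), the port-stats command completes to
 * GRC with comp_val 1 on the loader "go" register, so hardware chains
 * straight into the next command.  Otherwise the command completes to
 * PCI, writing DMAE_COMP_VAL into the stats_comp word, which
 * bnx2x_stats_comp() is assumed to poll roughly like:
 *
 *	while (*stats_comp != DMAE_COMP_VAL)
 *		cpu_relax();
 *
 * (sketch only -- the real helper, defined earlier in this file, also
 * bounds the wait)
 */
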
4363 static void bnx2x_stats_stop(struct bnx2x *bp)
4364 {
4365         int update = 0;
4366
4367         bnx2x_stats_comp(bp);
4368
4369         if (bp->port.pmf)
4370                 update = (bnx2x_hw_stats_update(bp) == 0);
4371
4372         update |= (bnx2x_storm_stats_update(bp) == 0);
4373
4374         if (update) {
4375                 bnx2x_net_stats_update(bp);
4376
4377                 if (bp->port.pmf)
4378                         bnx2x_port_stats_stop(bp);
4379
4380                 bnx2x_hw_stats_post(bp);
4381                 bnx2x_stats_comp(bp);
4382         }
4383 }
4384
4385 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4386 {
4387 }
4388
4389 static const struct {
4390         void (*action)(struct bnx2x *bp);
4391         enum bnx2x_stats_state next_state;
4392 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4393 /* state        event   */
4394 {
4395 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4396 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4397 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4398 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4399 },
4400 {
4401 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4402 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4403 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4404 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4405 }
4406 };
4407
4408 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4409 {
4410         enum bnx2x_stats_state state = bp->stats_state;
4411
4412         bnx2x_stats_stm[state][event].action(bp);
4413         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4414
4415         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4416                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4417                    state, event, bp->stats_state);
4418 }
4419
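/* The 2x4 table above fully determines statistics behaviour: callers
 * only post events, and the table picks both the action and the next
 * state.  For example, a link-up event while DISABLED starts collection:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 *		action:     bnx2x_stats_start(bp)
 *		next state: STATS_STATE_ENABLED
 *
 * while STATS_EVENT_UPDATE in ENABLED runs bnx2x_stats_update() and
 * stays in ENABLED, so the periodic timer may post it unconditionally.
 */
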
4420 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4421 {
4422         struct dmae_command *dmae;
4423         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4424
4425         /* sanity */
4426         if (!bp->port.pmf || !bp->port.port_stx) {
4427                 BNX2X_ERR("BUG!\n");
4428                 return;
4429         }
4430
4431         bp->executer_idx = 0;
4432
4433         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4434         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4435                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4436                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4437 #ifdef __BIG_ENDIAN
4438                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4439 #else
4440                         DMAE_CMD_ENDIANITY_DW_SWAP |
4441 #endif
4442                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4443                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4444         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4445         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4446         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4447         dmae->dst_addr_hi = 0;
4448         dmae->len = sizeof(struct host_port_stats) >> 2;
4449         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4450         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4451         dmae->comp_val = DMAE_COMP_VAL;
4452
4453         *stats_comp = 0;
4454         bnx2x_hw_stats_post(bp);
4455         bnx2x_stats_comp(bp);
4456 }
4457
4458 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4459 {
4460         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4461         int port = BP_PORT(bp);
4462         int func;
4463         u32 func_stx;
4464
4465         /* sanity */
4466         if (!bp->port.pmf || !bp->func_stx) {
4467                 BNX2X_ERR("BUG!\n");
4468                 return;
4469         }
4470
4471         /* save our func_stx */
4472         func_stx = bp->func_stx;
4473
4474         for (vn = VN_0; vn < vn_max; vn++) {
4475                 func = 2*vn + port;
4476
4477                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4478                 bnx2x_func_stats_init(bp);
4479                 bnx2x_hw_stats_post(bp);
4480                 bnx2x_stats_comp(bp);
4481         }
4482
4483         /* restore our func_stx */
4484         bp->func_stx = func_stx;
4485 }
4486
4487 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4488 {
4489         struct dmae_command *dmae = &bp->stats_dmae;
4490         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4491
4492         /* sanity */
4493         if (!bp->func_stx) {
4494                 BNX2X_ERR("BUG!\n");
4495                 return;
4496         }
4497
4498         bp->executer_idx = 0;
4499         memset(dmae, 0, sizeof(struct dmae_command));
4500
4501         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4502                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4503                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4504 #ifdef __BIG_ENDIAN
4505                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4506 #else
4507                         DMAE_CMD_ENDIANITY_DW_SWAP |
4508 #endif
4509                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4510                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4511         dmae->src_addr_lo = bp->func_stx >> 2;
4512         dmae->src_addr_hi = 0;
4513         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4514         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4515         dmae->len = sizeof(struct host_func_stats) >> 2;
4516         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4517         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4518         dmae->comp_val = DMAE_COMP_VAL;
4519
4520         *stats_comp = 0;
4521         bnx2x_hw_stats_post(bp);
4522         bnx2x_stats_comp(bp);
4523 }
4524
4525 static void bnx2x_stats_init(struct bnx2x *bp)
4526 {
4527         int port = BP_PORT(bp);
4528         int func = BP_FUNC(bp);
4529         int i;
4530
4531         bp->stats_pending = 0;
4532         bp->executer_idx = 0;
4533         bp->stats_counter = 0;
4534
4535         /* port and func stats for management */
4536         if (!BP_NOMCP(bp)) {
4537                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4538                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4539
4540         } else {
4541                 bp->port.port_stx = 0;
4542                 bp->func_stx = 0;
4543         }
4544         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4545            bp->port.port_stx, bp->func_stx);
4546
4547         /* port stats */
4548         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4549         bp->port.old_nig_stats.brb_discard =
4550                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4551         bp->port.old_nig_stats.brb_truncate =
4552                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4553         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4554                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4555         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4556                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4557
4558         /* function stats */
4559         for_each_queue(bp, i) {
4560                 struct bnx2x_fastpath *fp = &bp->fp[i];
4561
4562                 memset(&fp->old_tclient, 0,
4563                        sizeof(struct tstorm_per_client_stats));
4564                 memset(&fp->old_uclient, 0,
4565                        sizeof(struct ustorm_per_client_stats));
4566                 memset(&fp->old_xclient, 0,
4567                        sizeof(struct xstorm_per_client_stats));
4568                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4569         }
4570
4571         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4572         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4573
4574         bp->stats_state = STATS_STATE_DISABLED;
4575
4576         if (bp->port.pmf) {
4577                 if (bp->port.port_stx)
4578                         bnx2x_port_stats_base_init(bp);
4579
4580                 if (bp->func_stx)
4581                         bnx2x_func_stats_base_init(bp);
4582
4583         } else if (bp->func_stx)
4584                 bnx2x_func_stats_base_update(bp);
4585 }
4586
4587 static void bnx2x_timer(unsigned long data)
4588 {
4589         struct bnx2x *bp = (struct bnx2x *) data;
4590
4591         if (!netif_running(bp->dev))
4592                 return;
4593
4594         if (atomic_read(&bp->intr_sem) != 0)
4595                 goto timer_restart;
4596
4597         if (poll) {
4598                 struct bnx2x_fastpath *fp = &bp->fp[0];
4599 
4600                 bnx2x_tx_int(fp);
4601                 /* poll mode: the Rx budget return value is not needed */
4602                 bnx2x_rx_int(fp, 1000);
4603         }
4604
4605         if (!BP_NOMCP(bp)) {
4606                 int func = BP_FUNC(bp);
4607                 u32 drv_pulse;
4608                 u32 mcp_pulse;
4609
4610                 ++bp->fw_drv_pulse_wr_seq;
4611                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4612                 /* TBD - add SYSTEM_TIME */
4613                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4614                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4615
4616                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4617                              MCP_PULSE_SEQ_MASK);
4618                 /* The delta between driver pulse and mcp response
4619                  * should be 1 (before mcp response) or 0 (after mcp response)
4620                  */
4621                 if ((drv_pulse != mcp_pulse) &&
4622                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4623                         /* someone lost a heartbeat... */
4624                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4625                                   drv_pulse, mcp_pulse);
4626                 }
4627         }
4628
4629         if ((bp->state == BNX2X_STATE_OPEN) ||
4630             (bp->state == BNX2X_STATE_DISABLED))
4631                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4632
4633 timer_restart:
4634         mod_timer(&bp->timer, jiffies + bp->current_interval);
4635 }
4636
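/* The pulse check above is modular arithmetic over DRV_PULSE_SEQ_MASK:
 * since the driver writes first, a healthy MCP is either fully caught
 * up (delta 0) or exactly one step behind (delta 1).  A worked example,
 * assuming an 8-bit mask of 0xff purely for illustration:
 *
 *	drv_pulse = 0x00 (just wrapped), mcp_pulse = 0xff
 *	(mcp_pulse + 1) & 0xff == 0x00 == drv_pulse  ->  healthy
 *
 * Any other delta means a missed heartbeat and triggers the error
 * print above.
 */
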
4637 /* end of Statistics */
4638
4639 /* nic init */
4640
4641 /*
4642  * nic init service functions
4643  */
4644
4645 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4646 {
4647         int port = BP_PORT(bp);
4648
4649         /* "CSTORM" */
4650         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4651                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4652                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4653         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4654                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4655                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4656 }
4657
4658 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4659                           dma_addr_t mapping, int sb_id)
4660 {
4661         int port = BP_PORT(bp);
4662         int func = BP_FUNC(bp);
4663         int index;
4664         u64 section;
4665
4666         /* USTORM */
4667         section = ((u64)mapping) + offsetof(struct host_status_block,
4668                                             u_status_block);
4669         sb->u_status_block.status_block_id = sb_id;
4670
4671         REG_WR(bp, BAR_CSTRORM_INTMEM +
4672                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4673         REG_WR(bp, BAR_CSTRORM_INTMEM +
4674                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4675                U64_HI(section));
4676         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4677                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4678
4679         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4680                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4681                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4682
4683         /* CSTORM */
4684         section = ((u64)mapping) + offsetof(struct host_status_block,
4685                                             c_status_block);
4686         sb->c_status_block.status_block_id = sb_id;
4687
4688         REG_WR(bp, BAR_CSTRORM_INTMEM +
4689                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4690         REG_WR(bp, BAR_CSTRORM_INTMEM +
4691                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4692                U64_HI(section));
4693         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4694                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4695
4696         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4697                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4698                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4699
4700         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4701 }
4702
4703 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4704 {
4705         int func = BP_FUNC(bp);
4706
4707         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4708                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4709                         sizeof(struct tstorm_def_status_block)/4);
4710         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4711                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4712                         sizeof(struct cstorm_def_status_block_u)/4);
4713         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4714                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4715                         sizeof(struct cstorm_def_status_block_c)/4);
4716         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4717                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4718                         sizeof(struct xstorm_def_status_block)/4);
4719 }
4720
4721 static void bnx2x_init_def_sb(struct bnx2x *bp,
4722                               struct host_def_status_block *def_sb,
4723                               dma_addr_t mapping, int sb_id)
4724 {
4725         int port = BP_PORT(bp);
4726         int func = BP_FUNC(bp);
4727         int index, val, reg_offset;
4728         u64 section;
4729
4730         /* ATTN */
4731         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4732                                             atten_status_block);
4733         def_sb->atten_status_block.status_block_id = sb_id;
4734
4735         bp->attn_state = 0;
4736
4737         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4738                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4739
4740         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4741                 bp->attn_group[index].sig[0] = REG_RD(bp,
4742                                                      reg_offset + 0x10*index);
4743                 bp->attn_group[index].sig[1] = REG_RD(bp,
4744                                                reg_offset + 0x4 + 0x10*index);
4745                 bp->attn_group[index].sig[2] = REG_RD(bp,
4746                                                reg_offset + 0x8 + 0x10*index);
4747                 bp->attn_group[index].sig[3] = REG_RD(bp,
4748                                                reg_offset + 0xc + 0x10*index);
4749         }
4750
4751         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4752                              HC_REG_ATTN_MSG0_ADDR_L);
4753
4754         REG_WR(bp, reg_offset, U64_LO(section));
4755         REG_WR(bp, reg_offset + 4, U64_HI(section));
4756
4757         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4758
4759         val = REG_RD(bp, reg_offset);
4760         val |= sb_id;
4761         REG_WR(bp, reg_offset, val);
4762
4763         /* USTORM */
4764         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4765                                             u_def_status_block);
4766         def_sb->u_def_status_block.status_block_id = sb_id;
4767
4768         REG_WR(bp, BAR_CSTRORM_INTMEM +
4769                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4770         REG_WR(bp, BAR_CSTRORM_INTMEM +
4771                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4772                U64_HI(section));
4773         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4774                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4775
4776         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4777                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4778                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4779
4780         /* CSTORM */
4781         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4782                                             c_def_status_block);
4783         def_sb->c_def_status_block.status_block_id = sb_id;
4784
4785         REG_WR(bp, BAR_CSTRORM_INTMEM +
4786                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4787         REG_WR(bp, BAR_CSTRORM_INTMEM +
4788                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4789                U64_HI(section));
4790         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4791                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4792
4793         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4794                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4795                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4796
4797         /* TSTORM */
4798         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4799                                             t_def_status_block);
4800         def_sb->t_def_status_block.status_block_id = sb_id;
4801
4802         REG_WR(bp, BAR_TSTRORM_INTMEM +
4803                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4804         REG_WR(bp, BAR_TSTRORM_INTMEM +
4805                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4806                U64_HI(section));
4807         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4808                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4809
4810         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4811                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4812                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4813
4814         /* XSTORM */
4815         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816                                             x_def_status_block);
4817         def_sb->x_def_status_block.status_block_id = sb_id;
4818
4819         REG_WR(bp, BAR_XSTRORM_INTMEM +
4820                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4821         REG_WR(bp, BAR_XSTRORM_INTMEM +
4822                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4823                U64_HI(section));
4824         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4825                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4826
4827         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4828                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4829                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4830
4831         bp->stats_pending = 0;
4832         bp->set_mac_pending = 0;
4833
4834         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4835 }
4836
4837 static void bnx2x_update_coalesce(struct bnx2x *bp)
4838 {
4839         int port = BP_PORT(bp);
4840         int i;
4841
4842         for_each_queue(bp, i) {
4843                 int sb_id = bp->fp[i].sb_id;
4844
4845                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4846                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4847                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4848                                                       U_SB_ETH_RX_CQ_INDEX),
4849                         bp->rx_ticks/12);
4850                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4851                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4852                                                        U_SB_ETH_RX_CQ_INDEX),
4853                          (bp->rx_ticks/12) ? 0 : 1);
4854
4855                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4856                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4857                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4858                                                       C_SB_ETH_TX_CQ_INDEX),
4859                         bp->tx_ticks/12);
4860                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4861                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4862                                                        C_SB_ETH_TX_CQ_INDEX),
4863                          (bp->tx_ticks/12) ? 0 : 1);
4864         }
4865 }
4866
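/* rx_ticks/tx_ticks carry the user's coalescing settings in
 * microseconds, while the HC timeout registers are assumed to count in
 * 12us units -- hence the /12 above.  A sub-12us setting yields a zero
 * timeout that could never fire, so the paired HC_DISABLE word turns
 * the index off instead:
 *
 *	u8 timeout = usec / 12;
 *	REG_WR8 (bp, TIMEOUT_OFF, timeout);
 *	REG_WR16(bp, DISABLE_OFF, timeout ? 0 : 1);
 *
 * (TIMEOUT_OFF/DISABLE_OFF stand in for the CSTORM offsets used above.)
 */
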
4867 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4868                                        struct bnx2x_fastpath *fp, int last)
4869 {
4870         int i;
4871
4872         for (i = 0; i < last; i++) {
4873                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4874                 struct sk_buff *skb = rx_buf->skb;
4875
4876                 if (skb == NULL) {
4877                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4878                         continue;
4879                 }
4880
4881                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4882                         pci_unmap_single(bp->pdev,
4883                                          pci_unmap_addr(rx_buf, mapping),
4884                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4885
4886                 dev_kfree_skb(skb);
4887                 rx_buf->skb = NULL;
4888         }
4889 }
4890
4891 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4892 {
4893         int func = BP_FUNC(bp);
4894         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4895                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4896         u16 ring_prod, cqe_ring_prod;
4897         int i, j;
4898
4899         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4900         DP(NETIF_MSG_IFUP,
4901            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4902
4903         if (bp->flags & TPA_ENABLE_FLAG) {
4904
4905                 for_each_rx_queue(bp, j) {
4906                         struct bnx2x_fastpath *fp = &bp->fp[j];
4907
4908                         for (i = 0; i < max_agg_queues; i++) {
4909                                 fp->tpa_pool[i].skb =
4910                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4911                                 if (!fp->tpa_pool[i].skb) {
4912                                         BNX2X_ERR("Failed to allocate TPA "
4913                                                   "skb pool for queue[%d] - "
4914                                                   "disabling TPA on this "
4915                                                   "queue!\n", j);
4916                                         bnx2x_free_tpa_pool(bp, fp, i);
4917                                         fp->disable_tpa = 1;
4918                                         break;
4919                                 }
4920                                 /* this queue's pool, not bp->fp[0]'s */
4921                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4922                                                    mapping, 0);
4923                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4924                         }
4925                 }
4926         }
4927
4928         for_each_rx_queue(bp, j) {
4929                 struct bnx2x_fastpath *fp = &bp->fp[j];
4930
4931                 fp->rx_bd_cons = 0;
4932                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4933                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4934
4935                 /* Mark queue as Rx */
4936                 fp->is_rx_queue = 1;
4937
4938                 /* "next page" elements initialization */
4939                 /* SGE ring */
4940                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4941                         struct eth_rx_sge *sge;
4942
4943                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4944                         sge->addr_hi =
4945                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4946                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4947                         sge->addr_lo =
4948                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4949                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4950                 }
4951
4952                 bnx2x_init_sge_ring_bit_mask(fp);
4953
4954                 /* RX BD ring */
4955                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4956                         struct eth_rx_bd *rx_bd;
4957
4958                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4959                         rx_bd->addr_hi =
4960                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4961                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4962                         rx_bd->addr_lo =
4963                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4964                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4965                 }
4966
4967                 /* CQ ring */
4968                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4969                         struct eth_rx_cqe_next_page *nextpg;
4970
4971                         nextpg = (struct eth_rx_cqe_next_page *)
4972                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4973                         nextpg->addr_hi =
4974                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4975                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4976                         nextpg->addr_lo =
4977                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4978                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4979                 }
4980
4981                 /* Allocate SGEs and initialize the ring elements */
4982                 for (i = 0, ring_prod = 0;
4983                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4984
4985                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4986                                 BNX2X_ERR("was only able to allocate "
4987                                           "%d rx sges\n", i);
4988                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4989                                 /* Cleanup already allocated elements */
4990                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4991                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4992                                 fp->disable_tpa = 1;
4993                                 ring_prod = 0;
4994                                 break;
4995                         }
4996                         ring_prod = NEXT_SGE_IDX(ring_prod);
4997                 }
4998                 fp->rx_sge_prod = ring_prod;
4999
5000                 /* Allocate BDs and initialize BD ring */
5001                 fp->rx_comp_cons = 0;
5002                 cqe_ring_prod = ring_prod = 0;
5003                 for (i = 0; i < bp->rx_ring_size; i++) {
5004                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5005                                 BNX2X_ERR("was only able to allocate "
5006                                           "%d rx skbs on queue[%d]\n", i, j);
5007                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5008                                 break;
5009                         }
5010                         ring_prod = NEXT_RX_IDX(ring_prod);
5011                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5012                         WARN_ON(ring_prod <= i);
5013                 }
5014
5015                 fp->rx_bd_prod = ring_prod;
5016                 /* must not have more available CQEs than BDs */
5017                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5018                                        cqe_ring_prod);
5019                 fp->rx_pkt = fp->rx_calls = 0;
5020
5021                 /* Warning!
5022                  * This will generate an interrupt (to the TSTORM);
5023                  * it must only be done after the chip is initialized.
5024                  */
5025                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5026                                      fp->rx_sge_prod);
5027                 if (j != 0)
5028                         continue;
5029
5030                 REG_WR(bp, BAR_USTRORM_INTMEM +
5031                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5032                        U64_LO(fp->rx_comp_mapping));
5033                 REG_WR(bp, BAR_USTRORM_INTMEM +
5034                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5035                        U64_HI(fp->rx_comp_mapping));
5036         }
5037 }
5038
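/* The "next page" loops above chain each multi-page ring: the trailing
 * slot(s) of every page hold the DMA address of the following page,
 * with (i % NUM_..._RINGS) wrapping the last page back to the first.
 * Schematically, for a three-page BD ring:
 *
 *	page0[last] -> page1,  page1[last] -> page2,  page2[last] -> page0
 *
 * This is also why producers advance via NEXT_RX_IDX()/NEXT_SGE_IDX()/
 * NEXT_RCQ_IDX() rather than a plain increment: those macros are
 * assumed to step over the link elements.
 */
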
5039 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5040 {
5041         int i, j;
5042
5043         for_each_tx_queue(bp, j) {
5044                 struct bnx2x_fastpath *fp = &bp->fp[j];
5045
5046                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5047                         struct eth_tx_next_bd *tx_next_bd =
5048                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5049
5050                         tx_next_bd->addr_hi =
5051                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5052                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5053                         tx_next_bd->addr_lo =
5054                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5055                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5056                 }
5057
5058                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5059                 fp->tx_db.data.zero_fill1 = 0;
5060                 fp->tx_db.data.prod = 0;
5061
5062                 fp->tx_pkt_prod = 0;
5063                 fp->tx_pkt_cons = 0;
5064                 fp->tx_bd_prod = 0;
5065                 fp->tx_bd_cons = 0;
5066                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5067                 fp->tx_pkt = 0;
5068         }
5069
5070         /* clean tx statistics */
5071         for_each_rx_queue(bp, i)
5072                 bnx2x_fp(bp, i, tx_pkt) = 0;
5073 }
5074
5075 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5076 {
5077         int func = BP_FUNC(bp);
5078
5079         spin_lock_init(&bp->spq_lock);
5080
5081         bp->spq_left = MAX_SPQ_PENDING;
5082         bp->spq_prod_idx = 0;
5083         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5084         bp->spq_prod_bd = bp->spq;
5085         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5086
5087         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5088                U64_LO(bp->spq_mapping));
5089         REG_WR(bp,
5090                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5091                U64_HI(bp->spq_mapping));
5092
5093         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5094                bp->spq_prod_idx);
5095 }
5096
5097 static void bnx2x_init_context(struct bnx2x *bp)
5098 {
5099         int i;
5100
5101         for_each_rx_queue(bp, i) {
5102                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5103                 struct bnx2x_fastpath *fp = &bp->fp[i];
5104                 u8 cl_id = fp->cl_id;
5105
5106                 context->ustorm_st_context.common.sb_index_numbers =
5107                                                 BNX2X_RX_SB_INDEX_NUM;
5108                 context->ustorm_st_context.common.clientId = cl_id;
5109                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5110                 context->ustorm_st_context.common.flags =
5111                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5112                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5113                 context->ustorm_st_context.common.statistics_counter_id =
5114                                                 cl_id;
5115                 context->ustorm_st_context.common.mc_alignment_log_size =
5116                                                 BNX2X_RX_ALIGN_SHIFT;
5117                 context->ustorm_st_context.common.bd_buff_size =
5118                                                 bp->rx_buf_size;
5119                 context->ustorm_st_context.common.bd_page_base_hi =
5120                                                 U64_HI(fp->rx_desc_mapping);
5121                 context->ustorm_st_context.common.bd_page_base_lo =
5122                                                 U64_LO(fp->rx_desc_mapping);
5123                 if (!fp->disable_tpa) {
5124                         context->ustorm_st_context.common.flags |=
5125                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5126                         context->ustorm_st_context.common.sge_buff_size =
5127                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5128                                          (u32)0xffff);
5129                         context->ustorm_st_context.common.sge_page_base_hi =
5130                                                 U64_HI(fp->rx_sge_mapping);
5131                         context->ustorm_st_context.common.sge_page_base_lo =
5132                                                 U64_LO(fp->rx_sge_mapping);
5133
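			/* pages needed for an MTU-sized frame, rounded up to
			 * whole SGE elements of PAGES_PER_SGE pages each */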
5134                         context->ustorm_st_context.common.max_sges_for_packet =
5135                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5136                         context->ustorm_st_context.common.max_sges_for_packet =
5137                                 ((context->ustorm_st_context.common.
5138                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5139                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5140                 }
5141
5142                 context->ustorm_ag_context.cdu_usage =
5143                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5144                                                CDU_REGION_NUMBER_UCM_AG,
5145                                                ETH_CONNECTION_TYPE);
5146
5147                 context->xstorm_ag_context.cdu_reserved =
5148                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5149                                                CDU_REGION_NUMBER_XCM_AG,
5150                                                ETH_CONNECTION_TYPE);
5151         }
5152
5153         for_each_tx_queue(bp, i) {
5154                 struct bnx2x_fastpath *fp = &bp->fp[i];
5155                 struct eth_context *context =
5156                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5157
5158                 context->cstorm_st_context.sb_index_number =
5159                                                 C_SB_ETH_TX_CQ_INDEX;
5160                 context->cstorm_st_context.status_block_id = fp->sb_id;
5161
5162                 context->xstorm_st_context.tx_bd_page_base_hi =
5163                                                 U64_HI(fp->tx_desc_mapping);
5164                 context->xstorm_st_context.tx_bd_page_base_lo =
5165                                                 U64_LO(fp->tx_desc_mapping);
5166                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5167                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5168         }
5169 }
5170
5171 static void bnx2x_init_ind_table(struct bnx2x *bp)
5172 {
5173         int func = BP_FUNC(bp);
5174         int i;
5175
5176         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5177                 return;
5178
5179         DP(NETIF_MSG_IFUP,
5180            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5181         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5182                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5183                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5184                         bp->fp->cl_id + (i % bp->num_rx_queues));
5185 }
5186
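/* The RSS indirection table spreads hash buckets over the Rx clients
 * round-robin.  With four Rx queues and a leading client id of 0, the
 * table would read:
 *
 *	bucket:  0 1 2 3 4 5 6 7 ...
 *	client:  0 1 2 3 0 1 2 3 ...	(cl_id + i % num_rx_queues)
 *
 * so a flow's RSS hash selects a bucket and the bucket selects the
 * queue.  An even spread assumes TSTORM_INDIRECTION_TABLE_SIZE is a
 * multiple of the queue count.
 */
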
5187 static void bnx2x_set_client_config(struct bnx2x *bp)
5188 {
5189         struct tstorm_eth_client_config tstorm_client = {0};
5190         int port = BP_PORT(bp);
5191         int i;
5192
5193         tstorm_client.mtu = bp->dev->mtu;
5194         tstorm_client.config_flags =
5195                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5196                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5197 #ifdef BCM_VLAN
5198         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5199                 tstorm_client.config_flags |=
5200                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5201                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5202         }
5203 #endif
5204
5205         for_each_queue(bp, i) {
5206                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5207
5208                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5209                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5210                        ((u32 *)&tstorm_client)[0]);
5211                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5212                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5213                        ((u32 *)&tstorm_client)[1]);
5214         }
5215
5216         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5217            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5218 }
5219
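/* Configuration structs are pushed into storm memory word by word
 * through the register window, so the 8-byte client config above
 * becomes two 32-bit writes:
 *
 *	u32 *w = (u32 *)&tstorm_client;
 *	REG_WR(bp, offset,     w[0]);
 *	REG_WR(bp, offset + 4, w[1]);
 *
 * (offset stands in for TSTORM_CLIENT_CONFIG_OFFSET(port, cl_id); the
 * mac-filter loop below applies the same pattern to sizeof(struct)/4
 * words.)
 */
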
5220 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5221 {
5222         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5223         int mode = bp->rx_mode;
5224         int mask = (1 << BP_L_ID(bp));
5225         int func = BP_FUNC(bp);
5226         int port = BP_PORT(bp);
5227         int i;
5228         /* All but management unicast packets should pass to the host as well */
5229         u32 llh_mask =
5230                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5231                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5232                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5233                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5234
5235         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5236
5237         switch (mode) {
5238         case BNX2X_RX_MODE_NONE: /* no Rx */
5239                 tstorm_mac_filter.ucast_drop_all = mask;
5240                 tstorm_mac_filter.mcast_drop_all = mask;
5241                 tstorm_mac_filter.bcast_drop_all = mask;
5242                 break;
5243
5244         case BNX2X_RX_MODE_NORMAL:
5245                 tstorm_mac_filter.bcast_accept_all = mask;
5246                 break;
5247
5248         case BNX2X_RX_MODE_ALLMULTI:
5249                 tstorm_mac_filter.mcast_accept_all = mask;
5250                 tstorm_mac_filter.bcast_accept_all = mask;
5251                 break;
5252
5253         case BNX2X_RX_MODE_PROMISC:
5254                 tstorm_mac_filter.ucast_accept_all = mask;
5255                 tstorm_mac_filter.mcast_accept_all = mask;
5256                 tstorm_mac_filter.bcast_accept_all = mask;
5257                 /* pass management unicast packets as well */
5258                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5259                 break;
5260
5261         default:
5262                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5263                 break;
5264         }
5265
5266         REG_WR(bp,
5267                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5268                llh_mask);
5269
5270         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5271                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5272                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5273                        ((u32 *)&tstorm_mac_filter)[i]);
5274
5275 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5276                    ((u32 *)&tstorm_mac_filter)[i]); */
5277         }
5278
5279         if (mode != BNX2X_RX_MODE_NONE)
5280                 bnx2x_set_client_config(bp);
5281 }
5282
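/* mask above is (1 << BP_L_ID(bp)): each *_drop_all / *_accept_all
 * field of tstorm_eth_mac_filter_config is treated as a per-client
 * bitmap, so writing our bit applies the policy to this function's
 * leading client only.  E.g. PROMISC for leading client 2 sets:
 *
 *	ucast_accept_all = mcast_accept_all = bcast_accept_all = (1 << 2);
 *
 * (bitmap interpretation inferred from the mask construction above)
 */
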
5283 static void bnx2x_init_internal_common(struct bnx2x *bp)
5284 {
5285         int i;
5286
5287         /* Zero this manually, as its initialization is
5288            currently missing from the initTool */
5289         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5290                 REG_WR(bp, BAR_USTRORM_INTMEM +
5291                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5292 }
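
/* The shift above converts USTORM_AGG_DATA_SIZE from bytes to 32-bit
 * words: each REG_WR() clears four bytes, hence the i * 4 offsets.
 */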
5293
5294 static void bnx2x_init_internal_port(struct bnx2x *bp)
5295 {
5296         int port = BP_PORT(bp);
5297
5298         REG_WR(bp,
5299                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5300         REG_WR(bp,
5301                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5302         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5303         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5304 }
5305
5306 static void bnx2x_init_internal_func(struct bnx2x *bp)
5307 {
5308         struct tstorm_eth_function_common_config tstorm_config = {0};
5309         struct stats_indication_flags stats_flags = {0};
5310         int port = BP_PORT(bp);
5311         int func = BP_FUNC(bp);
5312         int i, j;
5313         u32 offset;
5314         u16 max_agg_size;
5315
5316         if (is_multi(bp)) {
5317                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5318                 tstorm_config.rss_result_mask = MULTI_MASK;
5319         }
5320
5321         /* Enable TPA if needed */
5322         if (bp->flags & TPA_ENABLE_FLAG)
5323                 tstorm_config.config_flags |=
5324                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5325
5326         if (IS_E1HMF(bp))
5327                 tstorm_config.config_flags |=
5328                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5329
5330         tstorm_config.leading_client_id = BP_L_ID(bp);
5331
5332         REG_WR(bp, BAR_TSTRORM_INTMEM +
5333                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5334                (*(u32 *)&tstorm_config));
5335
5336         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5337         bnx2x_set_storm_rx_mode(bp);
5338
5339         for_each_queue(bp, i) {
5340                 u8 cl_id = bp->fp[i].cl_id;
5341
5342                 /* reset xstorm per client statistics */
5343                 offset = BAR_XSTRORM_INTMEM +
5344                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5345                 for (j = 0;
5346                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5347                         REG_WR(bp, offset + j*4, 0);
5348
5349                 /* reset tstorm per client statistics */
5350                 offset = BAR_TSTRORM_INTMEM +
5351                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5352                 for (j = 0;
5353                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5354                         REG_WR(bp, offset + j*4, 0);
5355
5356                 /* reset ustorm per client statistics */
5357                 offset = BAR_USTRORM_INTMEM +
5358                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5359                 for (j = 0;
5360                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5361                         REG_WR(bp, offset + j*4, 0);
5362         }
5363
5364         /* Init statistics related context */
5365         stats_flags.collect_eth = 1;
5366
5367         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5368                ((u32 *)&stats_flags)[0]);
5369         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5370                ((u32 *)&stats_flags)[1]);
5371
5372         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5373                ((u32 *)&stats_flags)[0]);
5374         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5375                ((u32 *)&stats_flags)[1]);
5376
5377         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5378                ((u32 *)&stats_flags)[0]);
5379         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5380                ((u32 *)&stats_flags)[1]);
5381
5382         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5383                ((u32 *)&stats_flags)[0]);
5384         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5385                ((u32 *)&stats_flags)[1]);
5386
5387         REG_WR(bp, BAR_XSTRORM_INTMEM +
5388                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5389                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5390         REG_WR(bp, BAR_XSTRORM_INTMEM +
5391                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5392                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5393
5394         REG_WR(bp, BAR_TSTRORM_INTMEM +
5395                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5396                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5397         REG_WR(bp, BAR_TSTRORM_INTMEM +
5398                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5399                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5400
5401         REG_WR(bp, BAR_USTRORM_INTMEM +
5402                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5403                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5404         REG_WR(bp, BAR_USTRORM_INTMEM +
5405                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5406                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5407
5408         if (CHIP_IS_E1H(bp)) {
5409                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5410                         IS_E1HMF(bp));
5411                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5412                         IS_E1HMF(bp));
5413                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5414                         IS_E1HMF(bp));
5415                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5416                         IS_E1HMF(bp));
5417
5418                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5419                          bp->e1hov);
5420         }
5421
5422         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5423         max_agg_size =
5424                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5425                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5426                     (u32)0xffff);
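        /* Worked example of the clamp above, with hypothetical sizes: if
         * SGE_PAGE_SIZE * PAGES_PER_SGE were 8192, eight frags would give
         * 8 * 8192 = 65536, which the outer min() clamps to 0xffff so the
         * result still fits the u16 max_agg_size written below.
         */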
5427         for_each_rx_queue(bp, i) {
5428                 struct bnx2x_fastpath *fp = &bp->fp[i];
5429
5430                 REG_WR(bp, BAR_USTRORM_INTMEM +
5431                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5432                        U64_LO(fp->rx_comp_mapping));
5433                 REG_WR(bp, BAR_USTRORM_INTMEM +
5434                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5435                        U64_HI(fp->rx_comp_mapping));
5436
5437                 /* Next page */
5438                 REG_WR(bp, BAR_USTRORM_INTMEM +
5439                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5440                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5441                 REG_WR(bp, BAR_USTRORM_INTMEM +
5442                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5443                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5444
5445                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5446                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5447                          max_agg_size);
5448         }
5449
5450         /* dropless flow control */
5451         if (CHIP_IS_E1H(bp)) {
5452                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5453
5454                 rx_pause.bd_thr_low = 250;
5455                 rx_pause.cqe_thr_low = 250;
5456                 rx_pause.cos = 1;
5457                 rx_pause.sge_thr_low = 0;
5458                 rx_pause.bd_thr_high = 350;
5459                 rx_pause.cqe_thr_high = 350;
5460                 rx_pause.sge_thr_high = 0;
5461
5462                 for_each_rx_queue(bp, i) {
5463                         struct bnx2x_fastpath *fp = &bp->fp[i];
5464
5465                         if (!fp->disable_tpa) {
5466                                 rx_pause.sge_thr_low = 150;
5467                                 rx_pause.sge_thr_high = 250;
5468                         }
5469
5470
5471                         offset = BAR_USTRORM_INTMEM +
5472                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5473                                                                    fp->cl_id);
5474                         for (j = 0;
5475                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5476                              j++)
5477                                 REG_WR(bp, offset + j*4,
5478                                        ((u32 *)&rx_pause)[j]);
5479                 }
5480         }
5481
5482         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5483
5484         /* Init rate shaping and fairness contexts */
5485         if (IS_E1HMF(bp)) {
5486                 int vn;
5487
5488                 /* During init there is no active link.
5489                    Until link is up, set the link rate to 10Gbps */
5490                 bp->link_vars.line_speed = SPEED_10000;
5491                 bnx2x_init_port_minmax(bp);
5492
5493                 bnx2x_calc_vn_weight_sum(bp);
5494
5495                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5496                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5497
5498                 /* Enable rate shaping and fairness */
5499                 bp->cmng.flags.cmng_enables =
5500                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5501                 if (bp->vn_weight_sum)
5502                         bp->cmng.flags.cmng_enables |=
5503                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5504                 else
5505                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5506                            "  fairness will be disabled\n");
5507         } else {
5508                 /* rate shaping and fairness are disabled */
5509                 DP(NETIF_MSG_IFUP,
5510                    "single function mode  minmax will be disabled\n");
5511         }
5512
5513
5514         /* Store it to internal memory */
5515         if (bp->port.pmf)
5516                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5517                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5518                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5519                                ((u32 *)(&bp->cmng))[i]);
5520 }
5521
5522 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5523 {
5524         switch (load_code) {
5525         case FW_MSG_CODE_DRV_LOAD_COMMON:
5526                 bnx2x_init_internal_common(bp);
5527                 /* no break */
5528
5529         case FW_MSG_CODE_DRV_LOAD_PORT:
5530                 bnx2x_init_internal_port(bp);
5531                 /* no break */
5532
5533         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5534                 bnx2x_init_internal_func(bp);
5535                 break;
5536
5537         default:
5538                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5539                 break;
5540         }
5541 }
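
/* Note the deliberate fall-through above: a COMMON load runs common, port
 * and function init, a PORT load runs port and function init, and a
 * FUNCTION load runs function init only.  bnx2x_init_hw() below applies
 * the same cascade to the hardware blocks.
 */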
5542
5543 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5544 {
5545         int i;
5546
5547         for_each_queue(bp, i) {
5548                 struct bnx2x_fastpath *fp = &bp->fp[i];
5549
5550                 fp->bp = bp;
5551                 fp->state = BNX2X_FP_STATE_CLOSED;
5552                 fp->index = i;
5553                 fp->cl_id = BP_L_ID(bp) + i;
5554                 fp->sb_id = fp->cl_id;
5555                 /* a matching pair of Rx and Tx queues is served by the same client */
5556                 if (i >= bp->num_rx_queues)
5557                         fp->cl_id -= bp->num_rx_queues;
5558                 DP(NETIF_MSG_IFUP,
5559                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5560                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5561                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5562                               fp->sb_id);
5563                 bnx2x_update_fpsb_idx(fp);
5564         }
5565
5566         /* ensure status block indices were read */
5567         rmb();
5568
5569
5570         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5571                           DEF_SB_ID);
5572         bnx2x_update_dsb_idx(bp);
5573         bnx2x_update_coalesce(bp);
5574         bnx2x_init_rx_rings(bp);
5575         bnx2x_init_tx_ring(bp);
5576         bnx2x_init_sp_ring(bp);
5577         bnx2x_init_context(bp);
5578         bnx2x_init_internal(bp, load_code);
5579         bnx2x_init_ind_table(bp);
5580         bnx2x_stats_init(bp);
5581
5582         /* At this point, we are ready for interrupts */
5583         atomic_set(&bp->intr_sem, 0);
5584
5585         /* flush all before enabling interrupts */
5586         mb();
5587         mmiowb();
5588
5589         bnx2x_int_enable(bp);
5590
5591         /* Check for SPIO5 */
5592         bnx2x_attn_int_deasserted0(bp,
5593                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5594                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5595 }
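
/* The tail of bnx2x_nic_init() is ordering-sensitive: intr_sem is released
 * only after every ring and status block is initialized, and the
 * mb()/mmiowb() pair flushes those writes before bnx2x_int_enable() lets
 * the first interrupt through.  The final read of the AEU register catches
 * a SPIO5 (fan failure) attention that may have been raised while
 * interrupts were still disabled.
 */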
5596
5597 /* end of nic init */
5598
5599 /*
5600  * gzip service functions
5601  */
5602
5603 static int bnx2x_gunzip_init(struct bnx2x *bp)
5604 {
5605         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5606                                               &bp->gunzip_mapping);
5607         if (bp->gunzip_buf == NULL)
5608                 goto gunzip_nomem1;
5609
5610         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5611         if (bp->strm == NULL)
5612                 goto gunzip_nomem2;
5613
5614         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5615                                       GFP_KERNEL);
5616         if (bp->strm->workspace == NULL)
5617                 goto gunzip_nomem3;
5618
5619         return 0;
5620
5621 gunzip_nomem3:
5622         kfree(bp->strm);
5623         bp->strm = NULL;
5624
5625 gunzip_nomem2:
5626         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5627                             bp->gunzip_mapping);
5628         bp->gunzip_buf = NULL;
5629
5630 gunzip_nomem1:
5631         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5632                " un-compression\n", bp->dev->name);
5633         return -ENOMEM;
5634 }
5635
5636 static void bnx2x_gunzip_end(struct bnx2x *bp)
5637 {
5638         kfree(bp->strm->workspace);
5639
5640         kfree(bp->strm);
5641         bp->strm = NULL;
5642
5643         if (bp->gunzip_buf) {
5644                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5645                                     bp->gunzip_mapping);
5646                 bp->gunzip_buf = NULL;
5647         }
5648 }
5649
5650 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5651 {
5652         int n, rc;
5653
5654         /* check gzip header */
5655         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5656                 BNX2X_ERR("Bad gzip header\n");
5657                 return -EINVAL;
5658         }
5659
5660         n = 10;
5661
5662 #define FNAME                           0x8
5663
5664         if (zbuf[3] & FNAME)
5665                 while ((zbuf[n++] != 0) && (n < len));
5666
5667         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5668         bp->strm->avail_in = len - n;
5669         bp->strm->next_out = bp->gunzip_buf;
5670         bp->strm->avail_out = FW_BUF_SIZE;
5671
5672         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5673         if (rc != Z_OK)
5674                 return rc;
5675
5676         rc = zlib_inflate(bp->strm, Z_FINISH);
5677         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5678                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5679                        bp->dev->name, bp->strm->msg);
5680
5681         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5682         if (bp->gunzip_outlen & 0x3)
5683                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5684                                     " gunzip_outlen (%d) not aligned\n",
5685                        bp->dev->name, bp->gunzip_outlen);
5686         bp->gunzip_outlen >>= 2;
5687
5688         zlib_inflateEnd(bp->strm);
5689
5690         if (rc == Z_STREAM_END)
5691                 return 0;
5692
5693         return rc;
5694 }
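
/* The ten bytes skipped above are the fixed gzip member header (magic
 * 0x1f 0x8b, compression method, flags, mtime, xfl, os).  When FNAME
 * (bit 3 of the flags byte) is set, a NUL-terminated original file name
 * follows and is skipped as well; the other optional gzip fields (FEXTRA,
 * FCOMMENT, FHCRC) are not handled, so FW images must not carry them.
 * Note the skip loop reads zbuf[n] before testing n < len, so a truncated
 * header could read one byte past the buffer.  Passing -MAX_WBITS to
 * zlib_inflateInit2() then selects a raw deflate stream, since the gzip
 * wrapper has already been consumed by hand.
 */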
5695
5696 /* nic load/unload */
5697
5698 /*
5699  * General service functions
5700  */
5701
5702 /* send a NIG loopback debug packet */
5703 static void bnx2x_lb_pckt(struct bnx2x *bp)
5704 {
5705         u32 wb_write[3];
5706
5707         /* Ethernet source and destination addresses */
5708         wb_write[0] = 0x55555555;
5709         wb_write[1] = 0x55555555;
5710         wb_write[2] = 0x20;             /* SOP */
5711         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5712
5713         /* NON-IP protocol */
5714         wb_write[0] = 0x09000000;
5715         wb_write[1] = 0x55555555;
5716         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5717         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5718 }
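
/* Each REG_WR_DMAE() above carries 8 bytes of packet data plus a control
 * word (0x20 = SOP, 0x10 = EOP), so the two writes emit one 16-byte (0x10)
 * debug frame - exactly the size bnx2x_int_mem_test() below expects to see
 * in the NIG byte counter.
 */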
5719
5720 /* Some of the internal memories are not directly readable
5721  * from the driver.
5722  * To test them we send debug packets.
5723  */
5724 static int bnx2x_int_mem_test(struct bnx2x *bp)
5725 {
5726         int factor;
5727         int count, i;
5728         u32 val = 0;
5729
5730         if (CHIP_REV_IS_FPGA(bp))
5731                 factor = 120;
5732         else if (CHIP_REV_IS_EMUL(bp))
5733                 factor = 200;
5734         else
5735                 factor = 1;
5736
5737         DP(NETIF_MSG_HW, "start part1\n");
5738
5739         /* Disable inputs of parser neighbor blocks */
5740         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5741         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5742         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5743         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5744
5745         /*  Write 0 to parser credits for CFC search request */
5746         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5747
5748         /* send Ethernet packet */
5749         bnx2x_lb_pckt(bp);
5750
5751         /* TODO: do we need to reset the NIG statistics? */
5752         /* Wait until NIG register shows 1 packet of size 0x10 */
5753         count = 1000 * factor;
5754         while (count) {
5755
5756                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5757                 val = *bnx2x_sp(bp, wb_data[0]);
5758                 if (val == 0x10)
5759                         break;
5760
5761                 msleep(10);
5762                 count--;
5763         }
5764         if (val != 0x10) {
5765                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5766                 return -1;
5767         }
5768
5769         /* Wait until PRS register shows 1 packet */
5770         count = 1000 * factor;
5771         while (count) {
5772                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5773                 if (val == 1)
5774                         break;
5775
5776                 msleep(10);
5777                 count--;
5778         }
5779         if (val != 0x1) {
5780                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5781                 return -2;
5782         }
5783
5784         /* Reset and init BRB, PRS */
5785         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5786         msleep(50);
5787         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5788         msleep(50);
5789         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5790         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5791
5792         DP(NETIF_MSG_HW, "part2\n");
5793
5794         /* Disable inputs of parser neighbor blocks */
5795         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5796         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5797         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5798         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5799
5800         /* Write 0 to parser credits for CFC search request */
5801         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5802
5803         /* send 10 Ethernet packets */
5804         for (i = 0; i < 10; i++)
5805                 bnx2x_lb_pckt(bp);
5806
5807         /* Wait until NIG register shows 10 + 1
5808            packets of size 11*0x10 = 0xb0 */
5809         count = 1000 * factor;
5810         while (count) {
5811
5812                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5813                 val = *bnx2x_sp(bp, wb_data[0]);
5814                 if (val == 0xb0)
5815                         break;
5816
5817                 msleep(10);
5818                 count--;
5819         }
5820         if (val != 0xb0) {
5821                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5822                 return -3;
5823         }
5824
5825         /* The PRS packet counter should now show 2 packets */
5826         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5827         if (val != 2)
5828                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5829
5830         /* Write 1 to parser credits for CFC search request */
5831         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5832
5833         /* Wait until PRS register shows 3 packets */
5834         msleep(10 * factor);
5835         /* re-read the PRS packet counter; it should now show 3 packets */
5836         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5837         if (val != 3)
5838                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5839
5840         /* clear NIG EOP FIFO */
5841         for (i = 0; i < 11; i++)
5842                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5843         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5844         if (val != 1) {
5845                 BNX2X_ERR("clear of NIG failed\n");
5846                 return -4;
5847         }
5848
5849         /* Reset and init BRB, PRS, NIG */
5850         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5851         msleep(50);
5852         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5853         msleep(50);
5854         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5855         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5856 #ifndef BCM_ISCSI
5857         /* set NIC mode */
5858         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5859 #endif
5860
5861         /* Enable inputs of parser neighbor blocks */
5862         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5863         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5864         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5865         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5866
5867         DP(NETIF_MSG_HW, "done\n");
5868
5869         return 0; /* OK */
5870 }
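
/* The read/compare/msleep loops above follow the same pattern as the
 * reg_poll() helper used in bnx2x_init_common() below; the NIG polls
 * cannot use it directly since the byte counter is a wide (two dword)
 * register read via DMAE rather than a single REG_RD().
 */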
5871
5872 static void enable_blocks_attention(struct bnx2x *bp)
5873 {
5874         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5875         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5876         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5877         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5878         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5879         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5880         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5881         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5882         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5883 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5884 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5885         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5886         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5887         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5888 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5889 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5890         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5891         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5892         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5893         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5894 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5895 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5896         if (CHIP_REV_IS_FPGA(bp))
5897                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5898         else
5899                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5900         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5901         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5902         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5903 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5904 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5905         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5906         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5907 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5908         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3, 4 masked */
5909 }
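
/* Writing 0 to a *_INT_MASK register above unmasks every attention bit in
 * that block.  The exceptions are PBF, where 0x18 keeps bits 3 and 4
 * masked, and PXP2, which keeps a few bits masked (0x580000 on FPGA,
 * 0x480000 otherwise).
 */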
5910
5911
5912 static void bnx2x_reset_common(struct bnx2x *bp)
5913 {
5914         /* reset_common */
5915         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5916                0xd3ffff7f);
5917         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5918 }
5919
5920
5921 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5922 {
5923         u32 val;
5924         u8 port;
5925         u8 is_required = 0;
5926
5927         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5928               SHARED_HW_CFG_FAN_FAILURE_MASK;
5929
5930         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5931                 is_required = 1;
5932
5933         /*
5934          * The fan failure mechanism is usually related to the PHY type since
5935          * the power consumption of the board is affected by the PHY. Currently,
5936          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5937          */
5938         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5939                 for (port = PORT_0; port < PORT_MAX; port++) {
5940                         u32 phy_type =
5941                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
5942                                          external_phy_config) &
5943                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5944                         is_required |=
5945                                 ((phy_type ==
5946                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5947                                  (phy_type ==
5948                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5949                                  (phy_type ==
5950                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5951                 }
5952
5953         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5954
5955         if (is_required == 0)
5956                 return;
5957
5958         /* Fan failure is indicated by SPIO 5 */
5959         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5960                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
5961
5962         /* set to active low mode */
5963         val = REG_RD(bp, MISC_REG_SPIO_INT);
5964         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5965                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5966         REG_WR(bp, MISC_REG_SPIO_INT, val);
5967
5968         /* enable interrupt to signal the IGU */
5969         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5970         val |= (1 << MISC_REGISTERS_SPIO_5);
5971         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5972 }
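
/* Once SPIO 5 is armed here, a fan failure is signalled as an attention
 * through the AEU; this pairs with the one-shot SPIO5 check at the end of
 * bnx2x_nic_init(), so a failure that occurred before interrupts were
 * enabled is still noticed.
 */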
5973
5974 static int bnx2x_init_common(struct bnx2x *bp)
5975 {
5976         u32 val, i;
5977
5978         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
5979
5980         bnx2x_reset_common(bp);
5981         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5982         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5983
5984         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5985         if (CHIP_IS_E1H(bp))
5986                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5987
5988         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5989         msleep(30);
5990         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5991
5992         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5993         if (CHIP_IS_E1(bp)) {
5994                 /* enable HW interrupt from PXP on USDM overflow
5995                    bit 16 on INT_MASK_0 */
5996                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5997         }
5998
5999         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6000         bnx2x_init_pxp(bp);
6001
6002 #ifdef __BIG_ENDIAN
6003         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6004         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6005         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6006         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6007         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6008         /* make sure this value is 0 */
6009         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6010
6011 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6012         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6013         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6014         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6015         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6016 #endif
6017
6018         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6019 #ifdef BCM_ISCSI
6020         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6021         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6022         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6023 #endif
6024
6025         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6026                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6027
6028         /* let the HW do its magic ... */
6029         msleep(100);
6030         /* finish PXP init */
6031         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6032         if (val != 1) {
6033                 BNX2X_ERR("PXP2 CFG failed\n");
6034                 return -EBUSY;
6035         }
6036         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6037         if (val != 1) {
6038                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6039                 return -EBUSY;
6040         }
6041
6042         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6043         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6044
6045         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6046
6047         /* clean the DMAE memory */
6048         bp->dmae_ready = 1;
6049         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6050
6051         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6052         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6053         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6054         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6055
6056         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6057         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6058         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6059         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6060
6061         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6062         /* soft reset pulse */
6063         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6064         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6065
6066 #ifdef BCM_ISCSI
6067         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6068 #endif
6069
6070         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6071         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6072         if (!CHIP_REV_IS_SLOW(bp)) {
6073                 /* enable hw interrupt from doorbell Q */
6074                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6075         }
6076
6077         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6078         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6079         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6080         /* set NIC mode */
6081         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6082         if (CHIP_IS_E1H(bp))
6083                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6084
6085         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6086         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6087         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6088         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6089
6090         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6091         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6092         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6093         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6094
6095         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6096         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6097         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6098         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6099
6100         /* sync semi rtc */
6101         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6102                0x80000000);
6103         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6104                0x80000000);
6105
6106         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6107         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6108         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6109
6110         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6111         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6112                 REG_WR(bp, i, 0xc0cac01a);
6113                 /* TODO: replace with something meaningful */
6114         }
6115         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6116         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6117
6118         if (sizeof(union cdu_context) != 1024)
6119                 /* we currently assume that a context is 1024 bytes */
6120                 printk(KERN_ALERT PFX "please adjust the size of"
6121                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6122
6123         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6124         val = (4 << 24) + (0 << 12) + 1024;
6125         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6126
6127         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6128         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6129         /* enable context validation interrupt from CFC */
6130         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6131
6132         /* set the thresholds to prevent CFC/CDU race */
6133         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6134
6135         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6136         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6137
6138         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6139         /* Reset PCIE errors for debug */
6140         REG_WR(bp, 0x2814, 0xffffffff);
6141         REG_WR(bp, 0x3820, 0xffffffff);
6142
6143         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6144         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6145         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6146         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6147
6148         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6149         if (CHIP_IS_E1H(bp)) {
6150                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6151                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6152         }
6153
6154         if (CHIP_REV_IS_SLOW(bp))
6155                 msleep(200);
6156
6157         /* finish CFC init */
6158         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6159         if (val != 1) {
6160                 BNX2X_ERR("CFC LL_INIT failed\n");
6161                 return -EBUSY;
6162         }
6163         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6164         if (val != 1) {
6165                 BNX2X_ERR("CFC AC_INIT failed\n");
6166                 return -EBUSY;
6167         }
6168         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6169         if (val != 1) {
6170                 BNX2X_ERR("CFC CAM_INIT failed\n");
6171                 return -EBUSY;
6172         }
6173         REG_WR(bp, CFC_REG_DEBUG0, 0);
6174
6175         /* read NIG statistic
6176            to see if this is our first up since powerup */
6177         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6178         val = *bnx2x_sp(bp, wb_data[0]);
6179
6180         /* do internal memory self test */
6181         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6182                 BNX2X_ERR("internal mem self test failed\n");
6183                 return -EBUSY;
6184         }
6185
6186         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6187         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6188         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6189         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6190         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6191                 bp->port.need_hw_lock = 1;
6192                 break;
6193
6194         default:
6195                 break;
6196         }
6197
6198         bnx2x_setup_fan_failure_detection(bp);
6199
6200         /* clear PXP2 attentions */
6201         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6202
6203         enable_blocks_attention(bp);
6204
6205         if (!BP_NOMCP(bp)) {
6206                 bnx2x_acquire_phy_lock(bp);
6207                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6208                 bnx2x_release_phy_lock(bp);
6209         } else
6210                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6211
6212         return 0;
6213 }
6214
6215 static int bnx2x_init_port(struct bnx2x *bp)
6216 {
6217         int port = BP_PORT(bp);
6218         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6219         u32 low, high;
6220         u32 val;
6221
6222         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6223
6224         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6225
6226         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6227         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6228
6229         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6230         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6231         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6232 #ifdef BCM_ISCSI
6233         /* Port0  1
6234          * Port1  385 */
6235         i++;
6236         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6237         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6238         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6239         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6240
6241         /* Port0  2
6242          * Port1  386 */
6243         i++;
6244         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6245         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6246         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6247         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6248
6249         /* Port0  3
6250          * Port1  387 */
6251         i++;
6252         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6253         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6254         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6255         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6256 #endif
6257         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6258
6259 #ifdef BCM_ISCSI
6260         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6261         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6262
6263         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6264 #endif
6265         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6266
6267         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6268         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6269                 /* no pause for emulation and FPGA */
6270                 low = 0;
6271                 high = 513;
6272         } else {
6273                 if (IS_E1HMF(bp))
6274                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6275                 else if (bp->dev->mtu > 4096) {
6276                         if (bp->flags & ONE_PORT_FLAG)
6277                                 low = 160;
6278                         else {
6279                                 val = bp->dev->mtu;
6280                                 /* (24*1024 + val*4)/256 */
6281                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6282                         }
6283                 } else
6284                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6285                 high = low + 56;        /* 14*1024/256 */
6286         }
6287         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6288         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
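        /* Worked example of the threshold math above: for mtu 9000 on a
         * two-port single-function board, low = 96 + 9000/64 + 1 = 237
         * BRB blocks of 256 bytes each (i.e. ceil((24*1024 + 9000*4)/256))
         * and high = 237 + 56 = 293.
         */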
6289
6290
6291         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6292
6293         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6294         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6295         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6296         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6297
6298         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6299         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6300         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6301         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6302
6303         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6304         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6305
6306         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6307
6308         /* configure PBF to work without PAUSE, mtu 9000 */
6309         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6310
6311         /* update threshold */
6312         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6313         /* update init credit */
6314         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6315
6316         /* probe changes */
6317         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6318         msleep(5);
6319         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6320
6321 #ifdef BCM_ISCSI
6322         /* tell the searcher where the T2 table is */
6323         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6324
6325         wb_write[0] = U64_LO(bp->t2_mapping);
6326         wb_write[1] = U64_HI(bp->t2_mapping);
6327         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6328         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6329         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6330         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6331
6332         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6333 #endif
6334         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6335         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6336
6337         if (CHIP_IS_E1(bp)) {
6338                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6339                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6340         }
6341         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6342
6343         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6344         /* init aeu_mask_attn_func_0/1:
6345          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6346          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6347          *             bits 4-7 are used for "per vn group attention" */
6348         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6349                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6350
6351         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6352         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6353         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6354         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6355         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6356
6357         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6358
6359         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6360
6361         if (CHIP_IS_E1H(bp)) {
6362                 /* 0x2 disables e1hov, 0x1 enables it */
6363                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6364                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6365
6366                 {
6367                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6368                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6369                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6370                 }
6371         }
6372
6373         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6374         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6375
6376         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6377         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6378                 {
6379                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6380
6381                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6382                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6383
6384                 /* The GPIO should be swapped if the swap register is
6385                    set and active */
6386                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6387                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6388
6389                 /* Select function upon port-swap configuration */
6390                 if (port == 0) {
6391                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6392                         aeu_gpio_mask = (swap_val && swap_override) ?
6393                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6394                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6395                 } else {
6396                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6397                         aeu_gpio_mask = (swap_val && swap_override) ?
6398                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6399                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6400                 }
6401                 val = REG_RD(bp, offset);
6402                 /* add GPIO3 to group */
6403                 val |= aeu_gpio_mask;
6404                 REG_WR(bp, offset, val);
6405                 }
6406                 break;
6407
6408         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6409         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6410                 /* add SPIO 5 to group 0 */
6411                 {
6412                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6413                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6414                 val = REG_RD(bp, reg_addr);
6415                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6416                 REG_WR(bp, reg_addr, val);
6417                 }
6418                 break;
6419
6420         default:
6421                 break;
6422         }
6423
6424         bnx2x__link_reset(bp);
6425
6426         return 0;
6427 }
6428
6429 #define ILT_PER_FUNC            (768/2)
6430 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6431 /* The phys address is shifted right 12 bits and a 1=valid bit is
6432    added at bit 52 (the 53rd bit) of the result.
6433    Then, since this is a wide register,
6434    we split it into two 32 bit writes
6435  */
6436 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6437 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6438 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6439 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
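
/* Worked example of the encodings above, for a hypothetical DMA address
 * 0x123456789000: ONCHIP_ADDR1() yields the low 32 bits of addr >> 12,
 * i.e. 0x23456789, and ONCHIP_ADDR2() yields (1 << 20) | 0x1 = 0x100001 -
 * the remaining high bits plus the valid bit, which lands at bit 52 of
 * the combined value (the "53rd bit" counting from one).  PXP_ONE_ILT(x)
 * is simply PXP_ILT_RANGE(x, x): first line in bits 0-9, last line in
 * bits 10-19.
 */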
6440
6441 #define CNIC_ILT_LINES          0
6442
6443 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6444 {
6445         int reg;
6446
6447         if (CHIP_IS_E1H(bp))
6448                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6449         else /* E1 */
6450                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6451
6452         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6453 }
6454
6455 static int bnx2x_init_func(struct bnx2x *bp)
6456 {
6457         int port = BP_PORT(bp);
6458         int func = BP_FUNC(bp);
6459         u32 addr, val;
6460         int i;
6461
6462         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6463
6464         /* set MSI reconfigure capability */
6465         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6466         val = REG_RD(bp, addr);
6467         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6468         REG_WR(bp, addr, val);
6469
6470         i = FUNC_ILT_BASE(func);
6471
6472         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6473         if (CHIP_IS_E1H(bp)) {
6474                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6475                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6476         } else /* E1 */
6477                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6478                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6479
6480
6481         if (CHIP_IS_E1H(bp)) {
6482                 for (i = 0; i < 9; i++)
6483                         bnx2x_init_block(bp,
6484                                          cm_blocks[i], FUNC0_STAGE + func);
6485
6486                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6487                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6488         }
6489
6490         /* HC init per function */
6491         if (CHIP_IS_E1H(bp)) {
6492                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6493
6494                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6495                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6496         }
6497         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6498
6499         /* Reset PCIE errors for debug */
6500         REG_WR(bp, 0x2114, 0xffffffff);
6501         REG_WR(bp, 0x2120, 0xffffffff);
6502
6503         return 0;
6504 }
6505
6506 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6507 {
6508         int i, rc = 0;
6509
6510         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6511            BP_FUNC(bp), load_code);
6512
6513         bp->dmae_ready = 0;
6514         mutex_init(&bp->dmae_mutex);
6515         bnx2x_gunzip_init(bp);
6516
6517         switch (load_code) {
6518         case FW_MSG_CODE_DRV_LOAD_COMMON:
6519                 rc = bnx2x_init_common(bp);
6520                 if (rc)
6521                         goto init_hw_err;
6522                 /* no break */
6523
6524         case FW_MSG_CODE_DRV_LOAD_PORT:
6525                 bp->dmae_ready = 1;
6526                 rc = bnx2x_init_port(bp);
6527                 if (rc)
6528                         goto init_hw_err;
6529                 /* no break */
6530
6531         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6532                 bp->dmae_ready = 1;
6533                 rc = bnx2x_init_func(bp);
6534                 if (rc)
6535                         goto init_hw_err;
6536                 break;
6537
6538         default:
6539                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6540                 break;
6541         }
6542
6543         if (!BP_NOMCP(bp)) {
6544                 int func = BP_FUNC(bp);
6545
6546                 bp->fw_drv_pulse_wr_seq =
6547                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6548                                  DRV_PULSE_SEQ_MASK);
6549                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6550         }
6551
6552         /* this needs to be done before gunzip end */
6553         bnx2x_zero_def_sb(bp);
6554         for_each_queue(bp, i)
6555                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6556
6557 init_hw_err:
6558         bnx2x_gunzip_end(bp);
6559
6560         return rc;
6561 }
6562
6563 static void bnx2x_free_mem(struct bnx2x *bp)
6564 {
6565
6566 #define BNX2X_PCI_FREE(x, y, size) \
6567         do { \
6568                 if (x) { \
6569                         pci_free_consistent(bp->pdev, size, x, y); \
6570                         x = NULL; \
6571                         y = 0; \
6572                 } \
6573         } while (0)
6574
6575 #define BNX2X_FREE(x) \
6576         do { \
6577                 if (x) { \
6578                         vfree(x); \
6579                         x = NULL; \
6580                 } \
6581         } while (0)
6582
6583         int i;
6584
6585         /* fastpath */
6586         /* Common */
6587         for_each_queue(bp, i) {
6588
6589                 /* status blocks */
6590                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6591                                bnx2x_fp(bp, i, status_blk_mapping),
6592                                sizeof(struct host_status_block));
6593         }
6594         /* Rx */
6595         for_each_rx_queue(bp, i) {
6596
6597                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6598                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6599                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6600                                bnx2x_fp(bp, i, rx_desc_mapping),
6601                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6602
6603                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6604                                bnx2x_fp(bp, i, rx_comp_mapping),
6605                                sizeof(struct eth_fast_path_rx_cqe) *
6606                                NUM_RCQ_BD);
6607
6608                 /* SGE ring */
6609                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6610                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6611                                bnx2x_fp(bp, i, rx_sge_mapping),
6612                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6613         }
6614         /* Tx */
6615         for_each_tx_queue(bp, i) {
6616
6617                 /* fastpath tx rings: tx_buf tx_desc */
6618                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6619                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6620                                bnx2x_fp(bp, i, tx_desc_mapping),
6621                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6622         }
6623         /* end of fastpath */
6624
6625         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6626                        sizeof(struct host_def_status_block));
6627
6628         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6629                        sizeof(struct bnx2x_slowpath));
6630
6631 #ifdef BCM_ISCSI
6632         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6633         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6634         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6635         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6636 #endif
6637         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6638
6639 #undef BNX2X_PCI_FREE
6640 #undef BNX2X_FREE
6641 }
6642
6643 static int bnx2x_alloc_mem(struct bnx2x *bp)
6644 {
6645
6646 #define BNX2X_PCI_ALLOC(x, y, size) \
6647         do { \
6648                 x = pci_alloc_consistent(bp->pdev, size, y); \
6649                 if (x == NULL) \
6650                         goto alloc_mem_err; \
6651                 memset(x, 0, size); \
6652         } while (0)
6653
6654 #define BNX2X_ALLOC(x, size) \
6655         do { \
6656                 x = vmalloc(size); \
6657                 if (x == NULL) \
6658                         goto alloc_mem_err; \
6659                 memset(x, 0, size); \
6660         } while (0)
6661
6662         int i;
6663
6664         /* fastpath */
6665         /* Common */
6666         for_each_queue(bp, i) {
6667                 bnx2x_fp(bp, i, bp) = bp;
6668
6669                 /* status blocks */
6670                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6671                                 &bnx2x_fp(bp, i, status_blk_mapping),
6672                                 sizeof(struct host_status_block));
6673         }
6674         /* Rx */
6675         for_each_rx_queue(bp, i) {
6676
6677                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6678                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6679                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6680                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6681                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6682                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6683
6684                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6685                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6686                                 sizeof(struct eth_fast_path_rx_cqe) *
6687                                 NUM_RCQ_BD);
6688
6689                 /* SGE ring */
6690                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6691                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6692                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6693                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6694                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6695         }
6696         /* Tx */
6697         for_each_tx_queue(bp, i) {
6698
6699                 /* fastpath tx rings: tx_buf tx_desc */
6700                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6701                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6702                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6703                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6704                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6705         }
6706         /* end of fastpath */
6707
6708         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6709                         sizeof(struct host_def_status_block));
6710
6711         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6712                         sizeof(struct bnx2x_slowpath));
6713
6714 #ifdef BCM_ISCSI
6715         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6716
6717         /* Initialize T1 */
6718         for (i = 0; i < 64*1024; i += 64) {
6719                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6720                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6721         }
6722
6723         /* allocate the searcher T2 table;
6724            we allocate 1/4 of the allocation count for T2
6725            (which is not entered into the ILT) */
6726         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6727
6728         /* Initialize T2 */
6729         for (i = 0; i < 16*1024; i += 64)
6730                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6731
6732         /* now fixup the last line in the block to point to the next block */
6733         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6734
6735         /* Timer block array (MAX_CONN*8), physically uncached; 1024 conns for now */
6736         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6737
6738         /* QM queues (128*MAX_CONN) */
6739         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6740 #endif
6741
6742         /* Slow path ring */
6743         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6744
6745         return 0;
6746
6747 alloc_mem_err:
6748         bnx2x_free_mem(bp);
6749         return -ENOMEM;
6750
6751 #undef BNX2X_PCI_ALLOC
6752 #undef BNX2X_ALLOC
6753 }
6754
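/*
 * Release any Tx packets still queued between the software consumer
 * and producer indices on every Tx queue; used on the unload path
 * after the fastpath has been quiesced.
 */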
6755 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6756 {
6757         int i;
6758
6759         for_each_tx_queue(bp, i) {
6760                 struct bnx2x_fastpath *fp = &bp->fp[i];
6761
6762                 u16 bd_cons = fp->tx_bd_cons;
6763                 u16 sw_prod = fp->tx_pkt_prod;
6764                 u16 sw_cons = fp->tx_pkt_cons;
6765
6766                 while (sw_cons != sw_prod) {
6767                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6768                         sw_cons++;
6769                 }
6770         }
6771 }
6772
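/*
 * Unmap and free every Rx skb still posted to the hardware, then
 * drain the TPA aggregation pool (whose size depends on the chip
 * variant).
 */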
6773 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6774 {
6775         int i, j;
6776
6777         for_each_rx_queue(bp, j) {
6778                 struct bnx2x_fastpath *fp = &bp->fp[j];
6779
6780                 for (i = 0; i < NUM_RX_BD; i++) {
6781                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6782                         struct sk_buff *skb = rx_buf->skb;
6783
6784                         if (skb == NULL)
6785                                 continue;
6786
6787                         pci_unmap_single(bp->pdev,
6788                                          pci_unmap_addr(rx_buf, mapping),
6789                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6790
6791                         rx_buf->skb = NULL;
6792                         dev_kfree_skb(skb);
6793                 }
6794                 if (!fp->disable_tpa)
6795                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6796                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6797                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6798         }
6799 }
6800
6801 static void bnx2x_free_skbs(struct bnx2x *bp)
6802 {
6803         bnx2x_free_tx_skbs(bp);
6804         bnx2x_free_rx_skbs(bp);
6805 }
6806
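/*
 * MSI-X teardown: vector 0 carries the slowpath (default status block)
 * interrupt and is bound to the netdev; the fastpath vectors start at
 * offset 1 and are bound to their bnx2x_fastpath structures.
 */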
6807 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6808 {
6809         int i, offset = 1;
6810
6811         free_irq(bp->msix_table[0].vector, bp->dev);
6812         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6813            bp->msix_table[0].vector);
6814
6815         for_each_queue(bp, i) {
6816                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6817                    "state %x\n", i, bp->msix_table[i + offset].vector,
6818                    bnx2x_fp(bp, i, state));
6819
6820                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6821         }
6822 }
6823
6824 static void bnx2x_free_irq(struct bnx2x *bp)
6825 {
6826         if (bp->flags & USING_MSIX_FLAG) {
6827                 bnx2x_free_msix_irqs(bp);
6828                 pci_disable_msix(bp->pdev);
6829                 bp->flags &= ~USING_MSIX_FLAG;
6830
6831         } else if (bp->flags & USING_MSI_FLAG) {
6832                 free_irq(bp->pdev->irq, bp->dev);
6833                 pci_disable_msi(bp->pdev);
6834                 bp->flags &= ~USING_MSI_FLAG;
6835
6836         } else
6837                 free_irq(bp->pdev->irq, bp->dev);
6838 }
6839
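/*
 * Build the MSI-X entry table and ask the PCI core for the vectors.
 * Entry 0 is the slowpath; entry (i + 1) serves fastpath queue i with
 * IGU vector BP_L_ID() + 1 + i.  As an illustration, with 4 queues
 * this requests 5 vectors: sp, fp0..fp3.
 */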
6840 static int bnx2x_enable_msix(struct bnx2x *bp)
6841 {
6842         int i, rc, offset = 1;
6843         int igu_vec = 0;
6844
6845         bp->msix_table[0].entry = igu_vec;
6846         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6847
6848         for_each_queue(bp, i) {
6849                 igu_vec = BP_L_ID(bp) + offset + i;
6850                 bp->msix_table[i + offset].entry = igu_vec;
6851                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6852                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6853         }
6854
6855         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6856                              BNX2X_NUM_QUEUES(bp) + offset);
6857         if (rc) {
6858                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6859                 return rc;
6860         }
6861
6862         bp->flags |= USING_MSIX_FLAG;
6863
6864         return 0;
6865 }
6866
6867 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6868 {
6869         int i, rc, offset = 1;
6870
6871         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6872                          bp->dev->name, bp->dev);
6873         if (rc) {
6874                 BNX2X_ERR("request sp irq failed\n");
6875                 return -EBUSY;
6876         }
6877
6878         for_each_queue(bp, i) {
6879                 struct bnx2x_fastpath *fp = &bp->fp[i];
6880
6881                 if (i < bp->num_rx_queues)
6882                         sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6883                 else
6884                         sprintf(fp->name, "%s-tx-%d",
6885                                 bp->dev->name, i - bp->num_rx_queues);
6886
6887                 rc = request_irq(bp->msix_table[i + offset].vector,
6888                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6889                 if (rc) {
6890                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6891                         bnx2x_free_msix_irqs(bp);
6892                         return -EBUSY;
6893                 }
6894
6895                 fp->state = BNX2X_FP_STATE_IRQ;
6896         }
6897
6898         i = BNX2X_NUM_QUEUES(bp);
6899         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
6900                " ... fp[%d] %d\n",
6901                bp->dev->name, bp->msix_table[0].vector,
6902                0, bp->msix_table[offset].vector,
6903                i - 1, bp->msix_table[offset + i - 1].vector);
6904
6905         return 0;
6906 }
6907
6908 static int bnx2x_enable_msi(struct bnx2x *bp)
6909 {
6910         int rc;
6911
6912         rc = pci_enable_msi(bp->pdev);
6913         if (rc) {
6914                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6915                 return -1;
6916         }
6917         bp->flags |= USING_MSI_FLAG;
6918
6919         return 0;
6920 }
6921
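/*
 * Single-vector (MSI or legacy INTx) request path.  IRQF_SHARED is
 * only needed for INTx; MSI vectors are never shared, so the flag is
 * dropped when USING_MSI_FLAG is set.
 */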
6922 static int bnx2x_req_irq(struct bnx2x *bp)
6923 {
6924         unsigned long flags;
6925         int rc;
6926
6927         if (bp->flags & USING_MSI_FLAG)
6928                 flags = 0;
6929         else
6930                 flags = IRQF_SHARED;
6931
6932         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6933                          bp->dev->name, bp->dev);
6934         if (!rc)
6935                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6936
6937         return rc;
6938 }
6939
6940 static void bnx2x_napi_enable(struct bnx2x *bp)
6941 {
6942         int i;
6943
6944         for_each_rx_queue(bp, i)
6945                 napi_enable(&bnx2x_fp(bp, i, napi));
6946 }
6947
6948 static void bnx2x_napi_disable(struct bnx2x *bp)
6949 {
6950         int i;
6951
6952         for_each_rx_queue(bp, i)
6953                 napi_disable(&bnx2x_fp(bp, i, napi));
6954 }
6955
6956 static void bnx2x_netif_start(struct bnx2x *bp)
6957 {
6958         int intr_sem;
6959
6960         intr_sem = atomic_dec_and_test(&bp->intr_sem);
6961         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6962
6963         if (intr_sem) {
6964                 if (netif_running(bp->dev)) {
6965                         bnx2x_napi_enable(bp);
6966                         bnx2x_int_enable(bp);
6967                         if (bp->state == BNX2X_STATE_OPEN)
6968                                 netif_tx_wake_all_queues(bp->dev);
6969                 }
6970         }
6971 }
6972
6973 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6974 {
6975         bnx2x_int_disable_sync(bp, disable_hw);
6976         bnx2x_napi_disable(bp);
6977         netif_tx_disable(bp->dev);
6978         bp->dev->trans_start = jiffies; /* prevent tx timeout */
6979 }
6980
6981 /*
6982  * Init service functions
6983  */
6984
6985 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6986 {
6987         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6988         int port = BP_PORT(bp);
6989
6990         /* CAM allocation
6991          * unicasts 0-31:port0 32-63:port1
6992          * multicast 64-127:port0 128-191:port1
6993          */
6994         config->hdr.length = 2;
6995         config->hdr.offset = port ? 32 : 0;
6996         config->hdr.client_id = bp->fp->cl_id;
6997         config->hdr.reserved1 = 0;
6998
6999         /* primary MAC */
7000         config->config_table[0].cam_entry.msb_mac_addr =
7001                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
7002         config->config_table[0].cam_entry.middle_mac_addr =
7003                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
7004         config->config_table[0].cam_entry.lsb_mac_addr =
7005                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
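        /* Example (assuming a little-endian host): for dev_addr
         * 00:10:18:ab:cd:ef the three swab16() results are 0x0010,
         * 0x18ab and 0xcdef, i.e. the CAM holds the address as
         * big-endian 16-bit words.
         */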
7006         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7007         if (set)
7008                 config->config_table[0].target_table_entry.flags = 0;
7009         else
7010                 CAM_INVALIDATE(config->config_table[0]);
7011         config->config_table[0].target_table_entry.clients_bit_vector =
7012                                                 cpu_to_le32(1 << BP_L_ID(bp));
7013         config->config_table[0].target_table_entry.vlan_id = 0;
7014
7015         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7016            (set ? "setting" : "clearing"),
7017            config->config_table[0].cam_entry.msb_mac_addr,
7018            config->config_table[0].cam_entry.middle_mac_addr,
7019            config->config_table[0].cam_entry.lsb_mac_addr);
7020
7021         /* broadcast */
7022         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
7023         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
7024         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
7025         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7026         if (set)
7027                 config->config_table[1].target_table_entry.flags =
7028                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7029         else
7030                 CAM_INVALIDATE(config->config_table[1]);
7031         config->config_table[1].target_table_entry.clients_bit_vector =
7032                                                 cpu_to_le32(1 << BP_L_ID(bp));
7033         config->config_table[1].target_table_entry.vlan_id = 0;
7034
7035         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7036                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7037                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7038 }
7039
7040 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7041 {
7042         struct mac_configuration_cmd_e1h *config =
7043                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7044
7045         /* CAM allocation for E1H
7046          * unicasts: by func number
7047          * multicast: 20+FUNC*20, 20 each
7048          */
7049         config->hdr.length = 1;
7050         config->hdr.offset = BP_FUNC(bp);
7051         config->hdr.client_id = bp->fp->cl_id;
7052         config->hdr.reserved1 = 0;
7053
7054         /* primary MAC */
7055         config->config_table[0].msb_mac_addr =
7056                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
7057         config->config_table[0].middle_mac_addr =
7058                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
7059         config->config_table[0].lsb_mac_addr =
7060                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
7061         config->config_table[0].clients_bit_vector =
7062                                         cpu_to_le32(1 << BP_L_ID(bp));
7063         config->config_table[0].vlan_id = 0;
7064         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7065         if (set)
7066                 config->config_table[0].flags = BP_PORT(bp);
7067         else
7068                 config->config_table[0].flags =
7069                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7070
7071         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
7072            (set ? "setting" : "clearing"),
7073            config->config_table[0].msb_mac_addr,
7074            config->config_table[0].middle_mac_addr,
7075            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
7076
7077         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7078                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7079                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7080 }
7081
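/*
 * Wait (up to 5000 x 1 ms) for a slowpath ramrod completion to flip
 * *state_p to the expected value.  In poll mode the Rx completion ring
 * is serviced by hand, which covers the phases where interrupts are
 * not yet (or no longer) available.
 */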
7082 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7083                              int *state_p, int poll)
7084 {
7085         /* can take a while if any port is running */
7086         int cnt = 5000;
7087
7088         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7089            poll ? "polling" : "waiting", state, idx);
7090
7091         might_sleep();
7092         while (cnt--) {
7093                 if (poll) {
7094                         bnx2x_rx_int(bp->fp, 10);
7095                         /* if index is different from 0
7096                          * the reply for some commands will
7097                          * be on the non-default queue
7098                          */
7099                         if (idx)
7100                                 bnx2x_rx_int(&bp->fp[idx], 10);
7101                 }
7102
7103                 mb(); /* state is changed by bnx2x_sp_event() */
7104                 if (*state_p == state) {
7105 #ifdef BNX2X_STOP_ON_ERROR
7106                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7107 #endif
7108                         return 0;
7109                 }
7110
7111                 msleep(1);
7112
7113                 if (bp->panic)
7114                         return -EIO;
7115         }
7116
7117         /* timeout! */
7118         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7119                   poll ? "polling" : "waiting", state, idx);
7120 #ifdef BNX2X_STOP_ON_ERROR
7121         bnx2x_panic();
7122 #endif
7123
7124         return -EBUSY;
7125 }
7126
7127 static int bnx2x_setup_leading(struct bnx2x *bp)
7128 {
7129         int rc;
7130
7131         /* reset IGU state */
7132         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7133
7134         /* SETUP ramrod */
7135         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7136
7137         /* Wait for completion */
7138         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7139
7140         return rc;
7141 }
7142
7143 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7144 {
7145         struct bnx2x_fastpath *fp = &bp->fp[index];
7146
7147         /* reset IGU state */
7148         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7149
7150         /* SETUP ramrod */
7151         fp->state = BNX2X_FP_STATE_OPENING;
7152         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7153                       fp->cl_id, 0);
7154
7155         /* Wait for completion */
7156         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7157                                  &(fp->state), 0);
7158 }
7159
7160 static int bnx2x_poll(struct napi_struct *napi, int budget);
7161
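/*
 * Derive the Rx/Tx queue counts for MSI-X: the num_rx_queues and
 * num_tx_queues module parameters win when non-zero, otherwise both
 * default to min(num_online_cpus(), BNX2X_MAX_QUEUES()); the Tx count
 * is then clamped so it never exceeds the Rx count.
 */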
7162 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7163                                     int *num_tx_queues_out)
7164 {
7165         int _num_rx_queues = 0, _num_tx_queues = 0;
7166
7167         switch (bp->multi_mode) {
7168         case ETH_RSS_MODE_DISABLED:
7169                 _num_rx_queues = 1;
7170                 _num_tx_queues = 1;
7171                 break;
7172
7173         case ETH_RSS_MODE_REGULAR:
7174                 if (num_rx_queues)
7175                         _num_rx_queues = min_t(u32, num_rx_queues,
7176                                                BNX2X_MAX_QUEUES(bp));
7177                 else
7178                         _num_rx_queues = min_t(u32, num_online_cpus(),
7179                                                BNX2X_MAX_QUEUES(bp));
7180
7181                 if (num_tx_queues)
7182                         _num_tx_queues = min_t(u32, num_tx_queues,
7183                                                BNX2X_MAX_QUEUES(bp));
7184                 else
7185                         _num_tx_queues = min_t(u32, num_online_cpus(),
7186                                                BNX2X_MAX_QUEUES(bp));
7187
7188                 /* There must not be more Tx queues than Rx queues */
7189                 if (_num_tx_queues > _num_rx_queues) {
7190                         BNX2X_ERR("number of tx queues (%d) > "
7191                                   "number of rx queues (%d)"
7192                                   "  defaulting to %d\n",
7193                                   _num_tx_queues, _num_rx_queues,
7194                                   _num_rx_queues);
7195                         _num_tx_queues = _num_rx_queues;
7196                 }
7197                 break;
7198
7200         default:
7201                 _num_rx_queues = 1;
7202                 _num_tx_queues = 1;
7203                 break;
7204         }
7205
7206         *num_rx_queues_out = _num_rx_queues;
7207         *num_tx_queues_out = _num_tx_queues;
7208 }
7209
7210 static int bnx2x_set_int_mode(struct bnx2x *bp)
7211 {
7212         int rc = 0;
7213
7214         switch (int_mode) {
7215         case INT_MODE_INTx:
7216         case INT_MODE_MSI:
7217                 bp->num_rx_queues = 1;
7218                 bp->num_tx_queues = 1;
7219                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7220                 break;
7221
7222         case INT_MODE_MSIX:
7223         default:
7224                 /* Set interrupt mode according to bp->multi_mode value */
7225                 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7226                                         &bp->num_tx_queues);
7227
7228                 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7229                    bp->num_rx_queues, bp->num_tx_queues);
7230
7231                 /* if we can't use MSI-X we only need one fp,
7232                  * so try to enable MSI-X with the requested number of fp's
7233                  * and fall back to MSI or legacy INTx with one fp
7234                  */
7235                 rc = bnx2x_enable_msix(bp);
7236                 if (rc) {
7237                         /* failed to enable MSI-X */
7238                         if (bp->multi_mode)
7239                                 BNX2X_ERR("Multi requested but failed to "
7240                                           "enable MSI-X (rx %d tx %d), "
7241                                           "set number of queues to 1\n",
7242                                           bp->num_rx_queues, bp->num_tx_queues);
7243                         bp->num_rx_queues = 1;
7244                         bp->num_tx_queues = 1;
7245                 }
7246                 break;
7247         }
7248         bp->dev->real_num_tx_queues = bp->num_tx_queues;
7249         return rc;
7250 }
7251
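/*
 * nic_load brings the device up end to end: pick the interrupt mode,
 * allocate rings, request IRQs, perform the LOAD_REQ/LOAD_DONE
 * handshake with the MCP (or emulate it via load_count[] when no MCP
 * is present), init the HW, and finally open the client connections
 * via ramrods.
 */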
7253 /* must be called with rtnl_lock */
7254 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7255 {
7256         u32 load_code;
7257         int i, rc;
7258
7259 #ifdef BNX2X_STOP_ON_ERROR
7260         if (unlikely(bp->panic))
7261                 return -EPERM;
7262 #endif
7263
7264         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7265
7266         rc = bnx2x_set_int_mode(bp);
7267
7268         if (bnx2x_alloc_mem(bp))
7269                 return -ENOMEM;
7270
7271         for_each_rx_queue(bp, i)
7272                 bnx2x_fp(bp, i, disable_tpa) =
7273                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7274
7275         for_each_rx_queue(bp, i)
7276                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7277                                bnx2x_poll, 128);
7278
7279         bnx2x_napi_enable(bp);
7280
7281         if (bp->flags & USING_MSIX_FLAG) {
7282                 rc = bnx2x_req_msix_irqs(bp);
7283                 if (rc) {
7284                         pci_disable_msix(bp->pdev);
7285                         goto load_error1;
7286                 }
7287         } else {
7288                 /* Fall back to INTx if MSI-X could not be enabled due to
7289                    lack of memory (in bnx2x_set_int_mode()) */
7290                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7291                         bnx2x_enable_msi(bp);
7292                 bnx2x_ack_int(bp);
7293                 rc = bnx2x_req_irq(bp);
7294                 if (rc) {
7295                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7296                         if (bp->flags & USING_MSI_FLAG)
7297                                 pci_disable_msi(bp->pdev);
7298                         goto load_error1;
7299                 }
7300                 if (bp->flags & USING_MSI_FLAG) {
7301                         bp->dev->irq = bp->pdev->irq;
7302                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7303                                bp->dev->name, bp->pdev->irq);
7304                 }
7305         }
7306
7307         /* Send LOAD_REQUEST command to MCP.
7308            Returns the type of LOAD command:
7309            if this is the first port to be initialized,
7310            common blocks should be initialized, otherwise not
7311         */
7312         if (!BP_NOMCP(bp)) {
7313                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7314                 if (!load_code) {
7315                         BNX2X_ERR("MCP response failure, aborting\n");
7316                         rc = -EBUSY;
7317                         goto load_error2;
7318                 }
7319                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7320                         rc = -EBUSY; /* other port in diagnostic mode */
7321                         goto load_error2;
7322                 }
7323
7324         } else {
7325                 int port = BP_PORT(bp);
7326
7327                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7328                    load_count[0], load_count[1], load_count[2]);
7329                 load_count[0]++;
7330                 load_count[1 + port]++;
7331                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7332                    load_count[0], load_count[1], load_count[2]);
7333                 if (load_count[0] == 1)
7334                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7335                 else if (load_count[1 + port] == 1)
7336                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7337                 else
7338                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7339         }
7340
7341         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7342             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7343                 bp->port.pmf = 1;
7344         else
7345                 bp->port.pmf = 0;
7346         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7347
7348         /* Initialize HW */
7349         rc = bnx2x_init_hw(bp, load_code);
7350         if (rc) {
7351                 BNX2X_ERR("HW init failed, aborting\n");
7352                 goto load_error2;
7353         }
7354
7355         /* Setup NIC internals and enable interrupts */
7356         bnx2x_nic_init(bp, load_code);
7357
7358         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7359             (bp->common.shmem2_base))
7360                 SHMEM2_WR(bp, dcc_support,
7361                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7362                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7363
7364         /* Send LOAD_DONE command to MCP */
7365         if (!BP_NOMCP(bp)) {
7366                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7367                 if (!load_code) {
7368                         BNX2X_ERR("MCP response failure, aborting\n");
7369                         rc = -EBUSY;
7370                         goto load_error3;
7371                 }
7372         }
7373
7374         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7375
7376         rc = bnx2x_setup_leading(bp);
7377         if (rc) {
7378                 BNX2X_ERR("Setup leading failed!\n");
7379 #ifndef BNX2X_STOP_ON_ERROR
7380                 goto load_error3;
7381 #else
7382                 bp->panic = 1;
7383                 return -EBUSY;
7384 #endif
7385         }
7386
7387         if (CHIP_IS_E1H(bp))
7388                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7389                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7390                         bp->state = BNX2X_STATE_DISABLED;
7391                 }
7392
7393         if (bp->state == BNX2X_STATE_OPEN) {
7394                 for_each_nondefault_queue(bp, i) {
7395                         rc = bnx2x_setup_multi(bp, i);
7396                         if (rc)
7397                                 goto load_error3;
7398                 }
7399
7400                 if (CHIP_IS_E1(bp))
7401                         bnx2x_set_mac_addr_e1(bp, 1);
7402                 else
7403                         bnx2x_set_mac_addr_e1h(bp, 1);
7404         }
7405
7406         if (bp->port.pmf)
7407                 bnx2x_initial_phy_init(bp, load_mode);
7408
7409         /* Start fast path */
7410         switch (load_mode) {
7411         case LOAD_NORMAL:
7412                 if (bp->state == BNX2X_STATE_OPEN) {
7413                         /* Tx queues should only be re-enabled */
7414                         netif_tx_wake_all_queues(bp->dev);
7415                 }
7416                 /* Initialize the receive filter. */
7417                 bnx2x_set_rx_mode(bp->dev);
7418                 break;
7419
7420         case LOAD_OPEN:
7421                 netif_tx_start_all_queues(bp->dev);
7422                 if (bp->state != BNX2X_STATE_OPEN)
7423                         netif_tx_disable(bp->dev);
7424                 /* Initialize the receive filter. */
7425                 bnx2x_set_rx_mode(bp->dev);
7426                 break;
7427
7428         case LOAD_DIAG:
7429                 /* Initialize the receive filter. */
7430                 bnx2x_set_rx_mode(bp->dev);
7431                 bp->state = BNX2X_STATE_DIAG;
7432                 break;
7433
7434         default:
7435                 break;
7436         }
7437
7438         if (!bp->port.pmf)
7439                 bnx2x__link_status_update(bp);
7440
7441         /* start the timer */
7442         mod_timer(&bp->timer, jiffies + bp->current_interval);
7443
7445         return 0;
7446
7447 load_error3:
7448         bnx2x_int_disable_sync(bp, 1);
7449         if (!BP_NOMCP(bp)) {
7450                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7451                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7452         }
7453         bp->port.pmf = 0;
7454         /* Free SKBs, SGEs, TPA pool and driver internals */
7455         bnx2x_free_skbs(bp);
7456         for_each_rx_queue(bp, i)
7457                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7458 load_error2:
7459         /* Release IRQs */
7460         bnx2x_free_irq(bp);
7461 load_error1:
7462         bnx2x_napi_disable(bp);
7463         for_each_rx_queue(bp, i)
7464                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7465         bnx2x_free_mem(bp);
7466
7467         return rc;
7468 }
7469
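/*
 * Tear down a non-leading client: HALT ramrod first, then CFC_DEL,
 * each waited for in poll mode since the queues are already being
 * shut down.
 */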
7470 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7471 {
7472         struct bnx2x_fastpath *fp = &bp->fp[index];
7473         int rc;
7474
7475         /* halt the connection */
7476         fp->state = BNX2X_FP_STATE_HALTING;
7477         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7478
7479         /* Wait for completion */
7480         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7481                                &(fp->state), 1);
7482         if (rc) /* timeout */
7483                 return rc;
7484
7485         /* delete cfc entry */
7486         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7487
7488         /* Wait for completion */
7489         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7490                                &(fp->state), 1);
7491         return rc;
7492 }
7493
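/*
 * Tear down the leading connection.  The PORT_DEL completion is
 * observed by watching the default status block producer rather than
 * a fastpath state, since fp[0] has already been halted by then.
 */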
7494 static int bnx2x_stop_leading(struct bnx2x *bp)
7495 {
7496         __le16 dsb_sp_prod_idx;
7497         /* if the other port is handling traffic,
7498            this can take a lot of time */
7499         int cnt = 500;
7500         int rc;
7501
7502         might_sleep();
7503
7504         /* Send HALT ramrod */
7505         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7506         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7507
7508         /* Wait for completion */
7509         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7510                                &(bp->fp[0].state), 1);
7511         if (rc) /* timeout */
7512                 return rc;
7513
7514         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7515
7516         /* Send PORT_DELETE ramrod */
7517         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7518
7519         /* Wait for the completion to arrive on the default status block;
7520            we are going to reset the chip anyway,
7521            so there is not much to do if this times out
7522          */
7523         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7524                 if (!cnt) {
7525                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7526                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7527                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7528 #ifdef BNX2X_STOP_ON_ERROR
7529                         bnx2x_panic();
7530 #endif
7531                         rc = -EBUSY;
7532                         break;
7533                 }
7534                 cnt--;
7535                 msleep(1);
7536                 rmb(); /* Refresh the dsb_sp_prod */
7537         }
7538         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7539         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7540
7541         return rc;
7542 }
7543
7544 static void bnx2x_reset_func(struct bnx2x *bp)
7545 {
7546         int port = BP_PORT(bp);
7547         int func = BP_FUNC(bp);
7548         int base, i;
7549
7550         /* Configure IGU */
7551         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7552         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7553
7554         /* Clear ILT */
7555         base = FUNC_ILT_BASE(func);
7556         for (i = base; i < base + ILT_PER_FUNC; i++)
7557                 bnx2x_ilt_wr(bp, i, 0);
7558 }
7559
7560 static void bnx2x_reset_port(struct bnx2x *bp)
7561 {
7562         int port = BP_PORT(bp);
7563         u32 val;
7564
7565         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7566
7567         /* Do not rcv packets to BRB */
7568         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7569         /* Do not direct rcv packets that are not for MCP to the BRB */
7570         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7571                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7572
7573         /* Configure AEU */
7574         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7575
7576         msleep(100);
7577         /* Check for BRB port occupancy */
7578         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7579         if (val)
7580                 DP(NETIF_MSG_IFDOWN,
7581                    "BRB1 is not empty  %d blocks are occupied\n", val);
7582
7583         /* TODO: Close Doorbell port? */
7584 }
7585
7586 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7587 {
7588         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7589            BP_FUNC(bp), reset_code);
7590
7591         switch (reset_code) {
7592         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7593                 bnx2x_reset_port(bp);
7594                 bnx2x_reset_func(bp);
7595                 bnx2x_reset_common(bp);
7596                 break;
7597
7598         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7599                 bnx2x_reset_port(bp);
7600                 bnx2x_reset_func(bp);
7601                 break;
7602
7603         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7604                 bnx2x_reset_func(bp);
7605                 break;
7606
7607         default:
7608                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7609                 break;
7610         }
7611 }
7612
7613 /* must be called with rtnl_lock */
7614 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7615 {
7616         int port = BP_PORT(bp);
7617         u32 reset_code = 0;
7618         int i, cnt, rc;
7619
7620         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7621
7622         bp->rx_mode = BNX2X_RX_MODE_NONE;
7623         bnx2x_set_storm_rx_mode(bp);
7624
7625         bnx2x_netif_stop(bp, 1);
7626
7627         del_timer_sync(&bp->timer);
7628         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7629                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7630         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7631
7632         /* Release IRQs */
7633         bnx2x_free_irq(bp);
7634
7635         /* Wait until tx fastpath tasks complete */
7636         for_each_tx_queue(bp, i) {
7637                 struct bnx2x_fastpath *fp = &bp->fp[i];
7638
7639                 cnt = 1000;
7640                 while (bnx2x_has_tx_work_unload(fp)) {
7641
7642                         bnx2x_tx_int(fp);
7643                         if (!cnt) {
7644                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7645                                           i);
7646 #ifdef BNX2X_STOP_ON_ERROR
7647                                 bnx2x_panic();
7648                                 return -EBUSY;
7649 #else
7650                                 break;
7651 #endif
7652                         }
7653                         cnt--;
7654                         msleep(1);
7655                 }
7656         }
7657         /* Give HW time to discard old tx messages */
7658         msleep(1);
7659
7660         if (CHIP_IS_E1(bp)) {
7661                 struct mac_configuration_cmd *config =
7662                                                 bnx2x_sp(bp, mcast_config);
7663
7664                 bnx2x_set_mac_addr_e1(bp, 0);
7665
7666                 for (i = 0; i < config->hdr.length; i++)
7667                         CAM_INVALIDATE(config->config_table[i]);
7668
7669                 config->hdr.length = i;
7670                 if (CHIP_REV_IS_SLOW(bp))
7671                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7672                 else
7673                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7674                 config->hdr.client_id = bp->fp->cl_id;
7675                 config->hdr.reserved1 = 0;
7676
7677                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7678                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7679                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7680
7681         } else { /* E1H */
7682                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7683
7684                 bnx2x_set_mac_addr_e1h(bp, 0);
7685
7686                 for (i = 0; i < MC_HASH_SIZE; i++)
7687                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7688
7689                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7690         }
7691
7692         if (unload_mode == UNLOAD_NORMAL)
7693                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7694
7695         else if (bp->flags & NO_WOL_FLAG)
7696                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7697
7698         else if (bp->wol) {
7699                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7700                 u8 *mac_addr = bp->dev->dev_addr;
7701                 u32 val;
7702                 /* The mac address is written to entries 1-4 to
7703                    preserve entry 0 which is used by the PMF */
7704                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7705
7706                 val = (mac_addr[0] << 8) | mac_addr[1];
7707                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7708
7709                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7710                       (mac_addr[4] << 8) | mac_addr[5];
7711                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7712
7713                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7714
7715         } else
7716                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7717
7718         /* Close multi and leading connections
7719            Completions for ramrods are collected in a synchronous way */
7720         for_each_nondefault_queue(bp, i)
7721                 if (bnx2x_stop_multi(bp, i))
7722                         goto unload_error;
7723
7724         rc = bnx2x_stop_leading(bp);
7725         if (rc) {
7726                 BNX2X_ERR("Stop leading failed!\n");
7727 #ifdef BNX2X_STOP_ON_ERROR
7728                 return -EBUSY;
7729 #else
7730                 goto unload_error;
7731 #endif
7732         }
7733
7734 unload_error:
7735         if (!BP_NOMCP(bp))
7736                 reset_code = bnx2x_fw_command(bp, reset_code);
7737         else {
7738                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7739                    load_count[0], load_count[1], load_count[2]);
7740                 load_count[0]--;
7741                 load_count[1 + port]--;
7742                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7743                    load_count[0], load_count[1], load_count[2]);
7744                 if (load_count[0] == 0)
7745                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7746                 else if (load_count[1 + port] == 0)
7747                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7748                 else
7749                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7750         }
7751
7752         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7753             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7754                 bnx2x__link_reset(bp);
7755
7756         /* Reset the chip */
7757         bnx2x_reset_chip(bp, reset_code);
7758
7759         /* Report UNLOAD_DONE to MCP */
7760         if (!BP_NOMCP(bp))
7761                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7762
7763         bp->port.pmf = 0;
7764
7765         /* Free SKBs, SGEs, TPA pool and driver internals */
7766         bnx2x_free_skbs(bp);
7767         for_each_rx_queue(bp, i)
7768                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7769         for_each_rx_queue(bp, i)
7770                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7771         bnx2x_free_mem(bp);
7772
7773         bp->state = BNX2X_STATE_CLOSED;
7774
7775         netif_carrier_off(bp->dev);
7776
7777         return 0;
7778 }
7779
7780 static void bnx2x_reset_task(struct work_struct *work)
7781 {
7782         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7783
7784 #ifdef BNX2X_STOP_ON_ERROR
7785         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7786                   " so reset not done to allow debug dump,\n"
7787                   " you will need to reboot when done\n");
7788         return;
7789 #endif
7790
7791         rtnl_lock();
7792
7793         if (!netif_running(bp->dev))
7794                 goto reset_task_exit;
7795
7796         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7797         bnx2x_nic_load(bp, LOAD_NORMAL);
7798
7799 reset_task_exit:
7800         rtnl_unlock();
7801 }
7802
7803 /* end of nic load/unload */
7804
7805 /* ethtool_ops */
7806
7807 /*
7808  * Init service functions
7809  */
7810
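/*
 * Map a function index to its PXP2 "pretend" register.  Writing a
 * function number into this register makes subsequent GRC accesses
 * behave as if issued by that function; bnx2x_undi_int_disable_e1h()
 * below uses it to disable interrupts while pretending to be
 * function 0.
 */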
7811 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7812 {
7813         switch (func) {
7814         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7815         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7816         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7817         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7818         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7819         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7820         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7821         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7822         default:
7823                 BNX2X_ERR("Unsupported function index: %d\n", func);
7824                 return (u32)(-1);
7825         }
7826 }
7827
7828 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7829 {
7830         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7831
7832         /* Flush all outstanding writes */
7833         mmiowb();
7834
7835         /* Pretend to be function 0 */
7836         REG_WR(bp, reg, 0);
7837         /* Flush the GRC transaction (in the chip) */
7838         new_val = REG_RD(bp, reg);
7839         if (new_val != 0) {
7840                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7841                           new_val);
7842                 BUG();
7843         }
7844
7845         /* From now we are in the "like-E1" mode */
7846         bnx2x_int_disable(bp);
7847
7848         /* Flush all outstanding writes */
7849         mmiowb();
7850
7851         /* Restore the original function settings */
7852         REG_WR(bp, reg, orig_func);
7853         new_val = REG_RD(bp, reg);
7854         if (new_val != orig_func) {
7855                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7856                           orig_func, new_val);
7857                 BUG();
7858         }
7859 }
7860
7861 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7862 {
7863         if (CHIP_IS_E1H(bp))
7864                 bnx2x_undi_int_disable_e1h(bp, func);
7865         else
7866                 bnx2x_int_disable(bp);
7867 }
7868
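/*
 * Handle a device left running by a pre-boot (UNDI/PXE) driver: if the
 * normal doorbell CID offset is still 0x7 the UNDI driver was active,
 * so unload it through the MCP on both ports, quiesce traffic, and
 * pulse the reset registers (preserving the NIG port-swap straps)
 * before this driver takes over.
 */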
7869 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7870 {
7871         u32 val;
7872
7873         /* Check if there is any driver already loaded */
7874         val = REG_RD(bp, MISC_REG_UNPREPARED);
7875         if (val == 0x1) {
7876                 /* Check if it is the UNDI driver:
7877                  * UNDI initializes the CID offset for the normal doorbell to 0x7
7878                  */
7879                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7880                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7881                 if (val == 0x7) {
7882                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7883                         /* save our func */
7884                         int func = BP_FUNC(bp);
7885                         u32 swap_en;
7886                         u32 swap_val;
7887
7888                         /* clear the UNDI indication */
7889                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7890
7891                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7892
7893                         /* try unload UNDI on port 0 */
7894                         bp->func = 0;
7895                         bp->fw_seq =
7896                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7897                                 DRV_MSG_SEQ_NUMBER_MASK);
7898                         reset_code = bnx2x_fw_command(bp, reset_code);
7899
7900                         /* if UNDI is loaded on the other port */
7901                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7902
7903                                 /* send "DONE" for previous unload */
7904                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7905
7906                                 /* unload UNDI on port 1 */
7907                                 bp->func = 1;
7908                                 bp->fw_seq =
7909                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7910                                         DRV_MSG_SEQ_NUMBER_MASK);
7911                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7912
7913                                 bnx2x_fw_command(bp, reset_code);
7914                         }
7915
7916                         /* now it's safe to release the lock */
7917                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7918
7919                         bnx2x_undi_int_disable(bp, func);
7920
7921                         /* close input traffic and wait for it */
7922                         /* Do not rcv packets to BRB */
7923                         REG_WR(bp,
7924                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7925                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7926                         /* Do not direct rcv packets that are not for MCP to
7927                          * the BRB */
7928                         REG_WR(bp,
7929                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7930                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7931                         /* clear AEU */
7932                         REG_WR(bp,
7933                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7934                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7935                         msleep(10);
7936
7937                         /* save NIG port swap info */
7938                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7939                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7940                         /* reset device */
7941                         REG_WR(bp,
7942                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7943                                0xd3ffffff);
7944                         REG_WR(bp,
7945                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7946                                0x1403);
7947                         /* take the NIG out of reset and restore swap values */
7948                         REG_WR(bp,
7949                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7950                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
7951                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7952                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7953
7954                         /* send unload done to the MCP */
7955                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7956
7957                         /* restore our func and fw_seq */
7958                         bp->func = func;
7959                         bp->fw_seq =
7960                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7961                                 DRV_MSG_SEQ_NUMBER_MASK);
7962
7963                 } else
7964                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7965         }
7966 }
7967
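/*
 * Read the board-wide configuration: chip id fields, flash size and
 * the shared-memory bases.  If shmem_base falls outside the expected
 * window the MCP is treated as not active and NO_MCP_FLAG is set,
 * switching the driver to the load_count[] bookkeeping seen in
 * nic_load/unload.
 */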
7968 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7969 {
7970         u32 val, val2, val3, val4, id;
7971         u16 pmc;
7972
7973         /* Get the chip revision id and number. */
7974         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7975         val = REG_RD(bp, MISC_REG_CHIP_NUM);
7976         id = ((val & 0xffff) << 16);
7977         val = REG_RD(bp, MISC_REG_CHIP_REV);
7978         id |= ((val & 0xf) << 12);
7979         val = REG_RD(bp, MISC_REG_CHIP_METAL);
7980         id |= ((val & 0xff) << 4);
7981         val = REG_RD(bp, MISC_REG_BOND_ID);
7982         id |= (val & 0xf);
7983         bp->common.chip_id = id;
7984         bp->link_params.chip_id = bp->common.chip_id;
7985         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7986
7987         val = (REG_RD(bp, 0x2874) & 0x55);
7988         if ((bp->common.chip_id & 0x1) ||
7989             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7990                 bp->flags |= ONE_PORT_FLAG;
7991                 BNX2X_DEV_INFO("single port device\n");
7992         }
7993
7994         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7995         bp->common.flash_size = (NVRAM_1MB_SIZE <<
7996                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
7997         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7998                        bp->common.flash_size, bp->common.flash_size);
7999
8000         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8001         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8002         bp->link_params.shmem_base = bp->common.shmem_base;
8003         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8004                        bp->common.shmem_base, bp->common.shmem2_base);
8005
8006         if (!bp->common.shmem_base ||
8007             (bp->common.shmem_base < 0xA0000) ||
8008             (bp->common.shmem_base >= 0xC0000)) {
8009                 BNX2X_DEV_INFO("MCP not active\n");
8010                 bp->flags |= NO_MCP_FLAG;
8011                 return;
8012         }
8013
8014         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8015         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8016                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8017                 BNX2X_ERR("BAD MCP validity signature\n");
8018
8019         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8020         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8021
8022         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8023                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8024                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8025
8026         bp->link_params.feature_config_flags = 0;
8027         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8028         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8029                 bp->link_params.feature_config_flags |=
8030                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8031         else
8032                 bp->link_params.feature_config_flags &=
8033                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8034
8035         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8036         bp->common.bc_ver = val;
8037         BNX2X_DEV_INFO("bc_ver %X\n", val);
8038         if (val < BNX2X_BC_VER) {
8039                 /* for now only warn
8040                  * later we might need to enforce this */
8041                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8042                           " please upgrade BC\n", BNX2X_BC_VER, val);
8043         }
8044         bp->link_params.feature_config_flags |=
8045                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8046                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8047
8048         if (BP_E1HVN(bp) == 0) {
8049                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8050                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8051         } else {
8052                 /* no WOL capability for E1HVN != 0 */
8053                 bp->flags |= NO_WOL_FLAG;
8054         }
8055         BNX2X_DEV_INFO("%sWoL capable\n",
8056                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8057
8058         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8059         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8060         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8061         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8062
8063         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8064                val, val2, val3, val4);
8065 }
8066
8067 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8068                                                     u32 switch_cfg)
8069 {
8070         int port = BP_PORT(bp);
8071         u32 ext_phy_type;
8072
8073         switch (switch_cfg) {
8074         case SWITCH_CFG_1G:
8075                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8076
8077                 ext_phy_type =
8078                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8079                 switch (ext_phy_type) {
8080                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8081                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8082                                        ext_phy_type);
8083
8084                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8085                                                SUPPORTED_10baseT_Full |
8086                                                SUPPORTED_100baseT_Half |
8087                                                SUPPORTED_100baseT_Full |
8088                                                SUPPORTED_1000baseT_Full |
8089                                                SUPPORTED_2500baseX_Full |
8090                                                SUPPORTED_TP |
8091                                                SUPPORTED_FIBRE |
8092                                                SUPPORTED_Autoneg |
8093                                                SUPPORTED_Pause |
8094                                                SUPPORTED_Asym_Pause);
8095                         break;
8096
8097                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8098                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8099                                        ext_phy_type);
8100
8101                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8102                                                SUPPORTED_10baseT_Full |
8103                                                SUPPORTED_100baseT_Half |
8104                                                SUPPORTED_100baseT_Full |
8105                                                SUPPORTED_1000baseT_Full |
8106                                                SUPPORTED_TP |
8107                                                SUPPORTED_FIBRE |
8108                                                SUPPORTED_Autoneg |
8109                                                SUPPORTED_Pause |
8110                                                SUPPORTED_Asym_Pause);
8111                         break;
8112
8113                 default:
8114                         BNX2X_ERR("NVRAM config error. "
8115                                   "BAD SerDes ext_phy_config 0x%x\n",
8116                                   bp->link_params.ext_phy_config);
8117                         return;
8118                 }
8119
8120                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8121                                            port*0x10);
8122                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8123                 break;
8124
8125         case SWITCH_CFG_10G:
8126                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8127
8128                 ext_phy_type =
8129                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8130                 switch (ext_phy_type) {
8131                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8132                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8133                                        ext_phy_type);
8134
8135                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8136                                                SUPPORTED_10baseT_Full |
8137                                                SUPPORTED_100baseT_Half |
8138                                                SUPPORTED_100baseT_Full |
8139                                                SUPPORTED_1000baseT_Full |
8140                                                SUPPORTED_2500baseX_Full |
8141                                                SUPPORTED_10000baseT_Full |
8142                                                SUPPORTED_TP |
8143                                                SUPPORTED_FIBRE |
8144                                                SUPPORTED_Autoneg |
8145                                                SUPPORTED_Pause |
8146                                                SUPPORTED_Asym_Pause);
8147                         break;
8148
8149                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8150                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8151                                        ext_phy_type);
8152
8153                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8154                                                SUPPORTED_1000baseT_Full |
8155                                                SUPPORTED_FIBRE |
8156                                                SUPPORTED_Autoneg |
8157                                                SUPPORTED_Pause |
8158                                                SUPPORTED_Asym_Pause);
8159                         break;
8160
8161                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8162                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8163                                        ext_phy_type);
8164
8165                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8166                                                SUPPORTED_2500baseX_Full |
8167                                                SUPPORTED_1000baseT_Full |
8168                                                SUPPORTED_FIBRE |
8169                                                SUPPORTED_Autoneg |
8170                                                SUPPORTED_Pause |
8171                                                SUPPORTED_Asym_Pause);
8172                         break;
8173
8174                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8175                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8176                                        ext_phy_type);
8177
8178                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8179                                                SUPPORTED_FIBRE |
8180                                                SUPPORTED_Pause |
8181                                                SUPPORTED_Asym_Pause);
8182                         break;
8183
8184                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8185                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8186                                        ext_phy_type);
8187
8188                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8189                                                SUPPORTED_1000baseT_Full |
8190                                                SUPPORTED_FIBRE |
8191                                                SUPPORTED_Pause |
8192                                                SUPPORTED_Asym_Pause);
8193                         break;
8194
8195                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8196                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8197                                        ext_phy_type);
8198
8199                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8200                                                SUPPORTED_1000baseT_Full |
8201                                                SUPPORTED_Autoneg |
8202                                                SUPPORTED_FIBRE |
8203                                                SUPPORTED_Pause |
8204                                                SUPPORTED_Asym_Pause);
8205                         break;
8206
8207                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8208                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8209                                        ext_phy_type);
8210
8211                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8212                                                SUPPORTED_1000baseT_Full |
8213                                                SUPPORTED_Autoneg |
8214                                                SUPPORTED_FIBRE |
8215                                                SUPPORTED_Pause |
8216                                                SUPPORTED_Asym_Pause);
8217                         break;
8218
8219                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8220                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8221                                        ext_phy_type);
8222
8223                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8224                                                SUPPORTED_TP |
8225                                                SUPPORTED_Autoneg |
8226                                                SUPPORTED_Pause |
8227                                                SUPPORTED_Asym_Pause);
8228                         break;
8229
8230                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8231                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8232                                        ext_phy_type);
8233
8234                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8235                                                SUPPORTED_10baseT_Full |
8236                                                SUPPORTED_100baseT_Half |
8237                                                SUPPORTED_100baseT_Full |
8238                                                SUPPORTED_1000baseT_Full |
8239                                                SUPPORTED_10000baseT_Full |
8240                                                SUPPORTED_TP |
8241                                                SUPPORTED_Autoneg |
8242                                                SUPPORTED_Pause |
8243                                                SUPPORTED_Asym_Pause);
8244                         break;
8245
8246                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8247                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8248                                   bp->link_params.ext_phy_config);
8249                         break;
8250
8251                 default:
8252                         BNX2X_ERR("NVRAM config error. "
8253                                   "BAD XGXS ext_phy_config 0x%x\n",
8254                                   bp->link_params.ext_phy_config);
8255                         return;
8256                 }
8257
8258                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8259                                            port*0x18);
8260                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8261
8262                 break;
8263
8264         default:
8265                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8266                           bp->port.link_config);
8267                 return;
8268         }
8269         bp->link_params.phy_addr = bp->port.phy_addr;
8270
8271         /* mask what we support according to speed_cap_mask */
8272         if (!(bp->link_params.speed_cap_mask &
8273                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8274                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8275
8276         if (!(bp->link_params.speed_cap_mask &
8277                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8278                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8279
8280         if (!(bp->link_params.speed_cap_mask &
8281                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8282                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8283
8284         if (!(bp->link_params.speed_cap_mask &
8285                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8286                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8287
8288         if (!(bp->link_params.speed_cap_mask &
8289                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8290                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8291                                         SUPPORTED_1000baseT_Full);
8292
8293         if (!(bp->link_params.speed_cap_mask &
8294                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8295                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8296
8297         if (!(bp->link_params.speed_cap_mask &
8298                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8299                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8300
8301         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8302 }
8303
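/* Translate the NVRAM link_config (requested speed/duplex and flow control)
 * into link_params and the port advertising mask, falling back to
 * autonegotiation on an invalid speed setting.
 */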
8304 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8305 {
8306         bp->link_params.req_duplex = DUPLEX_FULL;
8307
8308         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8309         case PORT_FEATURE_LINK_SPEED_AUTO:
8310                 if (bp->port.supported & SUPPORTED_Autoneg) {
8311                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8312                         bp->port.advertising = bp->port.supported;
8313                 } else {
8314                         u32 ext_phy_type =
8315                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8316
8317                         if ((ext_phy_type ==
8318                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8319                             (ext_phy_type ==
8320                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8321                                 /* force 10G, no AN */
8322                                 bp->link_params.req_line_speed = SPEED_10000;
8323                                 bp->port.advertising =
8324                                                 (ADVERTISED_10000baseT_Full |
8325                                                  ADVERTISED_FIBRE);
8326                                 break;
8327                         }
8328                         BNX2X_ERR("NVRAM config error. "
8329                                   "Invalid link_config 0x%x"
8330                                   "  Autoneg not supported\n",
8331                                   bp->port.link_config);
8332                         return;
8333                 }
8334                 break;
8335
8336         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8337                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8338                         bp->link_params.req_line_speed = SPEED_10;
8339                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8340                                                 ADVERTISED_TP);
8341                 } else {
8342                         BNX2X_ERR("NVRAM config error. "
8343                                   "Invalid link_config 0x%x"
8344                                   "  speed_cap_mask 0x%x\n",
8345                                   bp->port.link_config,
8346                                   bp->link_params.speed_cap_mask);
8347                         return;
8348                 }
8349                 break;
8350
8351         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8352                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8353                         bp->link_params.req_line_speed = SPEED_10;
8354                         bp->link_params.req_duplex = DUPLEX_HALF;
8355                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8356                                                 ADVERTISED_TP);
8357                 } else {
8358                         BNX2X_ERR("NVRAM config error. "
8359                                   "Invalid link_config 0x%x"
8360                                   "  speed_cap_mask 0x%x\n",
8361                                   bp->port.link_config,
8362                                   bp->link_params.speed_cap_mask);
8363                         return;
8364                 }
8365                 break;
8366
8367         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8368                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8369                         bp->link_params.req_line_speed = SPEED_100;
8370                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8371                                                 ADVERTISED_TP);
8372                 } else {
8373                         BNX2X_ERR("NVRAM config error. "
8374                                   "Invalid link_config 0x%x"
8375                                   "  speed_cap_mask 0x%x\n",
8376                                   bp->port.link_config,
8377                                   bp->link_params.speed_cap_mask);
8378                         return;
8379                 }
8380                 break;
8381
8382         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8383                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8384                         bp->link_params.req_line_speed = SPEED_100;
8385                         bp->link_params.req_duplex = DUPLEX_HALF;
8386                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8387                                                 ADVERTISED_TP);
8388                 } else {
8389                         BNX2X_ERR("NVRAM config error. "
8390                                   "Invalid link_config 0x%x"
8391                                   "  speed_cap_mask 0x%x\n",
8392                                   bp->port.link_config,
8393                                   bp->link_params.speed_cap_mask);
8394                         return;
8395                 }
8396                 break;
8397
8398         case PORT_FEATURE_LINK_SPEED_1G:
8399                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8400                         bp->link_params.req_line_speed = SPEED_1000;
8401                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8402                                                 ADVERTISED_TP);
8403                 } else {
8404                         BNX2X_ERR("NVRAM config error. "
8405                                   "Invalid link_config 0x%x"
8406                                   "  speed_cap_mask 0x%x\n",
8407                                   bp->port.link_config,
8408                                   bp->link_params.speed_cap_mask);
8409                         return;
8410                 }
8411                 break;
8412
8413         case PORT_FEATURE_LINK_SPEED_2_5G:
8414                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8415                         bp->link_params.req_line_speed = SPEED_2500;
8416                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8417                                                 ADVERTISED_TP);
8418                 } else {
8419                         BNX2X_ERR("NVRAM config error. "
8420                                   "Invalid link_config 0x%x"
8421                                   "  speed_cap_mask 0x%x\n",
8422                                   bp->port.link_config,
8423                                   bp->link_params.speed_cap_mask);
8424                         return;
8425                 }
8426                 break;
8427
8428         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8429         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8430         case PORT_FEATURE_LINK_SPEED_10G_KR:
8431                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8432                         bp->link_params.req_line_speed = SPEED_10000;
8433                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8434                                                 ADVERTISED_FIBRE);
8435                 } else {
8436                         BNX2X_ERR("NVRAM config error. "
8437                                   "Invalid link_config 0x%x"
8438                                   "  speed_cap_mask 0x%x\n",
8439                                   bp->port.link_config,
8440                                   bp->link_params.speed_cap_mask);
8441                         return;
8442                 }
8443                 break;
8444
8445         default:
8446                 BNX2X_ERR("NVRAM config error. "
8447                           "BAD link speed link_config 0x%x\n",
8448                           bp->port.link_config);
8449                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8450                 bp->port.advertising = bp->port.supported;
8451                 break;
8452         }
8453
8454         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8455                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8456         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8457             !(bp->port.supported & SUPPORTED_Autoneg))
8458                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8459
8460         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8461                        "  advertising 0x%x\n",
8462                        bp->link_params.req_line_speed,
8463                        bp->link_params.req_duplex,
8464                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8465 }
8466
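/* Read the per-port configuration (lane config, external PHY, speed
 * capabilities, WoL default and MAC address) from the shared memory
 * region populated by the bootcode.
 */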
8467 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8468 {
8469         int port = BP_PORT(bp);
8470         u32 val, val2;
8471         u32 config;
8472         u16 i;
8473         u32 ext_phy_type;
8474
8475         bp->link_params.bp = bp;
8476         bp->link_params.port = port;
8477
8478         bp->link_params.lane_config =
8479                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8480         bp->link_params.ext_phy_config =
8481                 SHMEM_RD(bp,
8482                          dev_info.port_hw_config[port].external_phy_config);
8483         /* BCM8727_NOC => BCM8727 with no over-current support */
8484         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8485             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8486                 bp->link_params.ext_phy_config &=
8487                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8488                 bp->link_params.ext_phy_config |=
8489                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8490                 bp->link_params.feature_config_flags |=
8491                         FEATURE_CONFIG_BCM8727_NOC;
8492         }
8493
8494         bp->link_params.speed_cap_mask =
8495                 SHMEM_RD(bp,
8496                          dev_info.port_hw_config[port].speed_capability_mask);
8497
8498         bp->port.link_config =
8499                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8500
8501         /* Get the XGXS rx and tx config for all 4 lanes */
8502         for (i = 0; i < 2; i++) {
8503                 val = SHMEM_RD(bp,
8504                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8505                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8506                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8507
8508                 val = SHMEM_RD(bp,
8509                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8510                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8511                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8512         }
8513
8514         /* If the device is capable of WoL, set the default state according
8515          * to the HW
8516          */
8517         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8518         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8519                    (config & PORT_FEATURE_WOL_ENABLED));
8520
8521         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8522                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8523                        bp->link_params.lane_config,
8524                        bp->link_params.ext_phy_config,
8525                        bp->link_params.speed_cap_mask, bp->port.link_config);
8526
8527         bp->link_params.switch_cfg |= (bp->port.link_config &
8528                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8529         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8530
8531         bnx2x_link_settings_requested(bp);
8532
8533         /*
8534          * If connected directly, work with the internal PHY; otherwise,
8535          * work with the external PHY
8536          */
8537         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8538         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8539                 bp->mdio.prtad = bp->link_params.phy_addr;
8540
8541         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8542                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8543                 bp->mdio.prtad =
8544                         (bp->link_params.ext_phy_config &
8545                          PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
8546                                 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
8547
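        /* mac_upper holds MAC address bytes 0-1, mac_lower bytes 2-5 */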
8548         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8549         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8550         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8551         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8552         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8553         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8554         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8555         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8556         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8557         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8558 }
8559
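/* Gather common, function and port HW info. In E1H multi-function mode
 * the outer VLAN tag (E1HOV) and the per-function MAC address come from
 * the mf_cfg section of shared memory.
 */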
8560 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8561 {
8562         int func = BP_FUNC(bp);
8563         u32 val, val2;
8564         int rc = 0;
8565
8566         bnx2x_get_common_hwinfo(bp);
8567
8568         bp->e1hov = 0;
8569         bp->e1hmf = 0;
8570         if (CHIP_IS_E1H(bp)) {
8571                 bp->mf_config =
8572                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8573
8574                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8575                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8576                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8577                         bp->e1hmf = 1;
8578                 BNX2X_DEV_INFO("%s function mode\n",
8579                                IS_E1HMF(bp) ? "multi" : "single");
8580
8581                 if (IS_E1HMF(bp)) {
8582                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8583                                                                 e1hov_tag) &
8584                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8585                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8586                                 bp->e1hov = val;
8587                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8588                                                "(0x%04x)\n",
8589                                                func, bp->e1hov, bp->e1hov);
8590                         } else {
8591                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8592                                           "  aborting\n", func);
8593                                 rc = -EPERM;
8594                         }
8595                 } else {
8596                         if (BP_E1HVN(bp)) {
8597                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8598                                           "  aborting\n", BP_E1HVN(bp));
8599                                 rc = -EPERM;
8600                         }
8601                 }
8602         }
8603
8604         if (!BP_NOMCP(bp)) {
8605                 bnx2x_get_port_hwinfo(bp);
8606
8607                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8608                               DRV_MSG_SEQ_NUMBER_MASK);
8609                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8610         }
8611
8612         if (IS_E1HMF(bp)) {
8613                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8614                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8615                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8616                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8617                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8618                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8619                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8620                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8621                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8622                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8623                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8624                                ETH_ALEN);
8625                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8626                                ETH_ALEN);
8627                 }
8628
8629                 return rc;
8630         }
8631
8632         if (BP_NOMCP(bp)) {
8633                 /* only supposed to happen on emulation/FPGA */
8634                 BNX2X_ERR("warning random MAC workaround active\n");
8635                 random_ether_addr(bp->dev->dev_addr);
8636                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8637         }
8638
8639         return rc;
8640 }
8641
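/* One-time driver-private initialization: read HW info, set defaults for
 * multi-queue, TPA, coalescing and ring sizes, and set up the periodic
 * timer.
 */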
8642 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8643 {
8644         int func = BP_FUNC(bp);
8645         int timer_interval;
8646         int rc;
8647
8648         /* Disable interrupt handling until HW is initialized */
8649         atomic_set(&bp->intr_sem, 1);
8650         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8651
8652         mutex_init(&bp->port.phy_mutex);
8653
8654         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8655         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8656
8657         rc = bnx2x_get_hwinfo(bp);
8658
8659         /* need to reset the chip if UNDI was active */
8660         if (!BP_NOMCP(bp))
8661                 bnx2x_undi_unload(bp);
8662
8663         if (CHIP_REV_IS_FPGA(bp))
8664                 printk(KERN_ERR PFX "FPGA detected\n");
8665
8666         if (BP_NOMCP(bp) && (func == 0))
8667                 printk(KERN_ERR PFX
8668                        "MCP disabled, must load devices in order!\n");
8669
8670         /* Set multi queue mode */
8671         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8672             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8673                 printk(KERN_ERR PFX
8674                       "Multi disabled since int_mode requested is not MSI-X\n");
8675                 multi_mode = ETH_RSS_MODE_DISABLED;
8676         }
8677         bp->multi_mode = multi_mode;
8678
8679
8680         /* Set TPA flags */
8681         if (disable_tpa) {
8682                 bp->flags &= ~TPA_ENABLE_FLAG;
8683                 bp->dev->features &= ~NETIF_F_LRO;
8684         } else {
8685                 bp->flags |= TPA_ENABLE_FLAG;
8686                 bp->dev->features |= NETIF_F_LRO;
8687         }
8688
8689         if (CHIP_IS_E1(bp))
8690                 bp->dropless_fc = 0;
8691         else
8692                 bp->dropless_fc = dropless_fc;
8693
8694         bp->mrrs = mrrs;
8695
8696         bp->tx_ring_size = MAX_TX_AVAIL;
8697         bp->rx_ring_size = MAX_RX_AVAIL;
8698
8699         bp->rx_csum = 1;
8700
8701         bp->tx_ticks = 50;
8702         bp->rx_ticks = 25;
8703
8704         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8705         bp->current_interval = (poll ? poll : timer_interval);
8706
8707         init_timer(&bp->timer);
8708         bp->timer.expires = jiffies + bp->current_interval;
8709         bp->timer.data = (unsigned long) bp;
8710         bp->timer.function = bnx2x_timer;
8711
8712         return rc;
8713 }
8714
8715 /*
8716  * ethtool service functions
8717  */
8718
8719 /* All ethtool functions called with rtnl_lock */
8720
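/* ethtool get_settings: report the live link state when the carrier is up,
 * otherwise the requested (NVRAM/driver) settings.
 */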
8721 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8722 {
8723         struct bnx2x *bp = netdev_priv(dev);
8724
8725         cmd->supported = bp->port.supported;
8726         cmd->advertising = bp->port.advertising;
8727
8728         if (netif_carrier_ok(dev)) {
8729                 cmd->speed = bp->link_vars.line_speed;
8730                 cmd->duplex = bp->link_vars.duplex;
8731         } else {
8732                 cmd->speed = bp->link_params.req_line_speed;
8733                 cmd->duplex = bp->link_params.req_duplex;
8734         }
8735         if (IS_E1HMF(bp)) {
8736                 u16 vn_max_rate;
8737
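                /* in multi-function mode, cap the reported speed at the
                 * per-function max rate configured by the MCP
                 * (units of 100 Mbps)
                 */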
8738                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8739                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8740                 if (vn_max_rate < cmd->speed)
8741                         cmd->speed = vn_max_rate;
8742         }
8743
8744         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8745                 u32 ext_phy_type =
8746                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8747
8748                 switch (ext_phy_type) {
8749                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8750                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8751                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8752                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8753                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8754                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8755                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8756                         cmd->port = PORT_FIBRE;
8757                         break;
8758
8759                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8760                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8761                         cmd->port = PORT_TP;
8762                         break;
8763
8764                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8765                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8766                                   bp->link_params.ext_phy_config);
8767                         break;
8768
8769                 default:
8770                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8771                            bp->link_params.ext_phy_config);
8772                         break;
8773                 }
8774         } else
8775                 cmd->port = PORT_TP;
8776
8777         cmd->phy_address = bp->mdio.prtad;
8778         cmd->transceiver = XCVR_INTERNAL;
8779
8780         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8781                 cmd->autoneg = AUTONEG_ENABLE;
8782         else
8783                 cmd->autoneg = AUTONEG_DISABLE;
8784
8785         cmd->maxtxpkt = 0;
8786         cmd->maxrxpkt = 0;
8787
8788         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8789            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8790            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8791            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8792            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8793            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8794            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8795
8796         return 0;
8797 }
8798
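/* ethtool set_settings: validate the requested autoneg/speed/duplex against
 * the port capabilities, update link_params, and retrigger the link if the
 * interface is running. A no-op (returns 0) in multi-function mode.
 */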
8799 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8800 {
8801         struct bnx2x *bp = netdev_priv(dev);
8802         u32 advertising;
8803
8804         if (IS_E1HMF(bp))
8805                 return 0;
8806
8807         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8808            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8809            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8810            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8811            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8812            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8813            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8814
8815         if (cmd->autoneg == AUTONEG_ENABLE) {
8816                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8817                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8818                         return -EINVAL;
8819                 }
8820
8821                 /* advertise the requested speed and duplex if supported */
8822                 cmd->advertising &= bp->port.supported;
8823
8824                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8825                 bp->link_params.req_duplex = DUPLEX_FULL;
8826                 bp->port.advertising |= (ADVERTISED_Autoneg |
8827                                          cmd->advertising);
8828
8829         } else { /* forced speed */
8830                 /* advertise the requested speed and duplex if supported */
8831                 switch (cmd->speed) {
8832                 case SPEED_10:
8833                         if (cmd->duplex == DUPLEX_FULL) {
8834                                 if (!(bp->port.supported &
8835                                       SUPPORTED_10baseT_Full)) {
8836                                         DP(NETIF_MSG_LINK,
8837                                            "10M full not supported\n");
8838                                         return -EINVAL;
8839                                 }
8840
8841                                 advertising = (ADVERTISED_10baseT_Full |
8842                                                ADVERTISED_TP);
8843                         } else {
8844                                 if (!(bp->port.supported &
8845                                       SUPPORTED_10baseT_Half)) {
8846                                         DP(NETIF_MSG_LINK,
8847                                            "10M half not supported\n");
8848                                         return -EINVAL;
8849                                 }
8850
8851                                 advertising = (ADVERTISED_10baseT_Half |
8852                                                ADVERTISED_TP);
8853                         }
8854                         break;
8855
8856                 case SPEED_100:
8857                         if (cmd->duplex == DUPLEX_FULL) {
8858                                 if (!(bp->port.supported &
8859                                                 SUPPORTED_100baseT_Full)) {
8860                                         DP(NETIF_MSG_LINK,
8861                                            "100M full not supported\n");
8862                                         return -EINVAL;
8863                                 }
8864
8865                                 advertising = (ADVERTISED_100baseT_Full |
8866                                                ADVERTISED_TP);
8867                         } else {
8868                                 if (!(bp->port.supported &
8869                                                 SUPPORTED_100baseT_Half)) {
8870                                         DP(NETIF_MSG_LINK,
8871                                            "100M half not supported\n");
8872                                         return -EINVAL;
8873                                 }
8874
8875                                 advertising = (ADVERTISED_100baseT_Half |
8876                                                ADVERTISED_TP);
8877                         }
8878                         break;
8879
8880                 case SPEED_1000:
8881                         if (cmd->duplex != DUPLEX_FULL) {
8882                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8883                                 return -EINVAL;
8884                         }
8885
8886                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8887                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8888                                 return -EINVAL;
8889                         }
8890
8891                         advertising = (ADVERTISED_1000baseT_Full |
8892                                        ADVERTISED_TP);
8893                         break;
8894
8895                 case SPEED_2500:
8896                         if (cmd->duplex != DUPLEX_FULL) {
8897                                 DP(NETIF_MSG_LINK,
8898                                    "2.5G half not supported\n");
8899                                 return -EINVAL;
8900                         }
8901
8902                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8903                                 DP(NETIF_MSG_LINK,
8904                                    "2.5G full not supported\n");
8905                                 return -EINVAL;
8906                         }
8907
8908                         advertising = (ADVERTISED_2500baseX_Full |
8909                                        ADVERTISED_TP);
8910                         break;
8911
8912                 case SPEED_10000:
8913                         if (cmd->duplex != DUPLEX_FULL) {
8914                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8915                                 return -EINVAL;
8916                         }
8917
8918                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8919                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8920                                 return -EINVAL;
8921                         }
8922
8923                         advertising = (ADVERTISED_10000baseT_Full |
8924                                        ADVERTISED_FIBRE);
8925                         break;
8926
8927                 default:
8928                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8929                         return -EINVAL;
8930                 }
8931
8932                 bp->link_params.req_line_speed = cmd->speed;
8933                 bp->link_params.req_duplex = cmd->duplex;
8934                 bp->port.advertising = advertising;
8935         }
8936
8937         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8938            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8939            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8940            bp->port.advertising);
8941
8942         if (netif_running(dev)) {
8943                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8944                 bnx2x_link_set(bp);
8945         }
8946
8947         return 0;
8948 }
8949
8950 #define PHY_FW_VER_LEN                  10
8951
8952 static void bnx2x_get_drvinfo(struct net_device *dev,
8953                               struct ethtool_drvinfo *info)
8954 {
8955         struct bnx2x *bp = netdev_priv(dev);
8956         u8 phy_fw_ver[PHY_FW_VER_LEN];
8957
8958         strcpy(info->driver, DRV_MODULE_NAME);
8959         strcpy(info->version, DRV_MODULE_VERSION);
8960
8961         phy_fw_ver[0] = '\0';
8962         if (bp->port.pmf) {
8963                 bnx2x_acquire_phy_lock(bp);
8964                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8965                                              (bp->state != BNX2X_STATE_CLOSED),
8966                                              phy_fw_ver, PHY_FW_VER_LEN);
8967                 bnx2x_release_phy_lock(bp);
8968         }
8969
8970         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8971                  (bp->common.bc_ver & 0xff0000) >> 16,
8972                  (bp->common.bc_ver & 0xff00) >> 8,
8973                  (bp->common.bc_ver & 0xff),
8974                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8975         strcpy(info->bus_info, pci_name(bp->pdev));
8976         info->n_stats = BNX2X_NUM_STATS;
8977         info->testinfo_len = BNX2X_NUM_TESTS;
8978         info->eedump_len = bp->common.flash_size;
8979         info->regdump_len = 0;
8980 }
8981
8982 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8983 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8984
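/* Compute the register dump size for this chip family. The result is
 * cached in a static variable, which assumes all bnx2x devices in the
 * system are the same family (E1 vs E1H); a mixed system would reuse the
 * first device's length.
 */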
8985 static int bnx2x_get_regs_len(struct net_device *dev)
8986 {
8987         static u32 regdump_len;
8988         struct bnx2x *bp = netdev_priv(dev);
8989         int i;
8990
8991         if (regdump_len)
8992                 return regdump_len;
8993
8994         if (CHIP_IS_E1(bp)) {
8995                 for (i = 0; i < REGS_COUNT; i++)
8996                         if (IS_E1_ONLINE(reg_addrs[i].info))
8997                                 regdump_len += reg_addrs[i].size;
8998
8999                 for (i = 0; i < WREGS_COUNT_E1; i++)
9000                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9001                                 regdump_len += wreg_addrs_e1[i].size *
9002                                         (1 + wreg_addrs_e1[i].read_regs_count);
9003
9004         } else { /* E1H */
9005                 for (i = 0; i < REGS_COUNT; i++)
9006                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9007                                 regdump_len += reg_addrs[i].size;
9008
9009                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9010                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9011                                 regdump_len += wreg_addrs_e1h[i].size *
9012                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9013         }
9014         regdump_len *= 4;
9015         regdump_len += sizeof(struct dump_hdr);
9016
9017         return regdump_len;
9018 }
9019
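/* Fill the ethtool register dump: a dump_hdr followed by the raw values of
 * every register block marked online for this chip family.
 */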
9020 static void bnx2x_get_regs(struct net_device *dev,
9021                            struct ethtool_regs *regs, void *_p)
9022 {
9023         u32 *p = _p, i, j;
9024         struct bnx2x *bp = netdev_priv(dev);
9025         struct dump_hdr dump_hdr = {0};
9026
9027         regs->version = 0;
9028         memset(p, 0, regs->len);
9029
9030         if (!netif_running(bp->dev))
9031                 return;
9032
9033         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9034         dump_hdr.dump_sign = dump_sign_all;
9035         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9036         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9037         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9038         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9039         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9040
9041         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9042         p += dump_hdr.hdr_size + 1;
9043
9044         if (CHIP_IS_E1(bp)) {
9045                 for (i = 0; i < REGS_COUNT; i++)
9046                         if (IS_E1_ONLINE(reg_addrs[i].info))
9047                                 for (j = 0; j < reg_addrs[i].size; j++)
9048                                         *p++ = REG_RD(bp,
9049                                                       reg_addrs[i].addr + j*4);
9050
9051         } else { /* E1H */
9052                 for (i = 0; i < REGS_COUNT; i++)
9053                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9054                                 for (j = 0; j < reg_addrs[i].size; j++)
9055                                         *p++ = REG_RD(bp,
9056                                                       reg_addrs[i].addr + j*4);
9057         }
9058 }
9059
9060 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9061 {
9062         struct bnx2x *bp = netdev_priv(dev);
9063
9064         if (bp->flags & NO_WOL_FLAG) {
9065                 wol->supported = 0;
9066                 wol->wolopts = 0;
9067         } else {
9068                 wol->supported = WAKE_MAGIC;
9069                 if (bp->wol)
9070                         wol->wolopts = WAKE_MAGIC;
9071                 else
9072                         wol->wolopts = 0;
9073         }
9074         memset(&wol->sopass, 0, sizeof(wol->sopass));
9075 }
9076
9077 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9078 {
9079         struct bnx2x *bp = netdev_priv(dev);
9080
9081         if (wol->wolopts & ~WAKE_MAGIC)
9082                 return -EINVAL;
9083
9084         if (wol->wolopts & WAKE_MAGIC) {
9085                 if (bp->flags & NO_WOL_FLAG)
9086                         return -EINVAL;
9087
9088                 bp->wol = 1;
9089         } else
9090                 bp->wol = 0;
9091
9092         return 0;
9093 }
9094
9095 static u32 bnx2x_get_msglevel(struct net_device *dev)
9096 {
9097         struct bnx2x *bp = netdev_priv(dev);
9098
9099         return bp->msglevel;
9100 }
9101
9102 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9103 {
9104         struct bnx2x *bp = netdev_priv(dev);
9105
9106         if (capable(CAP_NET_ADMIN))
9107                 bp->msglevel = level;
9108 }
9109
9110 static int bnx2x_nway_reset(struct net_device *dev)
9111 {
9112         struct bnx2x *bp = netdev_priv(dev);
9113
9114         if (!bp->port.pmf)
9115                 return 0;
9116
9117         if (netif_running(dev)) {
9118                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9119                 bnx2x_link_set(bp);
9120         }
9121
9122         return 0;
9123 }
9124
9125 static u32
9126 bnx2x_get_link(struct net_device *dev)
9127 {
9128         struct bnx2x *bp = netdev_priv(dev);
9129
9130         return bp->link_vars.link_up;
9131 }
9132
9133 static int bnx2x_get_eeprom_len(struct net_device *dev)
9134 {
9135         struct bnx2x *bp = netdev_priv(dev);
9136
9137         return bp->common.flash_size;
9138 }
9139
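/* Request the per-port NVRAM arbitration from the MCP and poll until it
 * is granted; paired with bnx2x_release_nvram_lock() below.
 */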
9140 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9141 {
9142         int port = BP_PORT(bp);
9143         int count, i;
9144         u32 val = 0;
9145
9146         /* adjust timeout for emulation/FPGA */
9147         count = NVRAM_TIMEOUT_COUNT;
9148         if (CHIP_REV_IS_SLOW(bp))
9149                 count *= 100;
9150
9151         /* request access to nvram interface */
9152         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9153                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9154
9155         for (i = 0; i < count*10; i++) {
9156                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9157                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9158                         break;
9159
9160                 udelay(5);
9161         }
9162
9163         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9164                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9165                 return -EBUSY;
9166         }
9167
9168         return 0;
9169 }
9170
9171 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9172 {
9173         int port = BP_PORT(bp);
9174         int count, i;
9175         u32 val = 0;
9176
9177         /* adjust timeout for emulation/FPGA */
9178         count = NVRAM_TIMEOUT_COUNT;
9179         if (CHIP_REV_IS_SLOW(bp))
9180                 count *= 100;
9181
9182         /* relinquish nvram interface */
9183         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9184                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9185
9186         for (i = 0; i < count*10; i++) {
9187                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9188                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9189                         break;
9190
9191                 udelay(5);
9192         }
9193
9194         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9195                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9196                 return -EBUSY;
9197         }
9198
9199         return 0;
9200 }
9201
9202 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9203 {
9204         u32 val;
9205
9206         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9207
9208         /* enable both bits, even on read */
9209         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9210                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9211                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9212 }
9213
9214 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9215 {
9216         u32 val;
9217
9218         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9219
9220         /* disable both bits, even after read */
9221         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9222                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9223                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9224 }
9225
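/* Issue a single dword read through the MCP NVRAM interface and poll for
 * MCPR_NVM_COMMAND_DONE; the result is returned big-endian so the caller
 * can treat it as a byte array.
 */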
9226 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9227                                   u32 cmd_flags)
9228 {
9229         int count, i, rc;
9230         u32 val;
9231
9232         /* build the command word */
9233         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9234
9235         /* need to clear DONE bit separately */
9236         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9237
9238         /* address of the NVRAM to read from */
9239         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9240                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9241
9242         /* issue a read command */
9243         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9244
9245         /* adjust timeout for emulation/FPGA */
9246         count = NVRAM_TIMEOUT_COUNT;
9247         if (CHIP_REV_IS_SLOW(bp))
9248                 count *= 100;
9249
9250         /* wait for completion */
9251         *ret_val = 0;
9252         rc = -EBUSY;
9253         for (i = 0; i < count; i++) {
9254                 udelay(5);
9255                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9256
9257                 if (val & MCPR_NVM_COMMAND_DONE) {
9258                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9259                         /* we read nvram data in cpu order,
9260                          * but ethtool sees it as an array of bytes;
9261                          * converting to big-endian does the work */
9262                         *ret_val = cpu_to_be32(val);
9263                         rc = 0;
9264                         break;
9265                 }
9266         }
9267
9268         return rc;
9269 }
9270
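/* Read an arbitrary (dword-aligned, dword-sized) range from NVRAM, using
 * the FIRST/LAST command flags to bracket the burst.
 */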
9271 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9272                             int buf_size)
9273 {
9274         int rc;
9275         u32 cmd_flags;
9276         __be32 val;
9277
9278         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9279                 DP(BNX2X_MSG_NVM,
9280                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9281                    offset, buf_size);
9282                 return -EINVAL;
9283         }
9284
9285         if (offset + buf_size > bp->common.flash_size) {
9286                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9287                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9288                    offset, buf_size, bp->common.flash_size);
9289                 return -EINVAL;
9290         }
9291
9292         /* request access to nvram interface */
9293         rc = bnx2x_acquire_nvram_lock(bp);
9294         if (rc)
9295                 return rc;
9296
9297         /* enable access to nvram interface */
9298         bnx2x_enable_nvram_access(bp);
9299
9300         /* read the first word(s) */
9301         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9302         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9303                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9304                 memcpy(ret_buf, &val, 4);
9305
9306                 /* advance to the next dword */
9307                 offset += sizeof(u32);
9308                 ret_buf += sizeof(u32);
9309                 buf_size -= sizeof(u32);
9310                 cmd_flags = 0;
9311         }
9312
9313         if (rc == 0) {
9314                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9315                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9316                 memcpy(ret_buf, &val, 4);
9317         }
9318
9319         /* disable access to nvram interface */
9320         bnx2x_disable_nvram_access(bp);
9321         bnx2x_release_nvram_lock(bp);
9322
9323         return rc;
9324 }
9325
9326 static int bnx2x_get_eeprom(struct net_device *dev,
9327                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9328 {
9329         struct bnx2x *bp = netdev_priv(dev);
9330         int rc;
9331
9332         if (!netif_running(dev))
9333                 return -EAGAIN;
9334
9335         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9336            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9337            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9338            eeprom->len, eeprom->len);
9339
9340         /* parameters already validated in ethtool_get_eeprom */
9341
9342         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9343
9344         return rc;
9345 }
9346
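/* Issue a single dword write through the MCP NVRAM interface and poll for
 * completion.
 */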
9347 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9348                                    u32 cmd_flags)
9349 {
9350         int count, i, rc;
9351
9352         /* build the command word */
9353         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9354
9355         /* need to clear DONE bit separately */
9356         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9357
9358         /* write the data */
9359         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9360
9361         /* address of the NVRAM to write to */
9362         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9363                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9364
9365         /* issue the write command */
9366         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9367
9368         /* adjust timeout for emulation/FPGA */
9369         count = NVRAM_TIMEOUT_COUNT;
9370         if (CHIP_REV_IS_SLOW(bp))
9371                 count *= 100;
9372
9373         /* wait for completion */
9374         rc = -EBUSY;
9375         for (i = 0; i < count; i++) {
9376                 udelay(5);
9377                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9378                 if (val & MCPR_NVM_COMMAND_DONE) {
9379                         rc = 0;
9380                         break;
9381                 }
9382         }
9383
9384         return rc;
9385 }
9386
9387 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9388
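/* Single-byte write: read the containing dword, patch one byte in place
 * (read-modify-write) and write the dword back.
 */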
9389 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9390                               int buf_size)
9391 {
9392         int rc;
9393         u32 cmd_flags;
9394         u32 align_offset;
9395         __be32 val;
9396
9397         if (offset + buf_size > bp->common.flash_size) {
9398                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9399                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9400                    offset, buf_size, bp->common.flash_size);
9401                 return -EINVAL;
9402         }
9403
9404         /* request access to nvram interface */
9405         rc = bnx2x_acquire_nvram_lock(bp);
9406         if (rc)
9407                 return rc;
9408
9409         /* enable access to nvram interface */
9410         bnx2x_enable_nvram_access(bp);
9411
9412         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9413         align_offset = (offset & ~0x03);
9414         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9415
9416         if (rc == 0) {
9417                 val &= ~(0xff << BYTE_OFFSET(offset));
9418                 val |= (*data_buf << BYTE_OFFSET(offset));
9419
9420                 /* nvram data is returned as an array of bytes;
9421                  * convert it back to cpu order */
9422                 val = be32_to_cpu(val);
9423
9424                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9425                                              cmd_flags);
9426         }
9427
9428         /* disable access to nvram interface */
9429         bnx2x_disable_nvram_access(bp);
9430         bnx2x_release_nvram_lock(bp);
9431
9432         return rc;
9433 }
9434
9435 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9436                              int buf_size)
9437 {
9438         int rc;
9439         u32 cmd_flags;
9440         u32 val;
9441         u32 written_so_far;
9442
9443         if (buf_size == 1)      /* ethtool */
9444                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9445
9446         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9447                 DP(BNX2X_MSG_NVM,
9448                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9449                    offset, buf_size);
9450                 return -EINVAL;
9451         }
9452
9453         if (offset + buf_size > bp->common.flash_size) {
9454                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9455                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9456                    offset, buf_size, bp->common.flash_size);
9457                 return -EINVAL;
9458         }
9459
9460         /* request access to nvram interface */
9461         rc = bnx2x_acquire_nvram_lock(bp);
9462         if (rc)
9463                 return rc;
9464
9465         /* enable access to nvram interface */
9466         bnx2x_enable_nvram_access(bp);
9467
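        /* each NVRAM page must be framed by FIRST/LAST command flags:
         * LAST is set on the final dword of the buffer or of a page,
         * FIRST on the first dword of the next page
         */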
9468         written_so_far = 0;
9469         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9470         while ((written_so_far < buf_size) && (rc == 0)) {
9471                 if (written_so_far == (buf_size - sizeof(u32)))
9472                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9473                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9474                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9475                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9476                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9477
9478                 memcpy(&val, data_buf, 4);
9479
9480                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9481
9482                 /* advance to the next dword */
9483                 offset += sizeof(u32);
9484                 data_buf += sizeof(u32);
9485                 written_so_far += sizeof(u32);
9486                 cmd_flags = 0;
9487         }
9488
9489         /* disable access to nvram interface */
9490         bnx2x_disable_nvram_access(bp);
9491         bnx2x_release_nvram_lock(bp);
9492
9493         return rc;
9494 }
9495
9496 static int bnx2x_set_eeprom(struct net_device *dev,
9497                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9498 {
9499         struct bnx2x *bp = netdev_priv(dev);
9500         int port = BP_PORT(bp);
9501         int rc = 0;
9502
9503         if (!netif_running(dev))
9504                 return -EAGAIN;
9505
9506         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9507            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9508            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9509            eeprom->len, eeprom->len);
9510
9511         /* parameters already validated in ethtool_set_eeprom */
9512
9513         /* PHY eeprom can be accessed only by the PMF */
9514         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9515             !bp->port.pmf)
9516                 return -EINVAL;
9517
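        /* the 'PHYP'/'PHYR'/'PHYC' magic values drive the external PHY FW
         * upgrade sequence (prepare, re-init, complete); any other magic
         * falls through to a plain NVRAM write
         */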
9518         if (eeprom->magic == 0x50485950) {
9519                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9520                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9521
9522                 bnx2x_acquire_phy_lock(bp);
9523                 rc |= bnx2x_link_reset(&bp->link_params,
9524                                        &bp->link_vars, 0);
9525                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9526                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9527                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9528                                        MISC_REGISTERS_GPIO_HIGH, port);
9529                 bnx2x_release_phy_lock(bp);
9530                 bnx2x_link_report(bp);
9531
9532         } else if (eeprom->magic == 0x50485952) {
9533                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9534                 if ((bp->state == BNX2X_STATE_OPEN) ||
9535                     (bp->state == BNX2X_STATE_DISABLED)) {
9536                         bnx2x_acquire_phy_lock(bp);
9537                         rc |= bnx2x_link_reset(&bp->link_params,
9538                                                &bp->link_vars, 1);
9539
9540                         rc |= bnx2x_phy_init(&bp->link_params,
9541                                              &bp->link_vars);
9542                         bnx2x_release_phy_lock(bp);
9543                         bnx2x_calc_fc_adv(bp);
9544                 }
9545         } else if (eeprom->magic == 0x53985943) {
9546                 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9547                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9548                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9549                         u8 ext_phy_addr =
9550                                 (bp->link_params.ext_phy_config &
9551                                  PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
9552                                         PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
9553
9554                         /* DSP Remove Download Mode */
9555                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9556                                        MISC_REGISTERS_GPIO_LOW, port);
9557
9558                         bnx2x_acquire_phy_lock(bp);
9559
9560                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9561
9562                         /* wait 0.5 sec to allow it to run */
9563                         msleep(500);
9564                         bnx2x_ext_phy_hw_reset(bp, port);
9565                         msleep(500);
9566                         bnx2x_release_phy_lock(bp);
9567                 }
9568         } else
9569                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9570
9571         return rc;
9572 }
9573
9574 static int bnx2x_get_coalesce(struct net_device *dev,
9575                               struct ethtool_coalesce *coal)
9576 {
9577         struct bnx2x *bp = netdev_priv(dev);
9578
9579         memset(coal, 0, sizeof(struct ethtool_coalesce));
9580
9581         coal->rx_coalesce_usecs = bp->rx_ticks;
9582         coal->tx_coalesce_usecs = bp->tx_ticks;
9583
9584         return 0;
9585 }
9586
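/* 0xf0 ticks of 12 us each - presumably the widest value the HC timeout
 * fields programmed by bnx2x_update_coalesce() can hold
 */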
9587 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
9588 static int bnx2x_set_coalesce(struct net_device *dev,
9589                               struct ethtool_coalesce *coal)
9590 {
9591         struct bnx2x *bp = netdev_priv(dev);
9592
9593         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9594         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9595                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9596
9597         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9598         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9599                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9600
9601         if (netif_running(dev))
9602                 bnx2x_update_coalesce(bp);
9603
9604         return 0;
9605 }
9606
9607 static void bnx2x_get_ringparam(struct net_device *dev,
9608                                 struct ethtool_ringparam *ering)
9609 {
9610         struct bnx2x *bp = netdev_priv(dev);
9611
9612         ering->rx_max_pending = MAX_RX_AVAIL;
9613         ering->rx_mini_max_pending = 0;
9614         ering->rx_jumbo_max_pending = 0;
9615
9616         ering->rx_pending = bp->rx_ring_size;
9617         ering->rx_mini_pending = 0;
9618         ering->rx_jumbo_pending = 0;
9619
9620         ering->tx_max_pending = MAX_TX_AVAIL;
9621         ering->tx_pending = bp->tx_ring_size;
9622 }
9623
9624 static int bnx2x_set_ringparam(struct net_device *dev,
9625                                struct ethtool_ringparam *ering)
9626 {
9627         struct bnx2x *bp = netdev_priv(dev);
9628         int rc = 0;
9629
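        /* the Tx ring must leave room for a maximally fragmented skb plus
         * its extra control BDs, apparently the reason for the
         * MAX_SKB_FRAGS + 4 floor below
         */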
9630         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9631             (ering->tx_pending > MAX_TX_AVAIL) ||
9632             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9633                 return -EINVAL;
9634
9635         bp->rx_ring_size = ering->rx_pending;
9636         bp->tx_ring_size = ering->tx_pending;
9637
9638         if (netif_running(dev)) {
9639                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9640                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9641         }
9642
9643         return rc;
9644 }
9645
9646 static void bnx2x_get_pauseparam(struct net_device *dev,
9647                                  struct ethtool_pauseparam *epause)
9648 {
9649         struct bnx2x *bp = netdev_priv(dev);
9650
9651         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9652                            BNX2X_FLOW_CTRL_AUTO) &&
9653                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9654
9655         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9656                             BNX2X_FLOW_CTRL_RX);
9657         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9658                             BNX2X_FLOW_CTRL_TX);
9659
9660         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9661            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9662            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9663 }
9664
9665 static int bnx2x_set_pauseparam(struct net_device *dev,
9666                                 struct ethtool_pauseparam *epause)
9667 {
9668         struct bnx2x *bp = netdev_priv(dev);
9669
9670         if (IS_E1HMF(bp))
9671                 return 0;
9672
9673         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9674            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9675            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9676
9677         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9678
9679         if (epause->rx_pause)
9680                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9681
9682         if (epause->tx_pause)
9683                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9684
9685         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9686                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9687
9688         if (epause->autoneg) {
9689                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9690                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9691                         return -EINVAL;
9692                 }
9693
9694                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9695                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9696         }
9697
9698         DP(NETIF_MSG_LINK,
9699            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9700
9701         if (netif_running(dev)) {
9702                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9703                 bnx2x_link_set(bp);
9704         }
9705
9706         return 0;
9707 }
9708
9709 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9710 {
9711         struct bnx2x *bp = netdev_priv(dev);
9712         int changed = 0;
9713         int rc = 0;
9714
9715         /* TPA requires Rx CSUM offloading */
9716         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9717                 if (!(dev->features & NETIF_F_LRO)) {
9718                         dev->features |= NETIF_F_LRO;
9719                         bp->flags |= TPA_ENABLE_FLAG;
9720                         changed = 1;
9721                 }
9722
9723         } else if (dev->features & NETIF_F_LRO) {
9724                 dev->features &= ~NETIF_F_LRO;
9725                 bp->flags &= ~TPA_ENABLE_FLAG;
9726                 changed = 1;
9727         }
9728
9729         if (changed && netif_running(dev)) {
9730                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9731                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9732         }
9733
9734         return rc;
9735 }
9736
9737 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9738 {
9739         struct bnx2x *bp = netdev_priv(dev);
9740
9741         return bp->rx_csum;
9742 }
9743
9744 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9745 {
9746         struct bnx2x *bp = netdev_priv(dev);
9747         int rc = 0;
9748
9749         bp->rx_csum = data;
9750
9751         /* Disable TPA when Rx CSUM is disabled; otherwise all
9752            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9753         if (!data) {
9754                 u32 flags = ethtool_op_get_flags(dev);
9755
9756                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9757         }
9758
9759         return rc;
9760 }
9761
9762 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9763 {
9764         if (data) {
9765                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9766                 dev->features |= NETIF_F_TSO6;
9767         } else {
9768                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9769                 dev->features &= ~NETIF_F_TSO6;
9770         }
9771
9772         return 0;
9773 }
9774
9775 static const struct {
9776         char string[ETH_GSTRING_LEN];
9777 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9778         { "register_test (offline)" },
9779         { "memory_test (offline)" },
9780         { "loopback_test (offline)" },
9781         { "nvram_test (online)" },
9782         { "interrupt_test (online)" },
9783         { "link_test (online)" },
9784         { "idle check (online)" }
9785 };
9786
9787 static int bnx2x_self_test_count(struct net_device *dev)
9788 {
9789         return BNX2X_NUM_TESTS;
9790 }
9791
9792 static int bnx2x_test_registers(struct bnx2x *bp)
9793 {
9794         int idx, i, rc = -ENODEV;
9795         u32 wr_val = 0;
9796         int port = BP_PORT(bp);
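        /* reg_tbl columns: offset0 is the port 0 register address,
         * offset1 the per-port stride added for port 1, mask the
         * writable bits exercised by the test
         */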
9797         static const struct {
9798                 u32  offset0;
9799                 u32  offset1;
9800                 u32  mask;
9801         } reg_tbl[] = {
9802 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9803                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9804                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9805                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9806                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9807                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9808                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9809                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9810                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9811                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9812 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9813                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9814                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9815                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9816                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9817                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9818                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9819                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9820                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9821                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9822 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9823                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9824                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9825                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9826                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9827                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9828                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9829                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9830                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9831                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9832 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9833                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9834                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9835                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9836                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9837                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9838                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9839
9840                 { 0xffffffff, 0, 0x00000000 }
9841         };
9842
9843         if (!netif_running(bp->dev))
9844                 return rc;
9845
9846         /* Run the test twice:
9847            first writing 0x00000000, then writing 0xffffffff */
9848         for (idx = 0; idx < 2; idx++) {
9849
9850                 switch (idx) {
9851                 case 0:
9852                         wr_val = 0;
9853                         break;
9854                 case 1:
9855                         wr_val = 0xffffffff;
9856                         break;
9857                 }
9858
9859                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9860                         u32 offset, mask, save_val, val;
9861
9862                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9863                         mask = reg_tbl[i].mask;
9864
9865                         save_val = REG_RD(bp, offset);
9866
9867                         REG_WR(bp, offset, wr_val);
9868                         val = REG_RD(bp, offset);
9869
9870                         /* Restore the original register's value */
9871                         REG_WR(bp, offset, save_val);
9872
9873                         /* verify that the value is as expected */
9874                         if ((val & mask) != (wr_val & mask))
9875                                 goto test_reg_exit;
9876                 }
9877         }
9878
9879         rc = 0;
9880
9881 test_reg_exit:
9882         return rc;
9883 }
9884
9885 static int bnx2x_test_memory(struct bnx2x *bp)
9886 {
9887         int i, j, rc = -ENODEV;
9888         u32 val;
9889         static const struct {
9890                 u32 offset;
9891                 int size;
9892         } mem_tbl[] = {
9893                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9894                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9895                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9896                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9897                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9898                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9899                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9900
9901                 { 0xffffffff, 0 }
9902         };
9903         static const struct {
9904                 char *name;
9905                 u32 offset;
9906                 u32 e1_mask;
9907                 u32 e1h_mask;
9908         } prty_tbl[] = {
9909                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9910                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9911                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9912                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9913                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9914                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9915
9916                 { NULL, 0xffffffff, 0, 0 }
9917         };
9918
9919         if (!netif_running(bp->dev))
9920                 return rc;
9921
9922         /* Go through all the memories */
9923         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9924                 for (j = 0; j < mem_tbl[i].size; j++)
9925                         REG_RD(bp, mem_tbl[i].offset + j*4);
9926
9927         /* Check the parity status */
9928         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9929                 val = REG_RD(bp, prty_tbl[i].offset);
9930                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9931                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9932                         DP(NETIF_MSG_HW,
9933                            "%s is 0x%x\n", prty_tbl[i].name, val);
9934                         goto test_mem_exit;
9935                 }
9936         }
9937
9938         rc = 0;
9939
9940 test_mem_exit:
9941         return rc;
9942 }
9943
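/* when a link is expected, poll the link test every 10 ms for up to
 * 1000 iterations (roughly 10 seconds)
 */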
9944 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9945 {
9946         int cnt = 1000;
9947
9948         if (link_up)
9949                 while (bnx2x_link_test(bp) && cnt--)
9950                         msleep(10);
9951 }
9952
9953 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9954 {
9955         unsigned int pkt_size, num_pkts, i;
9956         struct sk_buff *skb;
9957         unsigned char *packet;
9958         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9959         struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
9960         u16 tx_start_idx, tx_idx;
9961         u16 rx_start_idx, rx_idx;
9962         u16 pkt_prod, bd_prod;
9963         struct sw_tx_bd *tx_buf;
9964         struct eth_tx_start_bd *tx_start_bd;
9965         struct eth_tx_parse_bd *pbd = NULL;
9966         dma_addr_t mapping;
9967         union eth_rx_cqe *cqe;
9968         u8 cqe_fp_flags;
9969         struct sw_rx_bd *rx_buf;
9970         u16 len;
9971         int rc = -ENODEV;
9972
9973         /* check the loopback mode */
9974         switch (loopback_mode) {
9975         case BNX2X_PHY_LOOPBACK:
9976                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9977                         return -EINVAL;
9978                 break;
9979         case BNX2X_MAC_LOOPBACK:
9980                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9981                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9982                 break;
9983         default:
9984                 return -EINVAL;
9985         }
9986
9987         /* prepare the loopback packet */
9988         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9989                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9990         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9991         if (!skb) {
9992                 rc = -ENOMEM;
9993                 goto test_loopback_exit;
9994         }
9995         packet = skb_put(skb, pkt_size);
9996         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9997         memset(packet + ETH_ALEN, 0, ETH_ALEN);
9998         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
9999         for (i = ETH_HLEN; i < pkt_size; i++)
10000                 packet[i] = (unsigned char) (i & 0xff);
10001
10002         /* send the loopback packet */
10003         num_pkts = 0;
10004         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10005         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10006
10007         pkt_prod = fp_tx->tx_pkt_prod++;
10008         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10009         tx_buf->first_bd = fp_tx->tx_bd_prod;
10010         tx_buf->skb = skb;
10011         tx_buf->flags = 0;
10012
10013         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10014         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10015         mapping = pci_map_single(bp->pdev, skb->data,
10016                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10017         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10018         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10019         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10020         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10021         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10022         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10023         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10024                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10025
10026         /* turn on parsing and get a BD */
10027         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10028         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10029
10030         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10031
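        /* make sure the BDs are fully written before ringing the
         * doorbell with the new producer value
         */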
10032         wmb();
10033
10034         fp_tx->tx_db.data.prod += 2;
10035         barrier();
10036         DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10037
10038         mmiowb();
10039
10040         num_pkts++;
10041         fp_tx->tx_bd_prod += 2; /* start + pbd */
10042         bp->dev->trans_start = jiffies;
10043
10044         udelay(100);
10045
10046         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10047         if (tx_idx != tx_start_idx + num_pkts)
10048                 goto test_loopback_exit;
10049
10050         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10051         if (rx_idx != rx_start_idx + num_pkts)
10052                 goto test_loopback_exit;
10053
10054         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10055         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10056         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10057                 goto test_loopback_rx_exit;
10058
10059         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10060         if (len != pkt_size)
10061                 goto test_loopback_rx_exit;
10062
10063         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10064         skb = rx_buf->skb;
10065         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10066         for (i = ETH_HLEN; i < pkt_size; i++)
10067                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10068                         goto test_loopback_rx_exit;
10069
10070         rc = 0;
10071
10072 test_loopback_rx_exit:
10073
10074         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10075         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10076         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10077         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10078
10079         /* Update producers */
10080         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10081                              fp_rx->rx_sge_prod);
10082
10083 test_loopback_exit:
10084         bp->link_params.loopback_mode = LOOPBACK_NONE;
10085
10086         return rc;
10087 }
10088
10089 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10090 {
10091         int rc = 0, res;
10092
10093         if (!netif_running(bp->dev))
10094                 return BNX2X_LOOPBACK_FAILED;
10095
10096         bnx2x_netif_stop(bp, 1);
10097         bnx2x_acquire_phy_lock(bp);
10098
10099         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10100         if (res) {
10101                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10102                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10103         }
10104
10105         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10106         if (res) {
10107                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10108                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10109         }
10110
10111         bnx2x_release_phy_lock(bp);
10112         bnx2x_netif_start(bp);
10113
10114         return rc;
10115 }
10116
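/* residue of a CRC32 computed over a block that includes its own
 * little-endian checksum; matching it proves the block is intact
 */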
10117 #define CRC32_RESIDUAL                  0xdebb20e3
10118
10119 static int bnx2x_test_nvram(struct bnx2x *bp)
10120 {
10121         static const struct {
10122                 int offset;
10123                 int size;
10124         } nvram_tbl[] = {
10125                 {     0,  0x14 }, /* bootstrap */
10126                 {  0x14,  0xec }, /* dir */
10127                 { 0x100, 0x350 }, /* manuf_info */
10128                 { 0x450,  0xf0 }, /* feature_info */
10129                 { 0x640,  0x64 }, /* upgrade_key_info */
10130                 { 0x6a4,  0x64 },
10131                 { 0x708,  0x70 }, /* manuf_key_info */
10132                 { 0x778,  0x70 },
10133                 {     0,     0 }
10134         };
10135         __be32 buf[0x350 / 4];
10136         u8 *data = (u8 *)buf;
10137         int i, rc;
10138         u32 magic, csum;
10139
10140         rc = bnx2x_nvram_read(bp, 0, data, 4);
10141         if (rc) {
10142                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10143                 goto test_nvram_exit;
10144         }
10145
10146         magic = be32_to_cpu(buf[0]);
10147         if (magic != 0x669955aa) {
10148                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10149                 rc = -ENODEV;
10150                 goto test_nvram_exit;
10151         }
10152
10153         for (i = 0; nvram_tbl[i].size; i++) {
10154
10155                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10156                                       nvram_tbl[i].size);
10157                 if (rc) {
10158                         DP(NETIF_MSG_PROBE,
10159                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10160                         goto test_nvram_exit;
10161                 }
10162
10163                 csum = ether_crc_le(nvram_tbl[i].size, data);
10164                 if (csum != CRC32_RESIDUAL) {
10165                         DP(NETIF_MSG_PROBE,
10166                            "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10167                         rc = -ENODEV;
10168                         goto test_nvram_exit;
10169                 }
10170         }
10171
10172 test_nvram_exit:
10173         return rc;
10174 }
10175
10176 static int bnx2x_test_intr(struct bnx2x *bp)
10177 {
10178         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10179         int i, rc;
10180
10181         if (!netif_running(bp->dev))
10182                 return -ENODEV;
10183
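        /* post a zero-length SET_MAC ramrod; seeing its completion clear
         * set_mac_pending proves the slowpath interrupt path works
         */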
10184         config->hdr.length = 0;
10185         if (CHIP_IS_E1(bp))
10186                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10187         else
10188                 config->hdr.offset = BP_FUNC(bp);
10189         config->hdr.client_id = bp->fp->cl_id;
10190         config->hdr.reserved1 = 0;
10191
10192         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10193                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10194                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10195         if (rc == 0) {
10196                 bp->set_mac_pending++;
10197                 for (i = 0; i < 10; i++) {
10198                         if (!bp->set_mac_pending)
10199                                 break;
10200                         msleep_interruptible(10);
10201                 }
10202                 if (i == 10)
10203                         rc = -ENODEV;
10204         }
10205
10206         return rc;
10207 }
10208
10209 static void bnx2x_self_test(struct net_device *dev,
10210                             struct ethtool_test *etest, u64 *buf)
10211 {
10212         struct bnx2x *bp = netdev_priv(dev);
10213
10214         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
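        /* buf[] slots follow bnx2x_tests_str_arr: 0 registers, 1 memory,
         * 2 loopback, 3 nvram, 4 interrupt, 5 link, 6 idle check
         */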
10215
10216         if (!netif_running(dev))
10217                 return;
10218
10219         /* offline tests are not supported in MF mode */
10220         if (IS_E1HMF(bp))
10221                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10222
10223         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10224                 int port = BP_PORT(bp);
10225                 u32 val;
10226                 u8 link_up;
10227
10228                 /* save current value of input enable for TX port IF */
10229                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10230                 /* disable input for TX port IF */
10231                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10232
10233                 link_up = bp->link_vars.link_up;
10234                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10235                 bnx2x_nic_load(bp, LOAD_DIAG);
10236                 /* wait until link state is restored */
10237                 bnx2x_wait_for_link(bp, link_up);
10238
10239                 if (bnx2x_test_registers(bp) != 0) {
10240                         buf[0] = 1;
10241                         etest->flags |= ETH_TEST_FL_FAILED;
10242                 }
10243                 if (bnx2x_test_memory(bp) != 0) {
10244                         buf[1] = 1;
10245                         etest->flags |= ETH_TEST_FL_FAILED;
10246                 }
10247                 buf[2] = bnx2x_test_loopback(bp, link_up);
10248                 if (buf[2] != 0)
10249                         etest->flags |= ETH_TEST_FL_FAILED;
10250
10251                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10252
10253                 /* restore input for TX port IF */
10254                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10255
10256                 bnx2x_nic_load(bp, LOAD_NORMAL);
10257                 /* wait until link state is restored */
10258                 bnx2x_wait_for_link(bp, link_up);
10259         }
10260         if (bnx2x_test_nvram(bp) != 0) {
10261                 buf[3] = 1;
10262                 etest->flags |= ETH_TEST_FL_FAILED;
10263         }
10264         if (bnx2x_test_intr(bp) != 0) {
10265                 buf[4] = 1;
10266                 etest->flags |= ETH_TEST_FL_FAILED;
10267         }
10268         if (bp->port.pmf)
10269                 if (bnx2x_link_test(bp) != 0) {
10270                         buf[5] = 1;
10271                         etest->flags |= ETH_TEST_FL_FAILED;
10272                 }
10273
10274 #ifdef BNX2X_EXTRA_DEBUG
10275         bnx2x_panic_dump(bp);
10276 #endif
10277 }
10278
10279 static const struct {
10280         long offset;
10281         int size;
10282         u8 string[ETH_GSTRING_LEN];
10283 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10284 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10285         { Q_STATS_OFFSET32(error_bytes_received_hi),
10286                                                 8, "[%d]: rx_error_bytes" },
10287         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10288                                                 8, "[%d]: rx_ucast_packets" },
10289         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10290                                                 8, "[%d]: rx_mcast_packets" },
10291         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10292                                                 8, "[%d]: rx_bcast_packets" },
10293         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10294         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10295                                          4, "[%d]: rx_phy_ip_err_discards"},
10296         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10297                                          4, "[%d]: rx_skb_alloc_discard" },
10298         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10299
10300 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10301         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10302                                                         8, "[%d]: tx_packets" }
10303 };
10304
10305 static const struct {
10306         long offset;
10307         int size;
10308         u32 flags;
10309 #define STATS_FLAGS_PORT                1
10310 #define STATS_FLAGS_FUNC                2
10311 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10312         u8 string[ETH_GSTRING_LEN];
10313 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10314 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10315                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10316         { STATS_OFFSET32(error_bytes_received_hi),
10317                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10318         { STATS_OFFSET32(total_unicast_packets_received_hi),
10319                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10320         { STATS_OFFSET32(total_multicast_packets_received_hi),
10321                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10322         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10323                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10324         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10325                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10326         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10327                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10328         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10329                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10330         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10331                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10332 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10333                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10334         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10335                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10336         { STATS_OFFSET32(no_buff_discard_hi),
10337                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10338         { STATS_OFFSET32(mac_filter_discard),
10339                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10340         { STATS_OFFSET32(xxoverflow_discard),
10341                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10342         { STATS_OFFSET32(brb_drop_hi),
10343                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10344         { STATS_OFFSET32(brb_truncate_hi),
10345                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10346         { STATS_OFFSET32(pause_frames_received_hi),
10347                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10348         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10349                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10350         { STATS_OFFSET32(nig_timer_max),
10351                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10352 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10353                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10354         { STATS_OFFSET32(rx_skb_alloc_failed),
10355                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10356         { STATS_OFFSET32(hw_csum_err),
10357                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10358
10359         { STATS_OFFSET32(total_bytes_transmitted_hi),
10360                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10361         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10362                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10363         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10364                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10365         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10366                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10367         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10368                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10369         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10370                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10371         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10372                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10373 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10374                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10375         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10376                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10377         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10378                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10379         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10380                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10381         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10382                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10383         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10384                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10385         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10386                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10387         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10388                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10389         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10390                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10391         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10392                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10393 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10394                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10395         { STATS_OFFSET32(pause_frames_sent_hi),
10396                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10397 };
10398
10399 #define IS_PORT_STAT(i) \
10400         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10401 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10402 #define IS_E1HMF_MODE_STAT(bp) \
10403                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10404
10405 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10406 {
10407         struct bnx2x *bp = netdev_priv(dev);
10408         int i, j, k;
10409
10410         switch (stringset) {
10411         case ETH_SS_STATS:
10412                 if (is_multi(bp)) {
10413                         k = 0;
10414                         for_each_rx_queue(bp, i) {
10415                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10416                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10417                                                 bnx2x_q_stats_arr[j].string, i);
10418                                 k += BNX2X_NUM_Q_STATS;
10419                         }
10420                         if (IS_E1HMF_MODE_STAT(bp))
10421                                 break;
10422                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10423                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10424                                        bnx2x_stats_arr[j].string);
10425                 } else {
10426                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10427                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10428                                         continue;
10429                                 strcpy(buf + j*ETH_GSTRING_LEN,
10430                                        bnx2x_stats_arr[i].string);
10431                                 j++;
10432                         }
10433                 }
10434                 break;
10435
10436         case ETH_SS_TEST:
10437                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10438                 break;
10439         }
10440 }
10441
10442 static int bnx2x_get_stats_count(struct net_device *dev)
10443 {
10444         struct bnx2x *bp = netdev_priv(dev);
10445         int i, num_stats;
10446
10447         if (is_multi(bp)) {
10448                 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10449                 if (!IS_E1HMF_MODE_STAT(bp))
10450                         num_stats += BNX2X_NUM_STATS;
10451         } else {
10452                 if (IS_E1HMF_MODE_STAT(bp)) {
10453                         num_stats = 0;
10454                         for (i = 0; i < BNX2X_NUM_STATS; i++)
10455                                 if (IS_FUNC_STAT(i))
10456                                         num_stats++;
10457                 } else
10458                         num_stats = BNX2X_NUM_STATS;
10459         }
10460
10461         return num_stats;
10462 }
10463
10464 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10465                                     struct ethtool_stats *stats, u64 *buf)
10466 {
10467         struct bnx2x *bp = netdev_priv(dev);
10468         u32 *hw_stats, *offset;
10469         int i, j, k;
10470
10471         if (is_multi(bp)) {
10472                 k = 0;
10473                 for_each_rx_queue(bp, i) {
10474                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10475                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10476                                 if (bnx2x_q_stats_arr[j].size == 0) {
10477                                         /* skip this counter */
10478                                         buf[k + j] = 0;
10479                                         continue;
10480                                 }
10481                                 offset = (hw_stats +
10482                                           bnx2x_q_stats_arr[j].offset);
10483                                 if (bnx2x_q_stats_arr[j].size == 4) {
10484                                         /* 4-byte counter */
10485                                         buf[k + j] = (u64) *offset;
10486                                         continue;
10487                                 }
10488                                 /* 8-byte counter */
10489                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10490                         }
10491                         k += BNX2X_NUM_Q_STATS;
10492                 }
10493                 if (IS_E1HMF_MODE_STAT(bp))
10494                         return;
10495                 hw_stats = (u32 *)&bp->eth_stats;
10496                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10497                         if (bnx2x_stats_arr[j].size == 0) {
10498                                 /* skip this counter */
10499                                 buf[k + j] = 0;
10500                                 continue;
10501                         }
10502                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10503                         if (bnx2x_stats_arr[j].size == 4) {
10504                                 /* 4-byte counter */
10505                                 buf[k + j] = (u64) *offset;
10506                                 continue;
10507                         }
10508                         /* 8-byte counter */
10509                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10510                 }
10511         } else {
10512                 hw_stats = (u32 *)&bp->eth_stats;
10513                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10514                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10515                                 continue;
10516                         if (bnx2x_stats_arr[i].size == 0) {
10517                                 /* skip this counter */
10518                                 buf[j] = 0;
10519                                 j++;
10520                                 continue;
10521                         }
10522                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10523                         if (bnx2x_stats_arr[i].size == 4) {
10524                                 /* 4-byte counter */
10525                                 buf[j] = (u64) *offset;
10526                                 j++;
10527                                 continue;
10528                         }
10529                         /* 8-byte counter */
10530                         buf[j] = HILO_U64(*offset, *(offset + 1));
10531                         j++;
10532                 }
10533         }
10534 }
10535
10536 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10537 {
10538         struct bnx2x *bp = netdev_priv(dev);
10539         int port = BP_PORT(bp);
10540         int i;
10541
10542         if (!netif_running(dev))
10543                 return 0;
10544
10545         if (!bp->port.pmf)
10546                 return 0;
10547
10548         if (data == 0)
10549                 data = 2;
10550
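        /* blink for 'data' seconds: each iteration toggles the LED and
         * sleeps 500 ms, hence data * 2 iterations
         */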
10551         for (i = 0; i < (data * 2); i++) {
10552                 if ((i % 2) == 0)
10553                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10554                                       bp->link_params.hw_led_mode,
10555                                       bp->link_params.chip_id);
10556                 else
10557                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10558                                       bp->link_params.hw_led_mode,
10559                                       bp->link_params.chip_id);
10560
10561                 msleep_interruptible(500);
10562                 if (signal_pending(current))
10563                         break;
10564         }
10565
10566         if (bp->link_vars.link_up)
10567                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10568                               bp->link_vars.line_speed,
10569                               bp->link_params.hw_led_mode,
10570                               bp->link_params.chip_id);
10571
10572         return 0;
10573 }
10574
10575 static struct ethtool_ops bnx2x_ethtool_ops = {
10576         .get_settings           = bnx2x_get_settings,
10577         .set_settings           = bnx2x_set_settings,
10578         .get_drvinfo            = bnx2x_get_drvinfo,
10579         .get_regs_len           = bnx2x_get_regs_len,
10580         .get_regs               = bnx2x_get_regs,
10581         .get_wol                = bnx2x_get_wol,
10582         .set_wol                = bnx2x_set_wol,
10583         .get_msglevel           = bnx2x_get_msglevel,
10584         .set_msglevel           = bnx2x_set_msglevel,
10585         .nway_reset             = bnx2x_nway_reset,
10586         .get_link               = bnx2x_get_link,
10587         .get_eeprom_len         = bnx2x_get_eeprom_len,
10588         .get_eeprom             = bnx2x_get_eeprom,
10589         .set_eeprom             = bnx2x_set_eeprom,
10590         .get_coalesce           = bnx2x_get_coalesce,
10591         .set_coalesce           = bnx2x_set_coalesce,
10592         .get_ringparam          = bnx2x_get_ringparam,
10593         .set_ringparam          = bnx2x_set_ringparam,
10594         .get_pauseparam         = bnx2x_get_pauseparam,
10595         .set_pauseparam         = bnx2x_set_pauseparam,
10596         .get_rx_csum            = bnx2x_get_rx_csum,
10597         .set_rx_csum            = bnx2x_set_rx_csum,
10598         .get_tx_csum            = ethtool_op_get_tx_csum,
10599         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10600         .set_flags              = bnx2x_set_flags,
10601         .get_flags              = ethtool_op_get_flags,
10602         .get_sg                 = ethtool_op_get_sg,
10603         .set_sg                 = ethtool_op_set_sg,
10604         .get_tso                = ethtool_op_get_tso,
10605         .set_tso                = bnx2x_set_tso,
10606         .self_test_count        = bnx2x_self_test_count,
10607         .self_test              = bnx2x_self_test,
10608         .get_strings            = bnx2x_get_strings,
10609         .phys_id                = bnx2x_phys_id,
10610         .get_stats_count        = bnx2x_get_stats_count,
10611         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10612 };
10613
10614 /* end of ethtool_ops */
10615
10616 /****************************************************************************
10617 * General service functions
10618 ****************************************************************************/
10619
10620 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10621 {
10622         u16 pmcsr;
10623
10624         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10625
10626         switch (state) {
10627         case PCI_D0:
10628                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10629                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10630                                        PCI_PM_CTRL_PME_STATUS));
10631
10632                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10633                         /* delay required during transition out of D3hot */
10634                         msleep(20);
10635                 break;
10636
10637         case PCI_D3hot:
10638                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
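                /* 3 is the D3hot encoding of the PM_CTRL state field */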
10639                 pmcsr |= 3;
10640
10641                 if (bp->wol)
10642                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10643
10644                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10645                                       pmcsr);
10646
10647                 /* No more memory access after this point until
10648                  * the device is brought back to D0.
10649                  */
10650                 break;
10651
10652         default:
10653                 return -EINVAL;
10654         }
10655         return 0;
10656 }
10657
10658 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10659 {
10660         u16 rx_cons_sb;
10661
10662         /* Tell compiler that status block fields can change */
10663         barrier();
10664         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
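        /* the last RCQ entry on each page is a "next page" pointer, not
         * a completion; step past it if the consumer lands on it
         */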
10665         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10666                 rx_cons_sb++;
10667         return (fp->rx_comp_cons != rx_cons_sb);
10668 }
10669
10670 /*
10671  * net_device service functions
10672  */
10673
10674 static int bnx2x_poll(struct napi_struct *napi, int budget)
10675 {
10676         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10677                                                  napi);
10678         struct bnx2x *bp = fp->bp;
10679         int work_done = 0;
10680
10681 #ifdef BNX2X_STOP_ON_ERROR
10682         if (unlikely(bp->panic))
10683                 goto poll_panic;
10684 #endif
10685
10686         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10687         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10688
10689         bnx2x_update_fpsb_idx(fp);
10690
10691         if (bnx2x_has_rx_work(fp)) {
10692                 work_done = bnx2x_rx_int(fp, budget);
10693
10694                 /* must not complete if we consumed full budget */
10695                 if (work_done >= budget)
10696                         goto poll_again;
10697         }
10698
10699         /* bnx2x_has_rx_work() reads the status block, thus we need to
10700          * ensure that status block indices have been actually read
10701          * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
10702          * so that we won't write the "newer" value of the status block to IGU
10703          * (if there was a DMA right after bnx2x_has_rx_work and
10704          * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10705          * may be postponed to right before bnx2x_ack_sb). In this case
10706          * there will never be another interrupt until there is another update
10707          * of the status block, while there is still unhandled work.
10708          */
10709         rmb();
10710
10711         if (!bnx2x_has_rx_work(fp)) {
10712 #ifdef BNX2X_STOP_ON_ERROR
10713 poll_panic:
10714 #endif
10715                 napi_complete(napi);
10716
10717                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10718                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10719                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10720                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10721         }
10722
10723 poll_again:
10724         return work_done;
10725 }
10726
10727
10728 /* We split the first BD into a headers BD and a data BD
10729  * to ease the pain of our fellow microcode engineers.
10730  * One DMA mapping is used for both BDs.
10731  * So far this split has only been observed to happen
10732  * in Other Operating Systems(TM).
10733  */
10734 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10735                                    struct bnx2x_fastpath *fp,
10736                                    struct sw_tx_bd *tx_buf,
10737                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
10738                                    u16 bd_prod, int nbd)
10739 {
10740         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10741         struct eth_tx_bd *d_tx_bd;
10742         dma_addr_t mapping;
10743         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10744
10745         /* first fix first BD */
10746         h_tx_bd->nbd = cpu_to_le16(nbd);
10747         h_tx_bd->nbytes = cpu_to_le16(hlen);
10748
10749         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
10750            le16_to_cpu(h_tx_bd->nbytes), le32_to_cpu(h_tx_bd->addr_hi),
10751            le32_to_cpu(h_tx_bd->addr_lo), le16_to_cpu(h_tx_bd->nbd));
10752
10753         /* now get a new data BD
10754          * (after the pbd) and fill it */
10755         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10756         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10757
10758         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10759                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10760
10761         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10762         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10763         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10764
10765         /* this marks the BD as one that has no individual mapping */
10766         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10767
10768         DP(NETIF_MSG_TX_QUEUED,
10769            "TSO split data size is %d (%x:%x)\n", le16_to_cpu(d_tx_bd->nbytes),
10770            le32_to_cpu(d_tx_bd->addr_hi), le32_to_cpu(d_tx_bd->addr_lo));
10771
10772         /* update tx_bd */
10773         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10774
10775         return bd_prod;
10776 }
10777
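The split above reuses one DMA mapping for two descriptors: the header BD
keeps the base address with nbytes trimmed to hlen, while the data BD points
hlen bytes past the base and carries the remainder. A standalone sketch of
that address arithmetic (plain C; the struct and values are illustrative, not
the HW descriptor layout):

        #include <inttypes.h>
        #include <stdio.h>

        struct fake_bd {
                uint32_t addr_hi;
                uint32_t addr_lo;
                uint16_t nbytes;
        };

        static void split_bd(struct fake_bd *hdr, struct fake_bd *data,
                             uint16_t hlen)
        {
                uint64_t base = ((uint64_t)hdr->addr_hi << 32) | hdr->addr_lo;
                uint64_t rest = base + hlen;    /* data starts after headers */
                uint16_t old_len = hdr->nbytes;

                hdr->nbytes = hlen;             /* header BD keeps the base */
                data->addr_hi = (uint32_t)(rest >> 32);
                data->addr_lo = (uint32_t)rest;
                data->nbytes = old_len - hlen;  /* remainder to the data BD */
        }

        int main(void)
        {
                struct fake_bd hdr = { 0x1, 0xfffff000, 1514 }, data = { 0, 0, 0 };

                split_bd(&hdr, &data, 66);      /* e.g. ETH + IP + TCP headers */
                printf("hdr %u bytes; data %#" PRIx32 ":%08" PRIx32 " %u bytes\n",
                       hdr.nbytes, data.addr_hi, data.addr_lo, data.nbytes);
                return 0;
        }
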
10778 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10779 {
10780         if (fix > 0)
10781                 csum = (u16) ~csum_fold(csum_sub(csum,
10782                                 csum_partial(t_header - fix, fix, 0)));
10783
10784         else if (fix < 0)
10785                 csum = (u16) ~csum_fold(csum_add(csum,
10786                                 csum_partial(t_header, -fix, 0)));
10787
10788         return swab16(csum);
10789 }
10790
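bnx2x_csum_fix() re-bases a partially computed checksum when the offset the
stack summed from differs from the transport header the hardware expects: the
ones'-complement sum of the skipped bytes is added or removed. A minimal
user-space analogue of that arithmetic (not the kernel csum_* API; the packet
bytes are made up) - deriving the tail sum from the whole-packet sum prints
the same value as summing the tail directly:

        #include <stdint.h>
        #include <stdio.h>
        #include <stddef.h>

        /* 16-bit ones'-complement sum with end-around carry, seeded by "sum" */
        static uint32_t ocsum(const uint8_t *buf, size_t len, uint32_t sum)
        {
                size_t i;

                for (i = 0; i + 1 < len; i += 2)
                        sum += (uint32_t)buf[i] << 8 | buf[i + 1];
                if (len & 1)
                        sum += (uint32_t)buf[len - 1] << 8;
                while (sum >> 16)
                        sum = (sum & 0xffff) + (sum >> 16);
                return sum;
        }

        int main(void)
        {
                uint8_t pkt[8] = { 0x45, 0x00, 0x00, 0x54,
                                   0xde, 0xad, 0xbe, 0xef };
                uint32_t whole = ocsum(pkt, 8, 0);
                uint32_t head = ocsum(pkt, 4, 0);
                /* subtracting a prefix = adding its ones' complement */
                uint32_t tail = ocsum(NULL, 0, whole + (~head & 0xffff));

                printf("derived %#x, direct %#x\n", tail, ocsum(pkt + 4, 4, 0));
                return 0;
        }
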
10791 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10792 {
10793         u32 rc;
10794
10795         if (skb->ip_summed != CHECKSUM_PARTIAL)
10796                 rc = XMIT_PLAIN;
10797
10798         else {
10799                 if (skb->protocol == htons(ETH_P_IPV6)) {
10800                         rc = XMIT_CSUM_V6;
10801                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10802                                 rc |= XMIT_CSUM_TCP;
10803
10804                 } else {
10805                         rc = XMIT_CSUM_V4;
10806                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10807                                 rc |= XMIT_CSUM_TCP;
10808                 }
10809         }
10810
10811         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10812                 rc |= XMIT_GSO_V4;
10813
10814         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10815                 rc |= XMIT_GSO_V6;
10816
10817         return rc;
10818 }
10819
10820 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10821 /* check if the packet requires linearization (packet is too fragmented);
10822    no need to check fragmentation if page size > 8K (there will be no
10823    violation of FW restrictions) */
10824 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10825                              u32 xmit_type)
10826 {
10827         int to_copy = 0;
10828         int hlen = 0;
10829         int first_bd_sz = 0;
10830
10831         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10832         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10833
10834                 if (xmit_type & XMIT_GSO) {
10835                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10836                         /* Check if LSO packet needs to be copied:
10837                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10838                         int wnd_size = MAX_FETCH_BD - 3;
10839                         /* Number of windows to check */
10840                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10841                         int wnd_idx = 0;
10842                         int frag_idx = 0;
10843                         u32 wnd_sum = 0;
10844
10845                         /* Headers length */
10846                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10847                                 tcp_hdrlen(skb);
10848
10849                         /* Amount of data (w/o headers) on linear part of SKB */
10850                         first_bd_sz = skb_headlen(skb) - hlen;
10851
10852                         wnd_sum  = first_bd_sz;
10853
10854                         /* Calculate the first sum - it's special */
10855                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10856                                 wnd_sum +=
10857                                         skb_shinfo(skb)->frags[frag_idx].size;
10858
10859                         /* If there was data in the linear part of the skb - check it */
10860                         if (first_bd_sz > 0) {
10861                                 if (unlikely(wnd_sum < lso_mss)) {
10862                                         to_copy = 1;
10863                                         goto exit_lbl;
10864                                 }
10865
10866                                 wnd_sum -= first_bd_sz;
10867                         }
10868
10869                         /* Others are easier: run through the frag list and
10870                            check all windows */
10871                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10872                                 wnd_sum +=
10873                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10874
10875                                 if (unlikely(wnd_sum < lso_mss)) {
10876                                         to_copy = 1;
10877                                         break;
10878                                 }
10879                                 wnd_sum -=
10880                                         skb_shinfo(skb)->frags[wnd_idx].size;
10881                         }
10882                 } else {
10883                         /* in the non-LSO case, a packet this fragmented
10884                            should always be linearized */
10885                         to_copy = 1;
10886                 }
10887         }
10888
10889 exit_lbl:
10890         if (unlikely(to_copy))
10891                 DP(NETIF_MSG_TX_QUEUED,
10892                    "Linearization IS REQUIRED for %s packet. "
10893                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10894                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10895                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10896
10897         return to_copy;
10898 }
10899 #endif
10900
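The check above enforces a firmware restriction: every window of wnd_size
consecutive buffers must cover at least one full MSS of payload, otherwise the
skb has to be linearized. A standalone sketch of the sliding-window test with
made-up fragment sizes:

        #include <stdio.h>

        static int needs_linearize(const int *frag, int nfrags,
                                   int wnd_size, int mss)
        {
                int wnd_sum = 0, i;

                for (i = 0; i < nfrags; i++) {
                        wnd_sum += frag[i];
                        if (i >= wnd_size)
                                wnd_sum -= frag[i - wnd_size]; /* slide */
                        if (i >= wnd_size - 1 && wnd_sum < mss)
                                return 1;       /* window under one MSS */
                }
                return 0;
        }

        int main(void)
        {
                int frags[] = { 400, 100, 100, 100, 100, 4000 };

                /* first window 400+100+100 = 600 < 1460, so linearize */
                printf("linearize: %d\n", needs_linearize(frags, 6, 3, 1460));
                return 0;
        }
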
10901 /* called with netif_tx_lock
10902  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10903  * netif_wake_queue()
10904  */
10905 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10906 {
10907         struct bnx2x *bp = netdev_priv(dev);
10908         struct bnx2x_fastpath *fp, *fp_stat;
10909         struct netdev_queue *txq;
10910         struct sw_tx_bd *tx_buf;
10911         struct eth_tx_start_bd *tx_start_bd;
10912         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10913         struct eth_tx_parse_bd *pbd = NULL;
10914         u16 pkt_prod, bd_prod;
10915         int nbd, fp_index;
10916         dma_addr_t mapping;
10917         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10918         int i;
10919         u8 hlen = 0;
10920         __le16 pkt_size = 0;
10921
10922 #ifdef BNX2X_STOP_ON_ERROR
10923         if (unlikely(bp->panic))
10924                 return NETDEV_TX_BUSY;
10925 #endif
10926
10927         fp_index = skb_get_queue_mapping(skb);
10928         txq = netdev_get_tx_queue(dev, fp_index);
10929
10930         fp = &bp->fp[fp_index + bp->num_rx_queues];
10931         fp_stat = &bp->fp[fp_index];
10932
10933         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10934                 fp_stat->eth_q_stats.driver_xoff++;
10935                 netif_tx_stop_queue(txq);
10936                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10937                 return NETDEV_TX_BUSY;
10938         }
10939
10940         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10941            "  gso type %x  xmit_type %x\n",
10942            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10943            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10944
10945 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10946         /* First, check if we need to linearize the skb (due to FW
10947            restrictions). No need to check fragmentation if page size > 8K
10948            (there will be no violation of FW restrictions) */
10949         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10950                 /* Statistics of linearization */
10951                 bp->lin_cnt++;
10952                 if (skb_linearize(skb) != 0) {
10953                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10954                            "silently dropping this SKB\n");
10955                         dev_kfree_skb_any(skb);
10956                         return NETDEV_TX_OK;
10957                 }
10958         }
10959 #endif
10960
10961         /*
10962          * Please read carefully. First we use one BD which we mark as start,
10963          * then we have a parsing info BD (used for TSO or xsum),
10964          * and only then we have the rest of the TSO BDs.
10965          * (don't forget to mark the last one as last,
10966          * and to unmap only AFTER you write to the BD ...)
10967          * And above all, all PBD sizes are in words - NOT DWORDS!
10968          */
10969
10970         pkt_prod = fp->tx_pkt_prod++;
10971         bd_prod = TX_BD(fp->tx_bd_prod);
10972
10973         /* get a tx_buf and first BD */
10974         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10975         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10976
10977         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10978         tx_start_bd->general_data = (UNICAST_ADDRESS <<
10979                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10980         /* header nbd */
10981         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10982
10983         /* remember the first BD of the packet */
10984         tx_buf->first_bd = fp->tx_bd_prod;
10985         tx_buf->skb = skb;
10986         tx_buf->flags = 0;
10987
10988         DP(NETIF_MSG_TX_QUEUED,
10989            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
10990            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10991
10992 #ifdef BCM_VLAN
10993         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10994             (bp->flags & HW_VLAN_TX_FLAG)) {
10995                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10996                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10997         } else
10998 #endif
10999                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11000
11001         /* turn on parsing and get a BD */
11002         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11003         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11004
11005         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11006
11007         if (xmit_type & XMIT_CSUM) {
11008                 hlen = (skb_network_header(skb) - skb->data) / 2;
11009
11010                 /* for now NS flag is not used in Linux */
11011                 pbd->global_data =
11012                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11013                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11014
11015                 pbd->ip_hlen = (skb_transport_header(skb) -
11016                                 skb_network_header(skb)) / 2;
11017
11018                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11019
11020                 pbd->total_hlen = cpu_to_le16(hlen);
11021                 hlen = hlen*2;
11022
11023                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11024
11025                 if (xmit_type & XMIT_CSUM_V4)
11026                         tx_start_bd->bd_flags.as_bitfield |=
11027                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11028                 else
11029                         tx_start_bd->bd_flags.as_bitfield |=
11030                                                 ETH_TX_BD_FLAGS_IPV6;
11031
11032                 if (xmit_type & XMIT_CSUM_TCP) {
11033                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11034
11035                 } else {
11036                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11037
11038                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11039
11040                         DP(NETIF_MSG_TX_QUEUED,
11041                            "hlen %d  fix %d  csum before fix %x\n",
11042                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11043
11044                         /* HW bug: fixup the CSUM */
11045                         pbd->tcp_pseudo_csum =
11046                                 bnx2x_csum_fix(skb_transport_header(skb),
11047                                                SKB_CS(skb), fix);
11048
11049                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11050                            pbd->tcp_pseudo_csum);
11051                 }
11052         }
11053
11054         mapping = pci_map_single(bp->pdev, skb->data,
11055                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11056
11057         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11058         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11059         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11060         tx_start_bd->nbd = cpu_to_le16(nbd);
11061         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11062         pkt_size = tx_start_bd->nbytes;
11063
11064         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11065            "  nbytes %d  flags %x  vlan %x\n", tx_start_bd,
11066            le32_to_cpu(tx_start_bd->addr_hi), le32_to_cpu(tx_start_bd->addr_lo),
11067            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11068            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11069
11070         if (xmit_type & XMIT_GSO) {
11071
11072                 DP(NETIF_MSG_TX_QUEUED,
11073                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11074                    skb->len, hlen, skb_headlen(skb),
11075                    skb_shinfo(skb)->gso_size);
11076
11077                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11078
11079                 if (unlikely(skb_headlen(skb) > hlen))
11080                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11081                                                  hlen, bd_prod, ++nbd);
11082
11083                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11084                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11085                 pbd->tcp_flags = pbd_tcp_flags(skb);
11086
11087                 if (xmit_type & XMIT_GSO_V4) {
11088                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11089                         pbd->tcp_pseudo_csum =
11090                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11091                                                           ip_hdr(skb)->daddr,
11092                                                           0, IPPROTO_TCP, 0));
11093
11094                 } else
11095                         pbd->tcp_pseudo_csum =
11096                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11097                                                         &ipv6_hdr(skb)->daddr,
11098                                                         0, IPPROTO_TCP, 0));
11099
11100                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11101         }
11102         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11103
11104         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11105                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11106
11107                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11108                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11109                 if (total_pkt_bd == NULL)
11110                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11111
11112                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11113                                        frag->size, PCI_DMA_TODEVICE);
11114
11115                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11116                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11117                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11118                 le16_add_cpu(&pkt_size, frag->size);
11119
11120                 DP(NETIF_MSG_TX_QUEUED,
11121                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n", i, tx_data_bd,
11122                    le32_to_cpu(tx_data_bd->addr_hi), le32_to_cpu(tx_data_bd->addr_lo),
11123                    le16_to_cpu(tx_data_bd->nbytes));
11124         }
11125
11126         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11127
11128         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11129
11130         /* now send a tx doorbell, counting the "next page" BD
11131          * if the packet's BDs crossed a page boundary and consumed it
11132          */
11133         if (TX_BD_POFF(bd_prod) < nbd)
11134                 nbd++;
11135
11136         if (total_pkt_bd != NULL)
11137                 total_pkt_bd->total_pkt_bytes = pkt_size;
11138
11139         if (pbd)
11140                 DP(NETIF_MSG_TX_QUEUED,
11141                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11142                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11143                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11144                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11145                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11146
11147         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11148
11149         /*
11150          * Make sure that the BD data is updated before updating the producer
11151          * since FW might read the BD right after the producer is updated.
11152          * This is only applicable for weak-ordered memory model archs such
11153          * as IA-64. The following barrier is also mandatory since the FW
11154          * assumes packets must have BDs.
11155          */
11156         wmb();
11157
11158         fp->tx_db.data.prod += nbd;
11159         barrier();
11160         DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11161
11162         mmiowb();
11163
11164         fp->tx_bd_prod += nbd;
11165
11166         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11167                 netif_tx_stop_queue(txq);
11168                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11169                    if we put Tx into XOFF state. */
11170                 smp_mb();
11171                 fp_stat->eth_q_stats.driver_xoff++;
11172                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11173                         netif_tx_wake_queue(txq);
11174         }
11175         fp_stat->tx_pkt++;
11176
11177         return NETDEV_TX_OK;
11178 }
11179
11180 /* called with rtnl_lock */
11181 static int bnx2x_open(struct net_device *dev)
11182 {
11183         struct bnx2x *bp = netdev_priv(dev);
11184
11185         netif_carrier_off(dev);
11186
11187         bnx2x_set_power_state(bp, PCI_D0);
11188
11189         return bnx2x_nic_load(bp, LOAD_OPEN);
11190 }
11191
11192 /* called with rtnl_lock */
11193 static int bnx2x_close(struct net_device *dev)
11194 {
11195         struct bnx2x *bp = netdev_priv(dev);
11196
11197         /* Unload the driver, release IRQs */
11198         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11199         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11200                 if (!CHIP_REV_IS_SLOW(bp))
11201                         bnx2x_set_power_state(bp, PCI_D3hot);
11202
11203         return 0;
11204 }
11205
11206 /* called with netif_tx_lock from dev_mcast.c */
11207 static void bnx2x_set_rx_mode(struct net_device *dev)
11208 {
11209         struct bnx2x *bp = netdev_priv(dev);
11210         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11211         int port = BP_PORT(bp);
11212
11213         if (bp->state != BNX2X_STATE_OPEN) {
11214                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11215                 return;
11216         }
11217
11218         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11219
11220         if (dev->flags & IFF_PROMISC)
11221                 rx_mode = BNX2X_RX_MODE_PROMISC;
11222
11223         else if ((dev->flags & IFF_ALLMULTI) ||
11224                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11225                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11226
11227         else { /* some multicasts */
11228                 if (CHIP_IS_E1(bp)) {
11229                         int i, old, offset;
11230                         struct dev_mc_list *mclist;
11231                         struct mac_configuration_cmd *config =
11232                                                 bnx2x_sp(bp, mcast_config);
11233
11234                         for (i = 0, mclist = dev->mc_list;
11235                              mclist && (i < dev->mc_count);
11236                              i++, mclist = mclist->next) {
11237
11238                                 config->config_table[i].
11239                                         cam_entry.msb_mac_addr =
11240                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11241                                 config->config_table[i].
11242                                         cam_entry.middle_mac_addr =
11243                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11244                                 config->config_table[i].
11245                                         cam_entry.lsb_mac_addr =
11246                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11247                                 config->config_table[i].cam_entry.flags =
11248                                                         cpu_to_le16(port);
11249                                 config->config_table[i].
11250                                         target_table_entry.flags = 0;
11251                                 config->config_table[i].target_table_entry.
11252                                         clients_bit_vector =
11253                                                 cpu_to_le32(1 << BP_L_ID(bp));
11254                                 config->config_table[i].
11255                                         target_table_entry.vlan_id = 0;
11256
11257                                 DP(NETIF_MSG_IFUP,
11258                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11259                                    config->config_table[i].
11260                                                 cam_entry.msb_mac_addr,
11261                                    config->config_table[i].
11262                                                 cam_entry.middle_mac_addr,
11263                                    config->config_table[i].
11264                                                 cam_entry.lsb_mac_addr);
11265                         }
11266                         old = config->hdr.length;
11267                         if (old > i) {
11268                                 for (; i < old; i++) {
11269                                         if (CAM_IS_INVALID(config->
11270                                                            config_table[i])) {
11271                                                 /* already invalidated */
11272                                                 break;
11273                                         }
11274                                         /* invalidate */
11275                                         CAM_INVALIDATE(config->
11276                                                        config_table[i]);
11277                                 }
11278                         }
11279
11280                         if (CHIP_REV_IS_SLOW(bp))
11281                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11282                         else
11283                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11284
11285                         config->hdr.length = i;
11286                         config->hdr.offset = offset;
11287                         config->hdr.client_id = bp->fp->cl_id;
11288                         config->hdr.reserved1 = 0;
11289
11290                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11291                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11292                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11293                                       0);
11294                 } else { /* E1H */
11295                         /* Accept one or more multicasts */
11296                         struct dev_mc_list *mclist;
11297                         u32 mc_filter[MC_HASH_SIZE];
11298                         u32 crc, bit, regidx;
11299                         int i;
11300
11301                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11302
11303                         for (i = 0, mclist = dev->mc_list;
11304                              mclist && (i < dev->mc_count);
11305                              i++, mclist = mclist->next) {
11306
11307                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11308                                    mclist->dmi_addr);
11309
11310                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11311                                 bit = (crc >> 24) & 0xff;
11312                                 regidx = bit >> 5;
11313                                 bit &= 0x1f;
11314                                 mc_filter[regidx] |= (1 << bit);
11315                         }
11316
11317                         for (i = 0; i < MC_HASH_SIZE; i++)
11318                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11319                                        mc_filter[i]);
11320                 }
11321         }
11322
11323         bp->rx_mode = rx_mode;
11324         bnx2x_set_storm_rx_mode(bp);
11325 }
11326
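For E1H parts the "some multicasts" case above programs a 256-bit hash filter:
CRC32C of the MAC address is taken, its top byte picks one of 256 bits, and
that bit is located as (register, bit) across the eight 32-bit MC_HASH
registers. A standalone sketch of the same selection; the bitwise CRC below is
an illustrative stand-in for the kernel's crc32c_le():

        #include <stdint.h>
        #include <stdio.h>
        #include <stddef.h>

        #define MC_HASH_SIZE 8          /* 8 x 32 bits = 256 hash bits */

        static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
        {
                int i;

                while (len--) {
                        crc ^= *p++;
                        for (i = 0; i < 8; i++)
                                crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78 : 0);
                }
                return crc;
        }

        int main(void)
        {
                uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
                uint32_t mc_filter[MC_HASH_SIZE] = { 0 };
                uint32_t crc = crc32c(0, mac, 6);
                uint32_t bit = (crc >> 24) & 0xff;
                uint32_t regidx = bit >> 5;     /* which 32-bit register */

                bit &= 0x1f;                    /* which bit inside it */
                mc_filter[regidx] |= 1u << bit;
                printf("crc %#x -> reg %u bit %u\n", crc, regidx, bit);
                return 0;
        }
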
11327 /* called with rtnl_lock */
11328 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11329 {
11330         struct sockaddr *addr = p;
11331         struct bnx2x *bp = netdev_priv(dev);
11332
11333         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11334                 return -EINVAL;
11335
11336         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11337         if (netif_running(dev)) {
11338                 if (CHIP_IS_E1(bp))
11339                         bnx2x_set_mac_addr_e1(bp, 1);
11340                 else
11341                         bnx2x_set_mac_addr_e1h(bp, 1);
11342         }
11343
11344         return 0;
11345 }
11346
11347 /* called with rtnl_lock */
11348 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11349                            int devad, u16 addr)
11350 {
11351         struct bnx2x *bp = netdev_priv(netdev);
11352         u16 value;
11353         int rc;
11354         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11355
11356         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11357            prtad, devad, addr);
11358
11359         if (prtad != bp->mdio.prtad) {
11360                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11361                    prtad, bp->mdio.prtad);
11362                 return -EINVAL;
11363         }
11364
11365         /* The HW expects different devad if CL22 is used */
11366         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11367
11368         bnx2x_acquire_phy_lock(bp);
11369         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11370                              devad, addr, &value);
11371         bnx2x_release_phy_lock(bp);
11372         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11373
11374         if (!rc)
11375                 rc = value;
11376         return rc;
11377 }
11378
11379 /* called with rtnl_lock */
11380 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11381                             u16 addr, u16 value)
11382 {
11383         struct bnx2x *bp = netdev_priv(netdev);
11384         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11385         int rc;
11386
11387         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11388                            " value 0x%x\n", prtad, devad, addr, value);
11389
11390         if (prtad != bp->mdio.prtad) {
11391                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11392                    prtad, bp->mdio.prtad);
11393                 return -EINVAL;
11394         }
11395
11396         /* The HW expects different devad if CL22 is used */
11397         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11398
11399         bnx2x_acquire_phy_lock(bp);
11400         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11401                               devad, addr, value);
11402         bnx2x_release_phy_lock(bp);
11403         return rc;
11404 }
11405
11406 /* called with rtnl_lock */
11407 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11408 {
11409         struct bnx2x *bp = netdev_priv(dev);
11410         struct mii_ioctl_data *mdio = if_mii(ifr);
11411
11412         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11413            mdio->phy_id, mdio->reg_num, mdio->val_in);
11414
11415         if (!netif_running(dev))
11416                 return -EAGAIN;
11417
11418         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11419 }
11420
11421 /* called with rtnl_lock */
11422 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11423 {
11424         struct bnx2x *bp = netdev_priv(dev);
11425         int rc = 0;
11426
11427         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11428             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11429                 return -EINVAL;
11430
11431         /* This does not race with packet allocation
11432          * because the actual alloc size is
11433          * only updated as part of load
11434          */
11435         dev->mtu = new_mtu;
11436
11437         if (netif_running(dev)) {
11438                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11439                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11440         }
11441
11442         return rc;
11443 }
11444
11445 static void bnx2x_tx_timeout(struct net_device *dev)
11446 {
11447         struct bnx2x *bp = netdev_priv(dev);
11448
11449 #ifdef BNX2X_STOP_ON_ERROR
11450         if (!bp->panic)
11451                 bnx2x_panic();
11452 #endif
11453         /* This allows the netif to be shut down gracefully before resetting */
11454         schedule_work(&bp->reset_task);
11455 }
11456
11457 #ifdef BCM_VLAN
11458 /* called with rtnl_lock */
11459 static void bnx2x_vlan_rx_register(struct net_device *dev,
11460                                    struct vlan_group *vlgrp)
11461 {
11462         struct bnx2x *bp = netdev_priv(dev);
11463
11464         bp->vlgrp = vlgrp;
11465
11466         /* Set flags according to the required capabilities */
11467         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11468
11469         if (dev->features & NETIF_F_HW_VLAN_TX)
11470                 bp->flags |= HW_VLAN_TX_FLAG;
11471
11472         if (dev->features & NETIF_F_HW_VLAN_RX)
11473                 bp->flags |= HW_VLAN_RX_FLAG;
11474
11475         if (netif_running(dev))
11476                 bnx2x_set_client_config(bp);
11477 }
11478
11479 #endif
11480
11481 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11482 static void poll_bnx2x(struct net_device *dev)
11483 {
11484         struct bnx2x *bp = netdev_priv(dev);
11485
11486         disable_irq(bp->pdev->irq);
11487         bnx2x_interrupt(bp->pdev->irq, dev);
11488         enable_irq(bp->pdev->irq);
11489 }
11490 #endif
11491
11492 static const struct net_device_ops bnx2x_netdev_ops = {
11493         .ndo_open               = bnx2x_open,
11494         .ndo_stop               = bnx2x_close,
11495         .ndo_start_xmit         = bnx2x_start_xmit,
11496         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11497         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11498         .ndo_validate_addr      = eth_validate_addr,
11499         .ndo_do_ioctl           = bnx2x_ioctl,
11500         .ndo_change_mtu         = bnx2x_change_mtu,
11501         .ndo_tx_timeout         = bnx2x_tx_timeout,
11502 #ifdef BCM_VLAN
11503         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11504 #endif
11505 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11506         .ndo_poll_controller    = poll_bnx2x,
11507 #endif
11508 };
11509
11510 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11511                                     struct net_device *dev)
11512 {
11513         struct bnx2x *bp;
11514         int rc;
11515
11516         SET_NETDEV_DEV(dev, &pdev->dev);
11517         bp = netdev_priv(dev);
11518
11519         bp->dev = dev;
11520         bp->pdev = pdev;
11521         bp->flags = 0;
11522         bp->func = PCI_FUNC(pdev->devfn);
11523
11524         rc = pci_enable_device(pdev);
11525         if (rc) {
11526                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11527                 goto err_out;
11528         }
11529
11530         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11531                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11532                        " aborting\n");
11533                 rc = -ENODEV;
11534                 goto err_out_disable;
11535         }
11536
11537         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11538                 printk(KERN_ERR PFX "Cannot find second PCI device"
11539                        " base address, aborting\n");
11540                 rc = -ENODEV;
11541                 goto err_out_disable;
11542         }
11543
11544         if (atomic_read(&pdev->enable_cnt) == 1) {
11545                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11546                 if (rc) {
11547                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11548                                " aborting\n");
11549                         goto err_out_disable;
11550                 }
11551
11552                 pci_set_master(pdev);
11553                 pci_save_state(pdev);
11554         }
11555
11556         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11557         if (bp->pm_cap == 0) {
11558                 printk(KERN_ERR PFX "Cannot find power management"
11559                        " capability, aborting\n");
11560                 rc = -EIO;
11561                 goto err_out_release;
11562         }
11563
11564         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11565         if (bp->pcie_cap == 0) {
11566                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11567                        " aborting\n");
11568                 rc = -EIO;
11569                 goto err_out_release;
11570         }
11571
11572         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11573                 bp->flags |= USING_DAC_FLAG;
11574                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11575                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11576                                " failed, aborting\n");
11577                         rc = -EIO;
11578                         goto err_out_release;
11579                 }
11580
11581         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11582                 printk(KERN_ERR PFX "System does not support DMA,"
11583                        " aborting\n");
11584                 rc = -EIO;
11585                 goto err_out_release;
11586         }
11587
11588         dev->mem_start = pci_resource_start(pdev, 0);
11589         dev->base_addr = dev->mem_start;
11590         dev->mem_end = pci_resource_end(pdev, 0);
11591
11592         dev->irq = pdev->irq;
11593
11594         bp->regview = pci_ioremap_bar(pdev, 0);
11595         if (!bp->regview) {
11596                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11597                 rc = -ENOMEM;
11598                 goto err_out_release;
11599         }
11600
11601         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11602                                         min_t(u64, BNX2X_DB_SIZE,
11603                                               pci_resource_len(pdev, 2)));
11604         if (!bp->doorbells) {
11605                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11606                 rc = -ENOMEM;
11607                 goto err_out_unmap;
11608         }
11609
11610         bnx2x_set_power_state(bp, PCI_D0);
11611
11612         /* clean indirect addresses */
11613         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11614                                PCICFG_VENDOR_ID_OFFSET);
11615         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11616         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11617         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11618         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11619
11620         dev->watchdog_timeo = TX_TIMEOUT;
11621
11622         dev->netdev_ops = &bnx2x_netdev_ops;
11623         dev->ethtool_ops = &bnx2x_ethtool_ops;
11624         dev->features |= NETIF_F_SG;
11625         dev->features |= NETIF_F_HW_CSUM;
11626         if (bp->flags & USING_DAC_FLAG)
11627                 dev->features |= NETIF_F_HIGHDMA;
11628         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11629         dev->features |= NETIF_F_TSO6;
11630 #ifdef BCM_VLAN
11631         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11632         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11633
11634         dev->vlan_features |= NETIF_F_SG;
11635         dev->vlan_features |= NETIF_F_HW_CSUM;
11636         if (bp->flags & USING_DAC_FLAG)
11637                 dev->vlan_features |= NETIF_F_HIGHDMA;
11638         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11639         dev->vlan_features |= NETIF_F_TSO6;
11640 #endif
11641
11642         /* get_port_hwinfo() will set prtad and mmds properly */
11643         bp->mdio.prtad = MDIO_PRTAD_NONE;
11644         bp->mdio.mmds = 0;
11645         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11646         bp->mdio.dev = dev;
11647         bp->mdio.mdio_read = bnx2x_mdio_read;
11648         bp->mdio.mdio_write = bnx2x_mdio_write;
11649
11650         return 0;
11651
11652 err_out_unmap:
11653         if (bp->regview) {
11654                 iounmap(bp->regview);
11655                 bp->regview = NULL;
11656         }
11657         if (bp->doorbells) {
11658                 iounmap(bp->doorbells);
11659                 bp->doorbells = NULL;
11660         }
11661
11662 err_out_release:
11663         if (atomic_read(&pdev->enable_cnt) == 1)
11664                 pci_release_regions(pdev);
11665
11666 err_out_disable:
11667         pci_disable_device(pdev);
11668         pci_set_drvdata(pdev, NULL);
11669
11670 err_out:
11671         return rc;
11672 }
11673
11674 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11675                                                  int *width, int *speed)
11676 {
11677         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11678
11679         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11680
11681         /* returned speed: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
11682         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11683 }
11684
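Decoding both width and speed from one link register read, as above, is plain
mask-and-shift work. A standalone sketch using the standard PCIe Link Status
layout (speed in bits 3:0, negotiated width in bits 9:4) purely for
illustration - the driver reads its own PCICFG_* mirror of the register, whose
masks may differ:

        #include <stdint.h>
        #include <stdio.h>

        #define LINK_SPEED_MASK         0x000f
        #define LINK_WIDTH_MASK         0x03f0
        #define LINK_WIDTH_SHIFT        4

        int main(void)
        {
                uint16_t lnksta = 0x0041;       /* x4 link at 2.5GT/s */
                int speed = lnksta & LINK_SPEED_MASK;
                int width = (lnksta & LINK_WIDTH_MASK) >> LINK_WIDTH_SHIFT;

                printf("PCI-E x%d %s\n", width,
                       (speed == 2) ? "5GHz (Gen2)" : "2.5GHz");
                return 0;
        }
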
11685 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11686 {
11687         const struct firmware *firmware = bp->firmware;
11688         struct bnx2x_fw_file_hdr *fw_hdr;
11689         struct bnx2x_fw_file_section *sections;
11690         u32 offset, len, num_ops;
11691         u16 *ops_offsets;
11692         int i;
11693         const u8 *fw_ver;
11694
11695         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11696                 return -EINVAL;
11697
11698         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11699         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11700
11701         /* Make sure none of the offsets and sizes make us read beyond
11702          * the end of the firmware data */
11703         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11704                 offset = be32_to_cpu(sections[i].offset);
11705                 len = be32_to_cpu(sections[i].len);
11706                 if (offset + len > firmware->size) {
11707                         printk(KERN_ERR PFX "Section %d length is out of "
11708                                             "bounds\n", i);
11709                         return -EINVAL;
11710                 }
11711         }
11712
11713         /* Likewise for the init_ops offsets */
11714         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11715         ops_offsets = (u16 *)(firmware->data + offset);
11716         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11717
11718         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11719                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11720                         printk(KERN_ERR PFX "Section offset %d is out of "
11721                                             "bounds\n", i);
11722                         return -EINVAL;
11723                 }
11724         }
11725
11726         /* Check FW version */
11727         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11728         fw_ver = firmware->data + offset;
11729         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11730             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11731             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11732             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11733                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11734                                     " Should be %d.%d.%d.%d\n",
11735                        fw_ver[0], fw_ver[1], fw_ver[2],
11736                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11737                        BCM_5710_FW_MINOR_VERSION,
11738                        BCM_5710_FW_REVISION_VERSION,
11739                        BCM_5710_FW_ENGINEERING_VERSION);
11740                 return -EINVAL;
11741         }
11742
11743         return 0;
11744 }
11745
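One caveat about the section bounds check above: with 32-bit offset and len,
"offset + len" can wrap and slip past the test. A standalone sketch of an
overflow-safe form of the same comparison:

        #include <stdint.h>
        #include <stdio.h>

        static int section_in_bounds(uint32_t offset, uint32_t len,
                                     uint32_t size)
        {
                /* no addition, so nothing can wrap */
                return len <= size && offset <= size - len;
        }

        int main(void)
        {
                printf("%d\n", section_in_bounds(100, 50, 200));       /* 1 */
                printf("%d\n", section_in_bounds(0xffffffff, 2, 200)); /* 0 */
                return 0;
        }
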
11746 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11747 {
11748         u32 i;
11749         const __be32 *source = (const __be32 *)_source;
11750         u32 *target = (u32 *)_target;
11751
11752         for (i = 0; i < n/4; i++)
11753                 target[i] = be32_to_cpu(source[i]);
11754 }
11755
11756 /*
11757    Ops array is stored in the following format:
11758    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11759  */
11760 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11761 {
11762         u32 i, j, tmp;
11763         const __be32 *source = (const __be32 *)_source;
11764         struct raw_op *target = (struct raw_op *)_target;
11765
11766         for (i = 0, j = 0; i < n/8; i++, j += 2) {
11767                 tmp = be32_to_cpu(source[j]);
11768                 target[i].op = (tmp >> 24) & 0xff;
11769                 target[i].offset = tmp & 0xffffff;
11770                 target[i].raw_data = be32_to_cpu(source[j + 1]);
11771         }
11772 }
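
The loop above follows the record layout described in the comment: each 8-byte
entry is a big-endian word packing op and offset, followed by a big-endian
data word. A standalone sketch decoding one such record from a byte buffer
(the sample bytes are made up):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* op 0x02, offset 0x012345, data 0xdeadbeef, big endian */
                uint8_t rec[8] = { 0x02, 0x01, 0x23, 0x45,
                                   0xde, 0xad, 0xbe, 0xef };
                uint32_t w0 = (uint32_t)rec[0] << 24 | rec[1] << 16 |
                              rec[2] << 8 | rec[3];
                uint32_t data = (uint32_t)rec[4] << 24 | rec[5] << 16 |
                                rec[6] << 8 | rec[7];

                printf("op %#x offset %#x data %#x\n",
                       (w0 >> 24) & 0xff, w0 & 0xffffff, data);
                return 0;
        }
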
11773 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11774 {
11775         u32 i;
11776         u16 *target = (u16 *)_target;
11777         const __be16 *source = (const __be16 *)_source;
11778
11779         for (i = 0; i < n/2; i++)
11780                 target[i] = be16_to_cpu(source[i]);
11781 }
11782
11783 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11784         do {   \
11785                 u32 len = be32_to_cpu(fw_hdr->arr.len);   \
11786                 bp->arr = kmalloc(len, GFP_KERNEL);  \
11787                 if (!bp->arr) { \
11788                         printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11789                         goto lbl; \
11790                 } \
11791                 func(bp->firmware->data + \
11792                         be32_to_cpu(fw_hdr->arr.offset), \
11793                         (u8*)bp->arr, len); \
11794         } while (0)
11795
11796
11797 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11798 {
11799         char fw_file_name[40] = {0};
11800         int rc, offset;
11801         struct bnx2x_fw_file_hdr *fw_hdr;
11802
11803         /* Create a FW file name */
11804         if (CHIP_IS_E1(bp))
11805                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11806         else
11807                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11808
11809         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11810                 BCM_5710_FW_MAJOR_VERSION,
11811                 BCM_5710_FW_MINOR_VERSION,
11812                 BCM_5710_FW_REVISION_VERSION,
11813                 BCM_5710_FW_ENGINEERING_VERSION);
11814
11815         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11816
11817         rc = request_firmware(&bp->firmware, fw_file_name, dev);
11818         if (rc) {
11819                 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11820                 goto request_firmware_exit;
11821         }
11822
11823         rc = bnx2x_check_firmware(bp);
11824         if (rc) {
11825                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11826                 goto request_firmware_exit;
11827         }
11828
11829         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11830
11831         /* Initialize the pointers to the init arrays */
11832         /* Blob */
11833         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11834
11835         /* Opcodes */
11836         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11837
11838         /* Offsets */
11839         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11840
11841         /* STORMs firmware */
11842         bp->tsem_int_table_data = bp->firmware->data +
11843                 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11844         bp->tsem_pram_data      = bp->firmware->data +
11845                 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11846         bp->usem_int_table_data = bp->firmware->data +
11847                 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11848         bp->usem_pram_data      = bp->firmware->data +
11849                 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11850         bp->xsem_int_table_data = bp->firmware->data +
11851                 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11852         bp->xsem_pram_data      = bp->firmware->data +
11853                 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11854         bp->csem_int_table_data = bp->firmware->data +
11855                 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11856         bp->csem_pram_data      = bp->firmware->data +
11857                 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11858
11859         return 0;
11860 init_offsets_alloc_err:
11861         kfree(bp->init_ops);
11862 init_ops_alloc_err:
11863         kfree(bp->init_data);
11864 request_firmware_exit:
11865         release_firmware(bp->firmware);
11866
11867         return rc;
11868 }
11869
11870
11871
11872 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11873                                     const struct pci_device_id *ent)
11874 {
11875         static int version_printed;
11876         struct net_device *dev = NULL;
11877         struct bnx2x *bp;
11878         int pcie_width, pcie_speed;
11879         int rc;
11880
11881         if (version_printed++ == 0)
11882                 printk(KERN_INFO "%s", version);
11883
11884         /* dev zeroed in init_etherdev */
11885         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11886         if (!dev) {
11887                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11888                 return -ENOMEM;
11889         }
11890
11891         bp = netdev_priv(dev);
11892         bp->msglevel = debug;
11893
11894         pci_set_drvdata(pdev, dev);
11895
11896         rc = bnx2x_init_dev(pdev, dev);
11897         if (rc < 0) {
11898                 free_netdev(dev);
11899                 return rc;
11900         }
11901
11902         rc = bnx2x_init_bp(bp);
11903         if (rc)
11904                 goto init_one_exit;
11905
11906         /* Set init arrays */
11907         rc = bnx2x_init_firmware(bp, &pdev->dev);
11908         if (rc) {
11909                 printk(KERN_ERR PFX "Error loading firmware\n");
11910                 goto init_one_exit;
11911         }
11912
11913         rc = register_netdev(dev);
11914         if (rc) {
11915                 dev_err(&pdev->dev, "Cannot register net device\n");
11916                 goto init_one_exit;
11917         }
11918
11919         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
11920         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11921                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11922                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11923                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
11924                dev->base_addr, bp->pdev->irq);
11925         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11926
11927         return 0;
11928
11929 init_one_exit:
11930         if (bp->regview)
11931                 iounmap(bp->regview);
11932
11933         if (bp->doorbells)
11934                 iounmap(bp->doorbells);
11935
11936         free_netdev(dev);
11937
11938         if (atomic_read(&pdev->enable_cnt) == 1)
11939                 pci_release_regions(pdev);
11940
11941         pci_disable_device(pdev);
11942         pci_set_drvdata(pdev, NULL);
11943
11944         return rc;
11945 }
11946
11947 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11948 {
11949         struct net_device *dev = pci_get_drvdata(pdev);
11950         struct bnx2x *bp;
11951
11952         if (!dev) {
11953                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11954                 return;
11955         }
11956         bp = netdev_priv(dev);
11957
11958         unregister_netdev(dev);
11959
11960         kfree(bp->init_ops_offsets);
11961         kfree(bp->init_ops);
11962         kfree(bp->init_data);
11963         release_firmware(bp->firmware);
11964
11965         if (bp->regview)
11966                 iounmap(bp->regview);
11967
11968         if (bp->doorbells)
11969                 iounmap(bp->doorbells);
11970
11971         free_netdev(dev);
11972
11973         if (atomic_read(&pdev->enable_cnt) == 1)
11974                 pci_release_regions(pdev);
11975
11976         pci_disable_device(pdev);
11977         pci_set_drvdata(pdev, NULL);
11978 }
11979
11980 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11981 {
11982         struct net_device *dev = pci_get_drvdata(pdev);
11983         struct bnx2x *bp;
11984
11985         if (!dev) {
11986                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11987                 return -ENODEV;
11988         }
11989         bp = netdev_priv(dev);
11990
11991         rtnl_lock();
11992
11993         pci_save_state(pdev);
11994
11995         if (!netif_running(dev)) {
11996                 rtnl_unlock();
11997                 return 0;
11998         }
11999
12000         netif_device_detach(dev);
12001
12002         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12003
12004         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12005
12006         rtnl_unlock();
12007
12008         return 0;
12009 }
12010
12011 static int bnx2x_resume(struct pci_dev *pdev)
12012 {
12013         struct net_device *dev = pci_get_drvdata(pdev);
12014         struct bnx2x *bp;
12015         int rc;
12016
12017         if (!dev) {
12018                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12019                 return -ENODEV;
12020         }
12021         bp = netdev_priv(dev);
12022
12023         rtnl_lock();
12024
12025         pci_restore_state(pdev);
12026
12027         if (!netif_running(dev)) {
12028                 rtnl_unlock();
12029                 return 0;
12030         }
12031
12032         bnx2x_set_power_state(bp, PCI_D0);
12033         netif_device_attach(dev);
12034
12035         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12036
12037         rtnl_unlock();
12038
12039         return rc;
12040 }
12041
12042 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12043 {
12044         int i;
12045
12046         bp->state = BNX2X_STATE_ERROR;
12047
12048         bp->rx_mode = BNX2X_RX_MODE_NONE;
12049
12050         bnx2x_netif_stop(bp, 0);
12051
12052         del_timer_sync(&bp->timer);
12053         bp->stats_state = STATS_STATE_DISABLED;
12054         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12055
12056         /* Release IRQs */
12057         bnx2x_free_irq(bp);
12058
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

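/**
 * bnx2x_eeh_recover - re-establish contact with the bootcode
 * @bp: driver handle
 *
 * Re-reads the shared memory base address, checks the MCP validity
 * signature and fetches the current firmware sequence number so the
 * driver can talk to the management firmware again after a reset.
 */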
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI channel state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

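/* PCI driver glue: probe/remove, power management and error recovery
 * entry points for all devices matched by bnx2x_pci_tbl.
 */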
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

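/* Module entry point: create the slow-path workqueue used by the
 * driver, then register with the PCI core; the workqueue is torn
 * down again if registration fails.
 */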
static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);