/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1-5"
#define DRV_MODULE_RELDATE      "2009/11/09"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

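/* Write a buffer from host memory to GRC address space using the DMA
 * engine.  @dma_addr is the PCI address of the source buffer, @dst_addr
 * the GRC destination, @len32 the length in dwords.  Falls back to
 * indirect register writes while the DMAE is not yet ready.  The
 * transfer is serialized by dmae_mutex; completion is detected by
 * polling the wb_comp word that the engine writes back to host memory.
 */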
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

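/* Counterpart of bnx2x_write_dmae(): read @len32 dwords from GRC
 * address @src_addr into the slowpath wb_data buffer, using indirect
 * register reads while the DMAE is not yet ready.
 */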
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

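/* Write an arbitrarily long buffer by splitting it into chunks of at
 * most DMAE_LEN32_WR_MAX dwords.  Note that @len counts dwords while
 * @offset advances in bytes, hence the "* 4".
 */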
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int offset = 0;

        while (len > DMAE_LEN32_WR_MAX) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, DMAE_LEN32_WR_MAX);
                offset += DMAE_LEN32_WR_MAX * 4;
                len -= DMAE_LEN32_WR_MAX;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

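/* Scan the assert lists of the four storm processors (X/T/C/U) and
 * print every valid entry (four dwords each); scanning a list stops at
 * the first invalid assert opcode.  Returns the number of asserts found.
 */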
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

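/* Dump the MCP (bootcode) trace buffer from scratchpad memory.  The
 * current write mark is read from the scratchpad; the buffer is printed
 * in two passes, presumably so that the oldest entries come out first.
 */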
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        printk(KERN_ERR PFX);
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_ERR PFX "end of fw dump\n");
}

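/* Dump driver state for post-mortem debugging: status block indices,
 * the Rx/Tx ring entries around the current consumers, the firmware
 * trace and the storm assert lists.  Statistics are disabled first.
 */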
static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

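/* Enable interrupts in the host coalescing block according to the
 * active mode (MSI-X, MSI or INTx) and, on E1H, program the
 * leading/trailing edge registers for attention bits.
 */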
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

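/* Disable interrupt handling and wait for all in-flight ISRs and the
 * slowpath task to finish.  When @disable_hw is set, the HW itself is
 * also prevented from generating further interrupts.
 */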
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

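/* Number of available Tx BDs.  The NUM_TX_RINGS "next page" entries
 * are counted as used, so the result errs on the safe side.
 */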
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);
        }
        return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

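/* Handle a slowpath event (ramrod completion) reported on the RCQ:
 * advance the per-queue or global state machine according to the
 * command that completed, and make the change visible to
 * bnx2x_wait_ramrod().
 */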
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

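/* Advance the SGE producer after a TPA completion: clear the mask bits
 * of the pages consumed by this CQE, then move the producer over every
 * fully consumed mask element and re-mark the "next page" entries.
 */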
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

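/* Start of a TPA aggregation: map the empty skb parked in the TPA pool
 * and place it at the producer slot, while the skb at the consumer slot
 * (holding the first part of the aggregated packet) is parked in the
 * pool until the aggregation is stopped.
 */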
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                                max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

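/* End of a TPA aggregation: attach the SGE pages to the pooled skb, fix
 * the IP checksum and pass the packet to the stack.  A replacement skb
 * is allocated for the pool; if either allocation fails, the aggregated
 * packet is dropped.
 */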
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take the VLAN tag into account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
                                        struct bnx2x_fastpath *fp,
                                        u16 bd_prod, u16 rx_comp_prod,
                                        u16 rx_sge_prod)
{
        struct ustorm_eth_rx_producers rx_prods = {0};
        int i;

        /* Update producers */
        rx_prods.bd_prod = bd_prod;
        rx_prods.cqe_prod = rx_comp_prod;
        rx_prods.sge_prod = rx_sge_prod;

        /*
         * Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         * This is only applicable for weak-ordered memory model archs such
1476          * as IA-64. The following barrier is also mandatory since the FW
1477          * assumes that BDs always have buffers.
1478          */
1479         wmb();
1480
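	/* The producers structure is copied into ustorm internal memory
	 * one 32-bit word at a time, since REG_WR() writes a single u32
	 * per call; the FW reads all three producers from that block.
	 */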
1481         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1482                 REG_WR(bp, BAR_USTRORM_INTMEM +
1483                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1484                        ((u32 *)&rx_prods)[i]);
1485
1486         mmiowb(); /* keep prod updates ordered */
1487
1488         DP(NETIF_MSG_RX_STATUS,
1489            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1490            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1491 }
1492
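/* Rx fastpath poll loop: walk the completion queue until it is empty or
 * the NAPI budget is exhausted, passing completed packets to the stack
 * (or to the TPA handlers) and recycling/refilling BDs as it goes.
 */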
1493 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1494 {
1495         struct bnx2x *bp = fp->bp;
1496         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1497         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1498         int rx_pkt = 0;
1499
1500 #ifdef BNX2X_STOP_ON_ERROR
1501         if (unlikely(bp->panic))
1502                 return 0;
1503 #endif
1504
1505         /* The CQ "next element" is the same size as a regular element,
1506            so it is safe to simply increment past it here */
1507         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1508         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1509                 hw_comp_cons++;
1510
1511         bd_cons = fp->rx_bd_cons;
1512         bd_prod = fp->rx_bd_prod;
1513         bd_prod_fw = bd_prod;
1514         sw_comp_cons = fp->rx_comp_cons;
1515         sw_comp_prod = fp->rx_comp_prod;
1516
1517         /* Memory barrier necessary as speculative reads of the rx
1518          * buffer can be ahead of the index in the status block
1519          */
1520         rmb();
1521
1522         DP(NETIF_MSG_RX_STATUS,
1523            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1524            fp->index, hw_comp_cons, sw_comp_cons);
1525
1526         while (sw_comp_cons != hw_comp_cons) {
1527                 struct sw_rx_bd *rx_buf = NULL;
1528                 struct sk_buff *skb;
1529                 union eth_rx_cqe *cqe;
1530                 u8 cqe_fp_flags;
1531                 u16 len, pad;
1532
1533                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1534                 bd_prod = RX_BD(bd_prod);
1535                 bd_cons = RX_BD(bd_cons);
1536
1537                 /* Prefetch the page containing the BD descriptor
1538                    at the producer's index; it will be needed when a new
1539                    skb is allocated */
1540                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1541                                              (&fp->rx_desc_ring[bd_prod])) -
1542                                   PAGE_SIZE + 1));
1543
1544                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1545                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1546
1547                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1548                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1549                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1550                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1551                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1552                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1553
1554                 /* is this a slowpath msg? */
1555                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1556                         bnx2x_sp_event(fp, cqe);
1557                         goto next_cqe;
1558
1559                 /* this is an rx packet */
1560                 } else {
1561                         rx_buf = &fp->rx_buf_ring[bd_cons];
1562                         skb = rx_buf->skb;
1563                         prefetch(skb);
1564                         prefetch((u8 *)skb + 256);
1565                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1566                         pad = cqe->fast_path_cqe.placement_offset;
1567
1568                         /* If the CQE is marked both TPA_START and
1569                            TPA_END, it is a non-TPA CQE */
1570                         if ((!fp->disable_tpa) &&
1571                             (TPA_TYPE(cqe_fp_flags) !=
1572                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1573                                 u16 queue = cqe->fast_path_cqe.queue_index;
1574
1575                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1576                                         DP(NETIF_MSG_RX_STATUS,
1577                                            "calling tpa_start on queue %d\n",
1578                                            queue);
1579
1580                                         bnx2x_tpa_start(fp, queue, skb,
1581                                                         bd_cons, bd_prod);
1582                                         goto next_rx;
1583                                 }
1584
1585                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1586                                         DP(NETIF_MSG_RX_STATUS,
1587                                            "calling tpa_stop on queue %d\n",
1588                                            queue);
1589
1590                                         if (!BNX2X_RX_SUM_FIX(cqe))
1591                                                 BNX2X_ERR("STOP on non-TCP "
1592                                                           "data\n");
1593
1594                                         /* This is the size of the linear
1595                                            data on this skb */
1596                                         len = le16_to_cpu(cqe->fast_path_cqe.
1597                                                                 len_on_bd);
1598                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1599                                                     len, cqe, comp_ring_cons);
1600 #ifdef BNX2X_STOP_ON_ERROR
1601                                         if (bp->panic)
1602                                                 return 0;
1603 #endif
1604
1605                                         bnx2x_update_sge_prod(fp,
1606                                                         &cqe->fast_path_cqe);
1607                                         goto next_cqe;
1608                                 }
1609                         }
1610
1611                         pci_dma_sync_single_for_device(bp->pdev,
1612                                         pci_unmap_addr(rx_buf, mapping),
1613                                                        pad + RX_COPY_THRESH,
1614                                                        PCI_DMA_FROMDEVICE);
1615                         prefetch(skb);
1616                         prefetch(((char *)(skb)) + 128);
1617
1618                         /* is this an error packet? */
1619                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1620                                 DP(NETIF_MSG_RX_ERR,
1621                                    "ERROR  flags %x  rx packet %u\n",
1622                                    cqe_fp_flags, sw_comp_cons);
1623                                 fp->eth_q_stats.rx_err_discard_pkt++;
1624                                 goto reuse_rx;
1625                         }
1626
1627                         /* Since we don't have a jumbo ring,
1628                          * copy small packets if mtu > 1500
1629                          */
1630                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1631                             (len <= RX_COPY_THRESH)) {
1632                                 struct sk_buff *new_skb;
1633
1634                                 new_skb = netdev_alloc_skb(bp->dev,
1635                                                            len + pad);
1636                                 if (new_skb == NULL) {
1637                                         DP(NETIF_MSG_RX_ERR,
1638                                            "ERROR  packet dropped "
1639                                            "because of alloc failure\n");
1640                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1641                                         goto reuse_rx;
1642                                 }
1643
1644                                 /* aligned copy */
1645                                 skb_copy_from_linear_data_offset(skb, pad,
1646                                                     new_skb->data + pad, len);
1647                                 skb_reserve(new_skb, pad);
1648                                 skb_put(new_skb, len);
1649
1650                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1651
1652                                 skb = new_skb;
1653
1654                         } else
1655                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1656                                 pci_unmap_single(bp->pdev,
1657                                         pci_unmap_addr(rx_buf, mapping),
1658                                                  bp->rx_buf_size,
1659                                                  PCI_DMA_FROMDEVICE);
1660                                 skb_reserve(skb, pad);
1661                                 skb_put(skb, len);
1662
1663                         } else {
1664                                 DP(NETIF_MSG_RX_ERR,
1665                                    "ERROR  packet dropped because "
1666                                    "of alloc failure\n");
1667                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1668 reuse_rx:
1669                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1670                                 goto next_rx;
1671                         }
1672
1673                         skb->protocol = eth_type_trans(skb, bp->dev);
1674
1675                         skb->ip_summed = CHECKSUM_NONE;
1676                         if (bp->rx_csum) {
1677                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1678                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1679                                 else
1680                                         fp->eth_q_stats.hw_csum_err++;
1681                         }
1682                 }
1683
1684                 skb_record_rx_queue(skb, fp->index);
1685
1686 #ifdef BCM_VLAN
1687                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1688                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1689                      PARSING_FLAGS_VLAN))
1690                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1691                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1692                 else
1693 #endif
1694                         netif_receive_skb(skb);
1695
1696
1697 next_rx:
1698                 rx_buf->skb = NULL;
1699
1700                 bd_cons = NEXT_RX_IDX(bd_cons);
1701                 bd_prod = NEXT_RX_IDX(bd_prod);
1702                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1703                 rx_pkt++;
1704 next_cqe:
1705                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1706                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1707
1708                 if (rx_pkt == budget)
1709                         break;
1710         } /* while */
1711
1712         fp->rx_bd_cons = bd_cons;
1713         fp->rx_bd_prod = bd_prod_fw;
1714         fp->rx_comp_cons = sw_comp_cons;
1715         fp->rx_comp_prod = sw_comp_prod;
1716
1717         /* Update producers */
1718         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1719                              fp->rx_sge_prod);
1720
1721         fp->rx_pkt += rx_pkt;
1722         fp->rx_calls++;
1723
1724         return rx_pkt;
1725 }
1726
1727 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1728 {
1729         struct bnx2x_fastpath *fp = fp_cookie;
1730         struct bnx2x *bp = fp->bp;
1731
1732         /* Return here if interrupt is disabled */
1733         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1734                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1735                 return IRQ_HANDLED;
1736         }
1737
1738         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1739            fp->index, fp->sb_id);
1740         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1741
1742 #ifdef BNX2X_STOP_ON_ERROR
1743         if (unlikely(bp->panic))
1744                 return IRQ_HANDLED;
1745 #endif
1746
1747         /* Handle Rx and Tx according to MSI-X vector */
1748         prefetch(fp->rx_cons_sb);
1749         prefetch(fp->tx_cons_sb);
1750         prefetch(&fp->status_blk->u_status_block.status_block_index);
1751         prefetch(&fp->status_blk->c_status_block.status_block_index);
1752         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1753
1754         return IRQ_HANDLED;
1755 }
1756
1757 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1758 {
1759         struct bnx2x *bp = netdev_priv(dev_instance);
1760         u16 status = bnx2x_ack_int(bp);
1761         u16 mask;
1762         int i;
1763
1764         /* Return here if interrupt is shared and it's not for us */
1765         if (unlikely(status == 0)) {
1766                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1767                 return IRQ_NONE;
1768         }
1769         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1770
1771         /* Return here if interrupt is disabled */
1772         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1773                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1774                 return IRQ_HANDLED;
1775         }
1776
1777 #ifdef BNX2X_STOP_ON_ERROR
1778         if (unlikely(bp->panic))
1779                 return IRQ_HANDLED;
1780 #endif
1781
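	/* Each fastpath status block owns one bit of the IGU status word
	 * starting at bit 1 (bit 0 belongs to the default status block,
	 * i.e. the slowpath), hence the "0x2 << sb_id" mask below.
	 */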
1782         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1783                 struct bnx2x_fastpath *fp = &bp->fp[i];
1784
1785                 mask = 0x2 << fp->sb_id;
1786                 if (status & mask) {
1787                         /* Handle Rx and Tx according to SB id */
1788                         prefetch(fp->rx_cons_sb);
1789                         prefetch(&fp->status_blk->u_status_block.
1790                                                 status_block_index);
1791                         prefetch(fp->tx_cons_sb);
1792                         prefetch(&fp->status_blk->c_status_block.
1793                                                 status_block_index);
1794                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1795                         status &= ~mask;
1796                 }
1797         }
1798
1799 #ifdef BCM_CNIC
1800         mask = 0x2 << CNIC_SB_ID(bp);
1801         if (status & (mask | 0x1)) {
1802                 struct cnic_ops *c_ops = NULL;
1803
1804                 rcu_read_lock();
1805                 c_ops = rcu_dereference(bp->cnic_ops);
1806                 if (c_ops)
1807                         c_ops->cnic_handler(bp->cnic_data, NULL);
1808                 rcu_read_unlock();
1809
1810                 status &= ~mask;
1811         }
1812 #endif
1813
1814         if (unlikely(status & 0x1)) {
1815                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1816
1817                 status &= ~0x1;
1818                 if (!status)
1819                         return IRQ_HANDLED;
1820         }
1821
1822         if (status)
1823                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1824                    status);
1825
1826         return IRQ_HANDLED;
1827 }
1828
1829 /* end of fast path */
1830
1831 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1832
1833 /* Link */
1834
1835 /*
1836  * General service functions
1837  */
1838
1839 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1840 {
1841         u32 lock_status;
1842         u32 resource_bit = (1 << resource);
1843         int func = BP_FUNC(bp);
1844         u32 hw_lock_control_reg;
1845         int cnt;
1846
1847         /* Validating that the resource is within range */
1848         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849                 DP(NETIF_MSG_HW,
1850                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852                 return -EINVAL;
1853         }
1854
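	/* Each function has its own lock control register, 8 bytes apart:
	 * functions 0-5 are based at DRIVER_CONTROL_1, functions 6-7 at
	 * DRIVER_CONTROL_7. A write to reg+4 sets the resource bit (lock),
	 * a write to the base register clears it (unlock, see
	 * bnx2x_release_hw_lock()).
	 */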
1855         if (func <= 5) {
1856                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857         } else {
1858                 hw_lock_control_reg =
1859                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860         }
1861
1862         /* Validating that the resource is not already taken */
1863         lock_status = REG_RD(bp, hw_lock_control_reg);
1864         if (lock_status & resource_bit) {
1865                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1866                    lock_status, resource_bit);
1867                 return -EEXIST;
1868         }
1869
1870         /* Try for 5 seconds, polling every 5ms */
1871         for (cnt = 0; cnt < 1000; cnt++) {
1872                 /* Try to acquire the lock */
1873                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874                 lock_status = REG_RD(bp, hw_lock_control_reg);
1875                 if (lock_status & resource_bit)
1876                         return 0;
1877
1878                 msleep(5);
1879         }
1880         DP(NETIF_MSG_HW, "Timeout\n");
1881         return -EAGAIN;
1882 }
1883
1884 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1885 {
1886         u32 lock_status;
1887         u32 resource_bit = (1 << resource);
1888         int func = BP_FUNC(bp);
1889         u32 hw_lock_control_reg;
1890
1891         /* Validating that the resource is within range */
1892         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893                 DP(NETIF_MSG_HW,
1894                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896                 return -EINVAL;
1897         }
1898
1899         if (func <= 5) {
1900                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901         } else {
1902                 hw_lock_control_reg =
1903                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904         }
1905
1906         /* Validating that the resource is currently taken */
1907         lock_status = REG_RD(bp, hw_lock_control_reg);
1908         if (!(lock_status & resource_bit)) {
1909                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1910                    lock_status, resource_bit);
1911                 return -EFAULT;
1912         }
1913
1914         REG_WR(bp, hw_lock_control_reg, resource_bit);
1915         return 0;
1916 }
1917
1918 /* HW Lock for shared dual port PHYs */
1919 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1920 {
1921         mutex_lock(&bp->port.phy_mutex);
1922
1923         if (bp->port.need_hw_lock)
1924                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1925 }
1926
1927 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1928 {
1929         if (bp->port.need_hw_lock)
1930                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1931
1932         mutex_unlock(&bp->port.phy_mutex);
1933 }
1934
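/* Read the current value of a single GPIO pin. The pin's position within
 * MISC_REG_GPIO depends on the (possibly swapped) port: port 1's pins
 * sit MISC_REGISTERS_GPIO_PORT_SHIFT bits above port 0's.
 */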
1935 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936 {
1937         /* The GPIO should be swapped if swap register is set and active */
1938         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940         int gpio_shift = gpio_num +
1941                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942         u32 gpio_mask = (1 << gpio_shift);
1943         u32 gpio_reg;
1944         int value;
1945
1946         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948                 return -EINVAL;
1949         }
1950
1951         /* read GPIO value */
1952         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954         /* get the requested pin value */
1955         if ((gpio_reg & gpio_mask) == gpio_mask)
1956                 value = 1;
1957         else
1958                 value = 0;
1959
1960         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1961
1962         return value;
1963 }
1964
1965 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1966 {
1967         /* The GPIO should be swapped if swap register is set and active */
1968         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1969                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1970         int gpio_shift = gpio_num +
1971                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972         u32 gpio_mask = (1 << gpio_shift);
1973         u32 gpio_reg;
1974
1975         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977                 return -EINVAL;
1978         }
1979
1980         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981         /* read GPIO and mask except the float bits */
1982         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1983
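	/* Each pin is controlled by three fields in MISC_REG_GPIO: FLOAT
	 * (tri-state), CLR (drive low) and SET (drive high). Only the
	 * FLOAT bits are carried over from the read above; CLR/SET are
	 * then applied for the single pin being reprogrammed.
	 */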
1984         switch (mode) {
1985         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987                    gpio_num, gpio_shift);
1988                 /* clear FLOAT and set CLR */
1989                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991                 break;
1992
1993         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995                    gpio_num, gpio_shift);
1996                 /* clear FLOAT and set SET */
1997                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999                 break;
2000
2001         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2002                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003                    gpio_num, gpio_shift);
2004                 /* set FLOAT */
2005                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006                 break;
2007
2008         default:
2009                 break;
2010         }
2011
2012         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2013         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2014
2015         return 0;
2016 }
2017
2018 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2019 {
2020         /* The GPIO should be swapped if swap register is set and active */
2021         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023         int gpio_shift = gpio_num +
2024                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025         u32 gpio_mask = (1 << gpio_shift);
2026         u32 gpio_reg;
2027
2028         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030                 return -EINVAL;
2031         }
2032
2033         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034         /* read GPIO int */
2035         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2036
2037         switch (mode) {
2038         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040                                    "output low\n", gpio_num, gpio_shift);
2041                 /* clear SET and set CLR */
2042                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044                 break;
2045
2046         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048                                    "output high\n", gpio_num, gpio_shift);
2049                 /* clear CLR and set SET */
2050                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052                 break;
2053
2054         default:
2055                 break;
2056         }
2057
2058         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2060
2061         return 0;
2062 }
2063
2064 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2065 {
2066         u32 spio_mask = (1 << spio_num);
2067         u32 spio_reg;
2068
2069         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070             (spio_num > MISC_REGISTERS_SPIO_7)) {
2071                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072                 return -EINVAL;
2073         }
2074
2075         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076         /* read SPIO and mask except the float bits */
2077         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2078
2079         switch (mode) {
2080         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2081                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082                 /* clear FLOAT and set CLR */
2083                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085                 break;
2086
2087         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2088                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089                 /* clear FLOAT and set SET */
2090                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092                 break;
2093
2094         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096                 /* set FLOAT */
2097                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098                 break;
2099
2100         default:
2101                 break;
2102         }
2103
2104         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2105         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2106
2107         return 0;
2108 }
2109
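/* Translate the negotiated IEEE flow control advertisement into the
 * ethtool-style advertising bits: BOTH maps to Pause | Asym_Pause,
 * ASYMMETRIC to Asym_Pause alone, and NONE (or anything else) clears
 * both bits.
 */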
2110 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2111 {
2112         switch (bp->link_vars.ieee_fc &
2113                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2114         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2115                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2116                                           ADVERTISED_Pause);
2117                 break;
2118
2119         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2120                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2121                                          ADVERTISED_Pause);
2122                 break;
2123
2124         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2125                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2126                 break;
2127
2128         default:
2129                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2130                                           ADVERTISED_Pause);
2131                 break;
2132         }
2133 }
2134
2135 static void bnx2x_link_report(struct bnx2x *bp)
2136 {
2137         if (bp->flags & MF_FUNC_DIS) {
2138                 netif_carrier_off(bp->dev);
2139                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2140                 return;
2141         }
2142
2143         if (bp->link_vars.link_up) {
2144                 u16 line_speed;
2145
2146                 if (bp->state == BNX2X_STATE_OPEN)
2147                         netif_carrier_on(bp->dev);
2148                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2149
2150                 line_speed = bp->link_vars.line_speed;
2151                 if (IS_E1HMF(bp)) {
2152                         u16 vn_max_rate;
2153
2154                         vn_max_rate =
2155                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157                         if (vn_max_rate < line_speed)
2158                                 line_speed = vn_max_rate;
2159                 }
2160                 printk("%d Mbps ", line_speed);
2161
2162                 if (bp->link_vars.duplex == DUPLEX_FULL)
2163                         printk("full duplex");
2164                 else
2165                         printk("half duplex");
2166
2167                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2169                                 printk(", receive ");
2170                                 if (bp->link_vars.flow_ctrl &
2171                                     BNX2X_FLOW_CTRL_TX)
2172                                         printk("& transmit ");
2173                         } else {
2174                                 printk(", transmit ");
2175                         }
2176                         printk("flow control ON");
2177                 }
2178                 printk("\n");
2179
2180         } else { /* link_down */
2181                 netif_carrier_off(bp->dev);
2182                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2183         }
2184 }
2185
2186 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2187 {
2188         if (!BP_NOMCP(bp)) {
2189                 u8 rc;
2190
2191                 /* Initialize link parameters structure variables */
2192                 /* It is recommended to turn off RX FC for jumbo frames
2193                    for better performance */
2194                 if (bp->dev->mtu > 5000)
2195                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2196                 else
2197                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2198
2199                 bnx2x_acquire_phy_lock(bp);
2200
2201                 if (load_mode == LOAD_DIAG)
2202                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2203
2204                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2205
2206                 bnx2x_release_phy_lock(bp);
2207
2208                 bnx2x_calc_fc_adv(bp);
2209
2210                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2212                         bnx2x_link_report(bp);
2213                 }
2214
2215                 return rc;
2216         }
2217         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2218         return -EINVAL;
2219 }
2220
2221 static void bnx2x_link_set(struct bnx2x *bp)
2222 {
2223         if (!BP_NOMCP(bp)) {
2224                 bnx2x_acquire_phy_lock(bp);
2225                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2226                 bnx2x_release_phy_lock(bp);
2227
2228                 bnx2x_calc_fc_adv(bp);
2229         } else
2230                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2231 }
2232
2233 static void bnx2x__link_reset(struct bnx2x *bp)
2234 {
2235         if (!BP_NOMCP(bp)) {
2236                 bnx2x_acquire_phy_lock(bp);
2237                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2238                 bnx2x_release_phy_lock(bp);
2239         } else
2240                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2241 }
2242
2243 static u8 bnx2x_link_test(struct bnx2x *bp)
2244 {
2245         u8 rc;
2246
2247         bnx2x_acquire_phy_lock(bp);
2248         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2249         bnx2x_release_phy_lock(bp);
2250
2251         return rc;
2252 }
2253
2254 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2255 {
2256         u32 r_param = bp->link_vars.line_speed / 8;
2257         u32 fair_periodic_timeout_usec;
2258         u32 t_fair;
2259
2260         memset(&(bp->cmng.rs_vars), 0,
2261                sizeof(struct rate_shaping_vars_per_port));
2262         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2263
2264         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2266
2267         /* this is the threshold below which no timer arming will occur
2268            the 1.25 coefficient makes the threshold a little bigger
2269            than the real time, to compensate for timer inaccuracy */
2270         bp->cmng.rs_vars.rs_threshold =
2271                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2272
2273         /* resolution of fairness timer */
2274         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2276         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2277
2278         /* this is the threshold below which we won't arm the timer anymore */
2279         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2280
2281         /* we multiply by 1e3/8 to get bytes/msec.
2282            We don't want the credits to exceed
2283            t_fair*FAIR_MEM (the algorithm resolution) */
2284         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285         /* since each tick is 4 usec */
2286         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2287 }
2288
2289 /* Calculates the sum of vn_min_rates.
2290    It's needed for further normalizing of the min_rates.
2291    Stores the result in bp->vn_weight_sum:
2292      the sum of vn_min_rates,
2293        or
2294      0 - if all the min_rates are 0.
2295      In the latter case the fairness algorithm should be deactivated.
2296      If not all min_rates are zero then those that are zeroes will be set to 1.
2297  */
2298 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2299 {
2300         int all_zero = 1;
2301         int port = BP_PORT(bp);
2302         int vn;
2303
2304         bp->vn_weight_sum = 0;
2305         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306                 int func = 2*vn + port;
2307                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2310
2311                 /* Skip hidden vns */
2312                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2313                         continue;
2314
2315                 /* If min rate is zero - set it to 1 */
2316                 if (!vn_min_rate)
2317                         vn_min_rate = DEF_MIN_RATE;
2318                 else
2319                         all_zero = 0;
2320
2321                 bp->vn_weight_sum += vn_min_rate;
2322         }
2323
2324         /* ... only if all min rates are zeros - disable fairness */
2325         if (all_zero) {
2326                 bp->cmng.flags.cmng_enables &=
2327                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2329                    " - fairness will be disabled\n");
2330         } else
2331                 bp->cmng.flags.cmng_enables |=
2332                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2333 }
2334
2335 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2336 {
2337         struct rate_shaping_vars_per_vn m_rs_vn;
2338         struct fairness_vars_per_vn m_fair_vn;
2339         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340         u16 vn_min_rate, vn_max_rate;
2341         int i;
2342
2343         /* If function is hidden - set min and max to zeroes */
2344         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2345                 vn_min_rate = 0;
2346                 vn_max_rate = 0;
2347
2348         } else {
2349                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2351                 /* If min rate is zero - set it to 1 */
2352                 if (!vn_min_rate)
2353                         vn_min_rate = DEF_MIN_RATE;
2354                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2356         }
2357         DP(NETIF_MSG_IFUP,
2358            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2359            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2360
2361         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2363
2364         /* global vn counter - maximal Mbps for this vn */
2365         m_rs_vn.vn_counter.rate = vn_max_rate;
2366
2367         /* quota - number of bytes transmitted in this period */
2368         m_rs_vn.vn_counter.quota =
2369                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
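	/* Since 1 Mbps is exactly 1 bit/usec, the quota in bytes is simply
	 * rate[Mbps] * period[usec] / 8: e.g. a 10000 Mbps vn with the
	 * 100 usec period set up in bnx2x_init_port_minmax() may send
	 * 125000 bytes per rate-shaping period.
	 */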
2370
2371         if (bp->vn_weight_sum) {
2372                 /* credit for each period of the fairness algorithm:
2373                    number of bytes in T_FAIR (the vns share the port rate).
2374                    vn_weight_sum should not be larger than 10000, thus
2375                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2376                    than zero */
2377                 m_fair_vn.vn_credit_delta =
2378                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2379                                                  (8 * bp->vn_weight_sum))),
2380                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2381                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382                    m_fair_vn.vn_credit_delta);
2383         }
2384
2385         /* Store it to internal memory */
2386         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389                        ((u32 *)(&m_rs_vn))[i]);
2390
2391         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394                        ((u32 *)(&m_fair_vn))[i]);
2395 }
2396
2397
2398 /* This function is called upon link interrupt */
2399 static void bnx2x_link_attn(struct bnx2x *bp)
2400 {
2401         /* Make sure that we are synced with the current statistics */
2402         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2403
2404         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2405
2406         if (bp->link_vars.link_up) {
2407
2408                 /* dropless flow control */
2409                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2410                         int port = BP_PORT(bp);
2411                         u32 pause_enabled = 0;
2412
2413                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2414                                 pause_enabled = 1;
2415
2416                         REG_WR(bp, BAR_USTRORM_INTMEM +
2417                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2418                                pause_enabled);
2419                 }
2420
2421                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422                         struct host_port_stats *pstats;
2423
2424                         pstats = bnx2x_sp(bp, port_stats);
2425                         /* reset old bmac stats */
2426                         memset(&(pstats->mac_stx[0]), 0,
2427                                sizeof(struct mac_stx));
2428                 }
2429                 if (bp->state == BNX2X_STATE_OPEN)
2430                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2431         }
2432
2433         /* indicate link status */
2434         bnx2x_link_report(bp);
2435
2436         if (IS_E1HMF(bp)) {
2437                 int port = BP_PORT(bp);
2438                 int func;
2439                 int vn;
2440
2441                 /* Set the attention towards other drivers on the same port */
2442                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443                         if (vn == BP_E1HVN(bp))
2444                                 continue;
2445
2446                         func = ((vn << 1) | port);
2447                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2449                 }
2450
2451                 if (bp->link_vars.link_up) {
2452                         int i;
2453
2454                         /* Init rate shaping and fairness contexts */
2455                         bnx2x_init_port_minmax(bp);
2456
2457                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2458                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2459
2460                         /* Store it to internal memory */
2461                         for (i = 0;
2462                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465                                        ((u32 *)(&bp->cmng))[i]);
2466                 }
2467         }
2468 }
2469
2470 static void bnx2x__link_status_update(struct bnx2x *bp)
2471 {
2472         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2473                 return;
2474
2475         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2476
2477         if (bp->link_vars.link_up)
2478                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479         else
2480                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
2482         bnx2x_calc_vn_weight_sum(bp);
2483
2484         /* indicate link status */
2485         bnx2x_link_report(bp);
2486 }
2487
2488 static void bnx2x_pmf_update(struct bnx2x *bp)
2489 {
2490         int port = BP_PORT(bp);
2491         u32 val;
2492
2493         bp->port.pmf = 1;
2494         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2495
2496         /* enable nig attention */
2497         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2500
2501         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2502 }
2503
2504 /* end of Link */
2505
2506 /* slow path */
2507
2508 /*
2509  * General service functions
2510  */
2511
2512 /* send the MCP a request, block until there is a reply */
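/* The driver and the MCP synchronize through a sequence number carried
 * in the low bits of the mailbox header: the request is written with
 * ++bp->fw_seq, and the FW echoes the same sequence in fw_mb_header
 * once it has processed the command.
 */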
2513 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2514 {
2515         int func = BP_FUNC(bp);
2516         u32 seq = ++bp->fw_seq;
2517         u32 rc = 0;
2518         u32 cnt = 1;
2519         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2520
2521         mutex_lock(&bp->fw_mb_mutex);
2522         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2524
2525         do {
2526                 /* let the FW do its magic ... */
2527                 msleep(delay);
2528
2529                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2530
2531                 /* Give the FW up to 5 seconds (500*10ms) */
2532         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2533
2534         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535            cnt*delay, rc, seq);
2536
2537         /* is this a reply to our command? */
2538         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539                 rc &= FW_MSG_CODE_MASK;
2540         else {
2541                 /* FW BUG! */
2542                 BNX2X_ERR("FW failed to respond!\n");
2543                 bnx2x_fw_dump(bp);
2544                 rc = 0;
2545         }
2546         mutex_unlock(&bp->fw_mb_mutex);
2547
2548         return rc;
2549 }
2550
2551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2552 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2553 static void bnx2x_set_rx_mode(struct net_device *dev);
2554
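/* In multi-function mode a function is disabled/enabled at runtime by
 * closing/opening its per-port LLH filter in the NIG and stopping/waking
 * its Tx queues; the netdev itself stays registered throughout.
 */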
2555 static void bnx2x_e1h_disable(struct bnx2x *bp)
2556 {
2557         int port = BP_PORT(bp);
2558
2559         netif_tx_disable(bp->dev);
2560
2561         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2562
2563         netif_carrier_off(bp->dev);
2564 }
2565
2566 static void bnx2x_e1h_enable(struct bnx2x *bp)
2567 {
2568         int port = BP_PORT(bp);
2569
2570         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2571
2572         /* Tx queues should only be re-enabled */
2573         netif_tx_wake_all_queues(bp->dev);
2574
2575         /*
2576          * Do not call netif_carrier_on here; it will be called when the
2577          * link state is checked and the link is up
2578          */
2579 }
2580
2581 static void bnx2x_update_min_max(struct bnx2x *bp)
2582 {
2583         int port = BP_PORT(bp);
2584         int vn, i;
2585
2586         /* Init rate shaping and fairness contexts */
2587         bnx2x_init_port_minmax(bp);
2588
2589         bnx2x_calc_vn_weight_sum(bp);
2590
2591         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2592                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2593
2594         if (bp->port.pmf) {
2595                 int func;
2596
2597                 /* Set the attention towards other drivers on the same port */
2598                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2599                         if (vn == BP_E1HVN(bp))
2600                                 continue;
2601
2602                         func = ((vn << 1) | port);
2603                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2604                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2605                 }
2606
2607                 /* Store it to internal memory */
2608                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2609                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2610                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2611                                ((u32 *)(&bp->cmng))[i]);
2612         }
2613 }
2614
2615 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2616 {
2617         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2618
2619         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2620
2621                 /*
2622                  * This is the only place besides the function initialization
2623                  * where the bp->flags can change so it is done without any
2624                  * locks
2625                  */
2626                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2627                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2628                         bp->flags |= MF_FUNC_DIS;
2629
2630                         bnx2x_e1h_disable(bp);
2631                 } else {
2632                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2633                         bp->flags &= ~MF_FUNC_DIS;
2634
2635                         bnx2x_e1h_enable(bp);
2636                 }
2637                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2638         }
2639         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2640
2641                 bnx2x_update_min_max(bp);
2642                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2643         }
2644
2645         /* Report results to MCP */
2646         if (dcc_event)
2647                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2648         else
2649                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2650 }
2651
2652 /* must be called under the spq lock */
2653 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654 {
2655         struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657         if (bp->spq_prod_bd == bp->spq_last_bd) {
2658                 bp->spq_prod_bd = bp->spq;
2659                 bp->spq_prod_idx = 0;
2660                 DP(NETIF_MSG_TIMER, "end of spq\n");
2661         } else {
2662                 bp->spq_prod_bd++;
2663                 bp->spq_prod_idx++;
2664         }
2665         return next_spe;
2666 }
2667
2668 /* must be called under the spq lock */
2669 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2670 {
2671         int func = BP_FUNC(bp);
2672
2673         /* Make sure that BD data is updated before writing the producer */
2674         wmb();
2675
2676         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2677                bp->spq_prod_idx);
2678         mmiowb();
2679 }
2680
2681 /* the slow path queue is odd since completions arrive on the fastpath ring */
2682 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2683                          u32 data_hi, u32 data_lo, int common)
2684 {
2685         struct eth_spe *spe;
2686
2687         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2688            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2689            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2690            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2691            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2692
2693 #ifdef BNX2X_STOP_ON_ERROR
2694         if (unlikely(bp->panic))
2695                 return -EIO;
2696 #endif
2697
2698         spin_lock_bh(&bp->spq_lock);
2699
2700         if (!bp->spq_left) {
2701                 BNX2X_ERR("BUG! SPQ ring full!\n");
2702                 spin_unlock_bh(&bp->spq_lock);
2703                 bnx2x_panic();
2704                 return -EBUSY;
2705         }
2706
2707         spe = bnx2x_sp_get_next(bp);
2708
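	/* Fill the SPQ element: the header carries the command id and the
	 * HW CID (plus the common-ramrod flag for non-connection-specific
	 * commands), while the data union carries the 64-bit parameter as
	 * hi/lo 32-bit halves.
	 */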
2709         /* The CID needs the port number to be encoded in it */
2710         spe->hdr.conn_and_cmd_data =
2711                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2712                                      HW_CID(bp, cid)));
2713         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2714         if (common)
2715                 spe->hdr.type |=
2716                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2717
2718         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2719         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2720
2721         bp->spq_left--;
2722
2723         bnx2x_sp_prod_update(bp);
2724         spin_unlock_bh(&bp->spq_lock);
2725         return 0;
2726 }
2727
2728 /* acquire split MCP access lock register */
2729 static int bnx2x_acquire_alr(struct bnx2x *bp)
2730 {
2731         u32 i, j, val;
2732         int rc = 0;
2733
2734         might_sleep();
2735         i = 100;
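	/* The lock is taken by writing bit 31 to the MCP access lock
	 * register and reading it back: the bit sticks only for the
	 * winner. Poll for up to 5 seconds (1000 * 5ms).
	 */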
2736         for (j = 0; j < i*10; j++) {
2737                 val = (1UL << 31);
2738                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2739                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2740                 if (val & (1L << 31))
2741                         break;
2742
2743                 msleep(5);
2744         }
2745         if (!(val & (1L << 31))) {
2746                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2747                 rc = -EBUSY;
2748         }
2749
2750         return rc;
2751 }
2752
2753 /* release split MCP access lock register */
2754 static void bnx2x_release_alr(struct bnx2x *bp)
2755 {
2756         u32 val = 0;
2757
2758         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759 }
2760
2761 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2762 {
2763         struct host_def_status_block *def_sb = bp->def_status_blk;
2764         u16 rc = 0;
2765
2766         barrier(); /* status block is written to by the chip */
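	/* Compare each cached index against the default status block and
	 * return a bitmask of what changed: bit 0 - attention bits,
	 * bit 1 - cstorm, bit 2 - ustorm, bit 3 - xstorm, bit 4 - tstorm.
	 */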
2767         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2768                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2769                 rc |= 1;
2770         }
2771         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2772                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2773                 rc |= 2;
2774         }
2775         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2776                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2777                 rc |= 4;
2778         }
2779         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2780                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2781                 rc |= 8;
2782         }
2783         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2784                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2785                 rc |= 16;
2786         }
2787         return rc;
2788 }
2789
2790 /*
2791  * slow path service functions
2792  */
2793
2794 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2795 {
2796         int port = BP_PORT(bp);
2797         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2798                        COMMAND_REG_ATTN_BITS_SET);
2799         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2800                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2801         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2802                                        NIG_REG_MASK_INTERRUPT_PORT0;
2803         u32 aeu_mask;
2804         u32 nig_mask = 0;
2805
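	/* An attention line already recorded as asserted must not assert
	 * again - that would indicate an IGU error. While it is being
	 * handled, the newly asserted bits are masked in the AEU so they
	 * cannot fire again until deasserted.
	 */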
2806         if (bp->attn_state & asserted)
2807                 BNX2X_ERR("IGU ERROR\n");
2808
2809         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2810         aeu_mask = REG_RD(bp, aeu_addr);
2811
2812         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2813            aeu_mask, asserted);
2814         aeu_mask &= ~(asserted & 0xff);
2815         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2816
2817         REG_WR(bp, aeu_addr, aeu_mask);
2818         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2819
2820         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2821         bp->attn_state |= asserted;
2822         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2823
2824         if (asserted & ATTN_HARD_WIRED_MASK) {
2825                 if (asserted & ATTN_NIG_FOR_FUNC) {
2826
2827                         bnx2x_acquire_phy_lock(bp);
2828
2829                         /* save nig interrupt mask */
2830                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2831                         REG_WR(bp, nig_int_mask_addr, 0);
2832
2833                         bnx2x_link_attn(bp);
2834
2835                         /* handle unicore attn? */
2836                 }
2837                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2838                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2839
2840                 if (asserted & GPIO_2_FUNC)
2841                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2842
2843                 if (asserted & GPIO_3_FUNC)
2844                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2845
2846                 if (asserted & GPIO_4_FUNC)
2847                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2848
2849                 if (port == 0) {
2850                         if (asserted & ATTN_GENERAL_ATTN_1) {
2851                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2852                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2853                         }
2854                         if (asserted & ATTN_GENERAL_ATTN_2) {
2855                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2856                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2857                         }
2858                         if (asserted & ATTN_GENERAL_ATTN_3) {
2859                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2860                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2861                         }
2862                 } else {
2863                         if (asserted & ATTN_GENERAL_ATTN_4) {
2864                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2865                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2866                         }
2867                         if (asserted & ATTN_GENERAL_ATTN_5) {
2868                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2869                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2870                         }
2871                         if (asserted & ATTN_GENERAL_ATTN_6) {
2872                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2873                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2874                         }
2875                 }
2876
2877         } /* if hardwired */
2878
2879         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2880            asserted, hc_addr);
2881         REG_WR(bp, hc_addr, asserted);
2882
2883         /* now set back the mask */
2884         if (asserted & ATTN_NIG_FOR_FUNC) {
2885                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2886                 bnx2x_release_phy_lock(bp);
2887         }
2888 }
2889
2890 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2891 {
2892         int port = BP_PORT(bp);
2893
2894         /* mark the failure */
2895         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2896         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2897         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2898                  bp->link_params.ext_phy_config);
2899
2900         /* log the failure */
2901         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2902                " the driver to shut down the card to prevent permanent"
2903                " damage.  Please contact Dell Support for assistance\n",
2904                bp->dev->name);
2905 }
2906
2907 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2908 {
2909         int port = BP_PORT(bp);
2910         int reg_offset;
2911         u32 val, swap_val, swap_override;
2912
2913         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2914                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2915
2916         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2917
2918                 val = REG_RD(bp, reg_offset);
2919                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2920                 REG_WR(bp, reg_offset, val);
2921
2922                 BNX2X_ERR("SPIO5 hw attention\n");
2923
2924                 /* Fan failure attention */
2925                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2926                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2927                         /* Low power mode is controlled by GPIO 2 */
2928                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2929                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2930                         /* The PHY reset is controlled by GPIO 1 */
2931                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2932                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2933                         break;
2934
2935                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2936                         /* The PHY reset is controlled by GPIO 1 */
2937                         /* fake the port number to cancel the swap done in
2938                            set_gpio() */
2939                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2940                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2941                         port = (swap_val && swap_override) ^ 1;
2942                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2943                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2944                         break;
2945
2946                 default:
2947                         break;
2948                 }
2949                 bnx2x_fan_failure(bp);
2950         }
2951
2952         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2953                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2954                 bnx2x_acquire_phy_lock(bp);
2955                 bnx2x_handle_module_detect_int(&bp->link_params);
2956                 bnx2x_release_phy_lock(bp);
2957         }
2958
2959         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2960
2961                 val = REG_RD(bp, reg_offset);
2962                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2963                 REG_WR(bp, reg_offset, val);
2964
2965                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2966                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2967                 bnx2x_panic();
2968         }
2969 }
2970
2971 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2972 {
2973         u32 val;
2974
2975         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2976
2977                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2978                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2979                 /* DORQ discard attention */
2980                 if (val & 0x2)
2981                         BNX2X_ERR("FATAL error from DORQ\n");
2982         }
2983
2984         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2985
2986                 int port = BP_PORT(bp);
2987                 int reg_offset;
2988
2989                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2990                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2991
2992                 val = REG_RD(bp, reg_offset);
2993                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2994                 REG_WR(bp, reg_offset, val);
2995
2996                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2997                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2998                 bnx2x_panic();
2999         }
3000 }
3001
3002 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3003 {
3004         u32 val;
3005
3006         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3007
3008                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3009                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3010                 /* CFC error attention */
3011                 if (val & 0x2)
3012                         BNX2X_ERR("FATAL error from CFC\n");
3013         }
3014
3015         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3016
3017                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3018                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3019                 /* RQ_USDMDP_FIFO_OVERFLOW */
3020                 if (val & 0x18000)
3021                         BNX2X_ERR("FATAL error from PXP\n");
3022         }
3023
3024         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3025
3026                 int port = BP_PORT(bp);
3027                 int reg_offset;
3028
3029                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3030                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3031
3032                 val = REG_RD(bp, reg_offset);
3033                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3034                 REG_WR(bp, reg_offset, val);
3035
3036                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3037                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3038                 bnx2x_panic();
3039         }
3040 }
3041
3042 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3043 {
3044         u32 val;
3045
3046         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3047
3048                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3049                         int func = BP_FUNC(bp);
3050
3051                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3052                         bp->mf_config = SHMEM_RD(bp,
3053                                            mf_cfg.func_mf_config[func].config);
3054                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3055                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3056                                 bnx2x_dcc_event(bp,
3057                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3058                         bnx2x__link_status_update(bp);
3059                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3060                                 bnx2x_pmf_update(bp);
3061
3062                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3063
3064                         BNX2X_ERR("MC assert!\n");
3065                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3066                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3067                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3068                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3069                         bnx2x_panic();
3070
3071                 } else if (attn & BNX2X_MCP_ASSERT) {
3072
3073                         BNX2X_ERR("MCP assert!\n");
3074                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3075                         bnx2x_fw_dump(bp);
3076
3077                 } else
3078                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3079         }
3080
3081         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3082                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3083                 if (attn & BNX2X_GRC_TIMEOUT) {
3084                         val = CHIP_IS_E1H(bp) ?
3085                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3086                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3087                 }
3088                 if (attn & BNX2X_GRC_RSV) {
3089                         val = CHIP_IS_E1H(bp) ?
3090                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3091                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3092                 }
3093                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3094         }
3095 }
3096
3097 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3098 {
3099         struct attn_route attn;
3100         struct attn_route group_mask;
3101         int port = BP_PORT(bp);
3102         int index;
3103         u32 reg_addr;
3104         u32 val;
3105         u32 aeu_mask;
3106
3107         /* need to take HW lock because MCP or other port might also
3108            try to handle this event */
3109         bnx2x_acquire_alr(bp);
3110
3111         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3112         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3113         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3114         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3115         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3116            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3117
3118         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3119                 if (deasserted & (1 << index)) {
3120                         group_mask = bp->attn_group[index];
3121
3122                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3123                            index, group_mask.sig[0], group_mask.sig[1],
3124                            group_mask.sig[2], group_mask.sig[3]);
3125
3126                         bnx2x_attn_int_deasserted3(bp,
3127                                         attn.sig[3] & group_mask.sig[3]);
3128                         bnx2x_attn_int_deasserted1(bp,
3129                                         attn.sig[1] & group_mask.sig[1]);
3130                         bnx2x_attn_int_deasserted2(bp,
3131                                         attn.sig[2] & group_mask.sig[2]);
3132                         bnx2x_attn_int_deasserted0(bp,
3133                                         attn.sig[0] & group_mask.sig[0]);
3134
3135                         if ((attn.sig[0] & group_mask.sig[0] &
3136                                                 HW_PRTY_ASSERT_SET_0) ||
3137                             (attn.sig[1] & group_mask.sig[1] &
3138                                                 HW_PRTY_ASSERT_SET_1) ||
3139                             (attn.sig[2] & group_mask.sig[2] &
3140                                                 HW_PRTY_ASSERT_SET_2))
3141                                 BNX2X_ERR("FATAL HW block parity attention\n");
3142                 }
3143         }
3144
3145         bnx2x_release_alr(bp);
3146
3147         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3148
3149         val = ~deasserted;
3150         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3151            val, reg_addr);
3152         REG_WR(bp, reg_addr, val);
3153
3154         if (~bp->attn_state & deasserted)
3155                 BNX2X_ERR("IGU ERROR\n");
3156
3157         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3158                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3159
3160         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3161         aeu_mask = REG_RD(bp, reg_addr);
3162
3163         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3164            aeu_mask, deasserted);
3165         aeu_mask |= (deasserted & 0xff);
3166         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3167
3168         REG_WR(bp, reg_addr, aeu_mask);
3169         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3170
3171         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3172         bp->attn_state &= ~deasserted;
3173         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3174 }
3175
3176 static void bnx2x_attn_int(struct bnx2x *bp)
3177 {
3178         /* read local copy of bits */
3179         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3180                                                                 attn_bits);
3181         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3182                                                                 attn_bits_ack);
3183         u32 attn_state = bp->attn_state;
3184
3185         /* look for changed bits */
3186         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3187         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
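         /* a bit is "asserted" when it is now set in attn_bits but was
          * neither acknowledged by the IGU nor recorded in attn_state;
          * it is "deasserted" when it has cleared in attn_bits while
          * still acknowledged and recorded
          */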
3188
3189         DP(NETIF_MSG_HW,
3190            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3191            attn_bits, attn_ack, asserted, deasserted);
3192
3193         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3194                 BNX2X_ERR("BAD attention state\n");
3195
3196         /* handle bits that were raised */
3197         if (asserted)
3198                 bnx2x_attn_int_asserted(bp, asserted);
3199
3200         if (deasserted)
3201                 bnx2x_attn_int_deasserted(bp, deasserted);
3202 }
3203
3204 static void bnx2x_sp_task(struct work_struct *work)
3205 {
3206         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3207         u16 status;
3208
3209
3210         /* Return here if interrupt is disabled */
3211         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3212                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3213                 return;
3214         }
3215
3216         status = bnx2x_update_dsb_idx(bp);
3217 /*      if (status == 0)                                     */
3218 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3219
3220         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3221
3222         /* HW attentions */
3223         if (status & 0x1)
3224                 bnx2x_attn_int(bp);
3225
3226         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3227                      IGU_INT_NOP, 1);
3228         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3229                      IGU_INT_NOP, 1);
3230         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3231                      IGU_INT_NOP, 1);
3232         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3233                      IGU_INT_NOP, 1);
3234         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3235                      IGU_INT_ENABLE, 1);
3236
3237 }
3238
3239 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3240 {
3241         struct net_device *dev = dev_instance;
3242         struct bnx2x *bp = netdev_priv(dev);
3243
3244         /* Return here if interrupt is disabled */
3245         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3246                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3247                 return IRQ_HANDLED;
3248         }
3249
3250         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3251
3252 #ifdef BNX2X_STOP_ON_ERROR
3253         if (unlikely(bp->panic))
3254                 return IRQ_HANDLED;
3255 #endif
3256
3257 #ifdef BCM_CNIC
3258         {
3259                 struct cnic_ops *c_ops;
3260
3261                 rcu_read_lock();
3262                 c_ops = rcu_dereference(bp->cnic_ops);
3263                 if (c_ops)
3264                         c_ops->cnic_handler(bp->cnic_data, NULL);
3265                 rcu_read_unlock();
3266         }
3267 #endif
3268         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3269
3270         return IRQ_HANDLED;
3271 }
3272
3273 /* end of slow path */
3274
3275 /* Statistics */
3276
3277 /****************************************************************************
3278 * Macros
3279 ****************************************************************************/
3280
3281 /* sum[hi:lo] += add[hi:lo] */
3282 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3283         do { \
3284                 s_lo += a_lo; \
3285                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3286         } while (0)
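     /* e.g. adding 1 to 0x00000000_ffffffff: s_lo wraps to 0, and the
      * (s_lo < a_lo) test is true exactly when a carry out of the low
      * dword occurred, so s_hi is bumped to give 0x00000001_00000000
      */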
3287
3288 /* difference = minuend - subtrahend */
3289 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3290         do { \
3291                 if (m_lo < s_lo) { \
3292                         /* underflow */ \
3293                         d_hi = m_hi - s_hi; \
3294                         if (d_hi > 0) { \
3295                                 /* we can 'loan' 1 */ \
3296                                 d_hi--; \
3297                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3298                         } else { \
3299                                 /* m_hi <= s_hi */ \
3300                                 d_hi = 0; \
3301                                 d_lo = 0; \
3302                         } \
3303                 } else { \
3304                         /* m_lo >= s_lo */ \
3305                         if (m_hi < s_hi) { \
3306                                 d_hi = 0; \
3307                                 d_lo = 0; \
3308                         } else { \
3309                                 /* m_hi >= s_hi */ \
3310                                 d_hi = m_hi - s_hi; \
3311                                 d_lo = m_lo - s_lo; \
3312                         } \
3313                 } \
3314         } while (0)
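     /* note: if the 64-bit subtrahend exceeds the minuend (e.g. after a
      * counter reset) the result clamps to 0 rather than wrapping
      */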
3315
3316 #define UPDATE_STAT64(s, t) \
3317         do { \
3318                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3319                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3320                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3321                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3322                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3323                        pstats->mac_stx[1].t##_lo, diff.lo); \
3324         } while (0)
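     /* mac_stx[0] keeps the last raw HW snapshot while mac_stx[1]
      * accumulates the per-read deltas into a running total
      */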
3325
3326 #define UPDATE_STAT64_NIG(s, t) \
3327         do { \
3328                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3329                         diff.lo, new->s##_lo, old->s##_lo); \
3330                 ADD_64(estats->t##_hi, diff.hi, \
3331                        estats->t##_lo, diff.lo); \
3332         } while (0)
3333
3334 /* sum[hi:lo] += add */
3335 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3336         do { \
3337                 s_lo += a; \
3338                 s_hi += (s_lo < a) ? 1 : 0; \
3339         } while (0)
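     /* same carry trick as ADD_64, extending a 32-bit addend to 64 bits */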
3340
3341 #define UPDATE_EXTEND_STAT(s) \
3342         do { \
3343                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3344                               pstats->mac_stx[1].s##_lo, \
3345                               new->s); \
3346         } while (0)
3347
3348 #define UPDATE_EXTEND_TSTAT(s, t) \
3349         do { \
3350                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3351                 old_tclient->s = tclient->s; \
3352                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3353         } while (0)
3354
3355 #define UPDATE_EXTEND_USTAT(s, t) \
3356         do { \
3357                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3358                 old_uclient->s = uclient->s; \
3359                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3360         } while (0)
3361
3362 #define UPDATE_EXTEND_XSTAT(s, t) \
3363         do { \
3364                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3365                 old_xclient->s = xclient->s; \
3366                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3367         } while (0)
3368
3369 /* minuend -= subtrahend */
3370 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3371         do { \
3372                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3373         } while (0)
3374
3375 /* minuend[hi:lo] -= subtrahend */
3376 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3377         do { \
3378                 SUB_64(m_hi, 0, m_lo, s); \
3379         } while (0)
3380
3381 #define SUB_EXTEND_USTAT(s, t) \
3382         do { \
3383                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3384                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3385         } while (0)
3386
3387 /*
3388  * General service functions
3389  */
3390
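     /* hiref points at a {hi, lo} pair of u32s (hi first); on 32-bit
      * platforms a long can only hold the low dword
      */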
3391 static inline long bnx2x_hilo(u32 *hiref)
3392 {
3393         u32 lo = *(hiref + 1);
3394 #if (BITS_PER_LONG == 64)
3395         u32 hi = *hiref;
3396
3397         return HILO_U64(hi, lo);
3398 #else
3399         return lo;
3400 #endif
3401 }
3402
3403 /*
3404  * Init service functions
3405  */
3406
3407 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3408 {
3409         if (!bp->stats_pending) {
3410                 struct eth_query_ramrod_data ramrod_data = {0};
3411                 int i, rc;
3412
3413                 ramrod_data.drv_counter = bp->stats_counter++;
3414                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
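                 /* set one bit per client id so the FW collects
                  * statistics for every active queue
                  */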
3415                 for_each_queue(bp, i)
3416                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3417
3418                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3419                                    ((u32 *)&ramrod_data)[1],
3420                                    ((u32 *)&ramrod_data)[0], 0);
3421                 if (rc == 0) {
3422                         /* stats ramrod has its own slot on the spq */
3423                         bp->spq_left++;
3424                         bp->stats_pending = 1;
3425                 }
3426         }
3427 }
3428
3429 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3430 {
3431         struct dmae_command *dmae = &bp->stats_dmae;
3432         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3433
3434         *stats_comp = DMAE_COMP_VAL;
3435         if (CHIP_REV_IS_SLOW(bp))
3436                 return;
3437
3438         /* loader */
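         /* commands were queued: build a loader command that copies the
          * first queued command into DMAE command memory at slot
          * loader_idx + 1; its completion write to
          * dmae_reg_go_c[loader_idx + 1] then triggers it
          */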
3439         if (bp->executer_idx) {
3440                 int loader_idx = PMF_DMAE_C(bp);
3441
3442                 memset(dmae, 0, sizeof(struct dmae_command));
3443
3444                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3445                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3446                                 DMAE_CMD_DST_RESET |
3447 #ifdef __BIG_ENDIAN
3448                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3449 #else
3450                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3451 #endif
3452                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3453                                                DMAE_CMD_PORT_0) |
3454                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3455                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3456                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3457                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3458                                      sizeof(struct dmae_command) *
3459                                      (loader_idx + 1)) >> 2;
3460                 dmae->dst_addr_hi = 0;
3461                 dmae->len = sizeof(struct dmae_command) >> 2;
3462                 if (CHIP_IS_E1(bp))
3463                         dmae->len--;
3464                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3465                 dmae->comp_addr_hi = 0;
3466                 dmae->comp_val = 1;
3467
3468                 *stats_comp = 0;
3469                 bnx2x_post_dmae(bp, dmae, loader_idx);
3470
3471         } else if (bp->func_stx) {
3472                 *stats_comp = 0;
3473                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3474         }
3475 }
3476
3477 static int bnx2x_stats_comp(struct bnx2x *bp)
3478 {
3479         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3480         int cnt = 10;
3481
3482         might_sleep();
3483         while (*stats_comp != DMAE_COMP_VAL) {
3484                 if (!cnt) {
3485                         BNX2X_ERR("timeout waiting for stats to finish\n");
3486                         break;
3487                 }
3488                 cnt--;
3489                 msleep(1);
3490         }
3491         return 1;
3492 }
3493
3494 /*
3495  * Statistics service functions
3496  */
3497
3498 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3499 {
3500         struct dmae_command *dmae;
3501         u32 opcode;
3502         int loader_idx = PMF_DMAE_C(bp);
3503         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3504
3505         /* sanity */
3506         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3507                 BNX2X_ERR("BUG!\n");
3508                 return;
3509         }
3510
3511         bp->executer_idx = 0;
3512
3513         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3514                   DMAE_CMD_C_ENABLE |
3515                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3516 #ifdef __BIG_ENDIAN
3517                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3518 #else
3519                   DMAE_CMD_ENDIANITY_DW_SWAP |
3520 #endif
3521                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3522                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3523
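         /* the port stats area is wider than a single DMAE read allows
          * (DMAE_LEN32_RD_MAX dwords), so fetch it in two chunks
          */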
3524         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3525         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3526         dmae->src_addr_lo = bp->port.port_stx >> 2;
3527         dmae->src_addr_hi = 0;
3528         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3529         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3530         dmae->len = DMAE_LEN32_RD_MAX;
3531         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3532         dmae->comp_addr_hi = 0;
3533         dmae->comp_val = 1;
3534
3535         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3536         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3537         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3538         dmae->src_addr_hi = 0;
3539         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3540                                    DMAE_LEN32_RD_MAX * 4);
3541         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3542                                    DMAE_LEN32_RD_MAX * 4);
3543         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3544         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3545         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3546         dmae->comp_val = DMAE_COMP_VAL;
3547
3548         *stats_comp = 0;
3549         bnx2x_hw_stats_post(bp);
3550         bnx2x_stats_comp(bp);
3551 }
3552
3553 static void bnx2x_port_stats_init(struct bnx2x *bp)
3554 {
3555         struct dmae_command *dmae;
3556         int port = BP_PORT(bp);
3557         int vn = BP_E1HVN(bp);
3558         u32 opcode;
3559         int loader_idx = PMF_DMAE_C(bp);
3560         u32 mac_addr;
3561         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3562
3563         /* sanity */
3564         if (!bp->link_vars.link_up || !bp->port.pmf) {
3565                 BNX2X_ERR("BUG!\n");
3566                 return;
3567         }
3568
3569         bp->executer_idx = 0;
3570
3571         /* MCP */
3572         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3573                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3574                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3575 #ifdef __BIG_ENDIAN
3576                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3577 #else
3578                   DMAE_CMD_ENDIANITY_DW_SWAP |
3579 #endif
3580                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3581                   (vn << DMAE_CMD_E1HVN_SHIFT));
3582
3583         if (bp->port.port_stx) {
3584
3585                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3586                 dmae->opcode = opcode;
3587                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3588                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3589                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3590                 dmae->dst_addr_hi = 0;
3591                 dmae->len = sizeof(struct host_port_stats) >> 2;
3592                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3593                 dmae->comp_addr_hi = 0;
3594                 dmae->comp_val = 1;
3595         }
3596
3597         if (bp->func_stx) {
3598
3599                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3600                 dmae->opcode = opcode;
3601                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3602                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3603                 dmae->dst_addr_lo = bp->func_stx >> 2;
3604                 dmae->dst_addr_hi = 0;
3605                 dmae->len = sizeof(struct host_func_stats) >> 2;
3606                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3607                 dmae->comp_addr_hi = 0;
3608                 dmae->comp_val = 1;
3609         }
3610
3611         /* MAC */
3612         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3613                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3614                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3615 #ifdef __BIG_ENDIAN
3616                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3617 #else
3618                   DMAE_CMD_ENDIANITY_DW_SWAP |
3619 #endif
3620                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3621                   (vn << DMAE_CMD_E1HVN_SHIFT));
3622
3623         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3624
3625                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3626                                    NIG_REG_INGRESS_BMAC0_MEM);
3627
3628                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3629                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3630                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3631                 dmae->opcode = opcode;
3632                 dmae->src_addr_lo = (mac_addr +
3633                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3634                 dmae->src_addr_hi = 0;
3635                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3636                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3637                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3638                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3639                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3640                 dmae->comp_addr_hi = 0;
3641                 dmae->comp_val = 1;
3642
3643                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3644                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3645                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3646                 dmae->opcode = opcode;
3647                 dmae->src_addr_lo = (mac_addr +
3648                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3649                 dmae->src_addr_hi = 0;
3650                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3651                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3652                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3653                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3654                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3655                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3656                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657                 dmae->comp_addr_hi = 0;
3658                 dmae->comp_val = 1;
3659
3660         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3661
3662                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3663
3664                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3665                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3666                 dmae->opcode = opcode;
3667                 dmae->src_addr_lo = (mac_addr +
3668                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3669                 dmae->src_addr_hi = 0;
3670                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3671                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3672                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3673                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674                 dmae->comp_addr_hi = 0;
3675                 dmae->comp_val = 1;
3676
3677                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3678                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3679                 dmae->opcode = opcode;
3680                 dmae->src_addr_lo = (mac_addr +
3681                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3682                 dmae->src_addr_hi = 0;
3683                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3684                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3685                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3686                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3687                 dmae->len = 1;
3688                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3689                 dmae->comp_addr_hi = 0;
3690                 dmae->comp_val = 1;
3691
3692                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3693                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3694                 dmae->opcode = opcode;
3695                 dmae->src_addr_lo = (mac_addr +
3696                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3697                 dmae->src_addr_hi = 0;
3698                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3699                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3700                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3701                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3702                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3703                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3704                 dmae->comp_addr_hi = 0;
3705                 dmae->comp_val = 1;
3706         }
3707
3708         /* NIG */
3709         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710         dmae->opcode = opcode;
3711         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3712                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3713         dmae->src_addr_hi = 0;
3714         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3715         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3716         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3717         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3718         dmae->comp_addr_hi = 0;
3719         dmae->comp_val = 1;
3720
3721         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3722         dmae->opcode = opcode;
3723         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3724                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3725         dmae->src_addr_hi = 0;
3726         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3727                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3728         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3729                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3730         dmae->len = (2*sizeof(u32)) >> 2;
3731         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3732         dmae->comp_addr_hi = 0;
3733         dmae->comp_val = 1;
3734
3735         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3736         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3737                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3738                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3739 #ifdef __BIG_ENDIAN
3740                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3741 #else
3742                         DMAE_CMD_ENDIANITY_DW_SWAP |
3743 #endif
3744                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3745                         (vn << DMAE_CMD_E1HVN_SHIFT));
3746         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3747                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3748         dmae->src_addr_hi = 0;
3749         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3751         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3753         dmae->len = (2*sizeof(u32)) >> 2;
3754         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3755         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3756         dmae->comp_val = DMAE_COMP_VAL;
3757
3758         *stats_comp = 0;
3759 }
3760
3761 static void bnx2x_func_stats_init(struct bnx2x *bp)
3762 {
3763         struct dmae_command *dmae = &bp->stats_dmae;
3764         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3765
3766         /* sanity */
3767         if (!bp->func_stx) {
3768                 BNX2X_ERR("BUG!\n");
3769                 return;
3770         }
3771
3772         bp->executer_idx = 0;
3773         memset(dmae, 0, sizeof(struct dmae_command));
3774
3775         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3776                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3777                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3778 #ifdef __BIG_ENDIAN
3779                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3780 #else
3781                         DMAE_CMD_ENDIANITY_DW_SWAP |
3782 #endif
3783                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3784                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3785         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3786         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3787         dmae->dst_addr_lo = bp->func_stx >> 2;
3788         dmae->dst_addr_hi = 0;
3789         dmae->len = sizeof(struct host_func_stats) >> 2;
3790         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3791         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3792         dmae->comp_val = DMAE_COMP_VAL;
3793
3794         *stats_comp = 0;
3795 }
3796
3797 static void bnx2x_stats_start(struct bnx2x *bp)
3798 {
3799         if (bp->port.pmf)
3800                 bnx2x_port_stats_init(bp);
3801
3802         else if (bp->func_stx)
3803                 bnx2x_func_stats_init(bp);
3804
3805         bnx2x_hw_stats_post(bp);
3806         bnx2x_storm_stats_post(bp);
3807 }
3808
3809 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3810 {
3811         bnx2x_stats_comp(bp);
3812         bnx2x_stats_pmf_update(bp);
3813         bnx2x_stats_start(bp);
3814 }
3815
3816 static void bnx2x_stats_restart(struct bnx2x *bp)
3817 {
3818         bnx2x_stats_comp(bp);
3819         bnx2x_stats_start(bp);
3820 }
3821
3822 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3823 {
3824         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3825         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3826         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3827         struct {
3828                 u32 lo;
3829                 u32 hi;
3830         } diff;
3831
3832         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3833         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3834         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3835         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3836         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3837         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3838         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3839         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3840         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3841         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3842         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3843         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3844         UPDATE_STAT64(tx_stat_gt127,
3845                                 tx_stat_etherstatspkts65octetsto127octets);
3846         UPDATE_STAT64(tx_stat_gt255,
3847                                 tx_stat_etherstatspkts128octetsto255octets);
3848         UPDATE_STAT64(tx_stat_gt511,
3849                                 tx_stat_etherstatspkts256octetsto511octets);
3850         UPDATE_STAT64(tx_stat_gt1023,
3851                                 tx_stat_etherstatspkts512octetsto1023octets);
3852         UPDATE_STAT64(tx_stat_gt1518,
3853                                 tx_stat_etherstatspkts1024octetsto1522octets);
3854         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3855         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3856         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3857         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3858         UPDATE_STAT64(tx_stat_gterr,
3859                                 tx_stat_dot3statsinternalmactransmiterrors);
3860         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3861
3862         estats->pause_frames_received_hi =
3863                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3864         estats->pause_frames_received_lo =
3865                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3866
3867         estats->pause_frames_sent_hi =
3868                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3869         estats->pause_frames_sent_lo =
3870                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3871 }
3872
3873 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3874 {
3875         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3876         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3877         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3878
3879         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3880         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3881         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3882         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3883         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3884         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3885         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3886         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3887         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3888         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3889         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3890         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3891         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3892         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3893         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3894         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3895         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3896         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3897         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3898         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3899         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3900         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3901         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3902         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3903         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3904         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3905         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3906         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3907         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3908         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3909         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3910
3911         estats->pause_frames_received_hi =
3912                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3913         estats->pause_frames_received_lo =
3914                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3915         ADD_64(estats->pause_frames_received_hi,
3916                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3917                estats->pause_frames_received_lo,
3918                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3919
3920         estats->pause_frames_sent_hi =
3921                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3922         estats->pause_frames_sent_lo =
3923                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3924         ADD_64(estats->pause_frames_sent_hi,
3925                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3926                estats->pause_frames_sent_lo,
3927                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3928 }
3929
3930 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3931 {
3932         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3933         struct nig_stats *old = &(bp->port.old_nig_stats);
3934         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3935         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3936         struct {
3937                 u32 lo;
3938                 u32 hi;
3939         } diff;
3940         u32 nig_timer_max;
3941
3942         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3943                 bnx2x_bmac_stats_update(bp);
3944
3945         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3946                 bnx2x_emac_stats_update(bp);
3947
3948         else { /* unreached */
3949                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3950                 return -1;
3951         }
3952
3953         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3954                       new->brb_discard - old->brb_discard);
3955         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3956                       new->brb_truncate - old->brb_truncate);
3957
3958         UPDATE_STAT64_NIG(egress_mac_pkt0,
3959                                         etherstatspkts1024octetsto1522octets);
3960         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3961
3962         memcpy(old, new, sizeof(struct nig_stats));
3963
3964         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3965                sizeof(struct mac_stx));
3966         estats->brb_drop_hi = pstats->brb_drop_hi;
3967         estats->brb_drop_lo = pstats->brb_drop_lo;
3968
3969         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3970
3971         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3972         if (nig_timer_max != estats->nig_timer_max) {
3973                 estats->nig_timer_max = nig_timer_max;
3974                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3975         }
3976
3977         return 0;
3978 }
3979
3980 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3981 {
3982         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3983         struct tstorm_per_port_stats *tport =
3984                                         &stats->tstorm_common.port_statistics;
3985         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3986         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3987         int i;
3988
3989         memcpy(&(fstats->total_bytes_received_hi),
3990                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3991                sizeof(struct host_func_stats) - 2*sizeof(u32));
3992         estats->error_bytes_received_hi = 0;
3993         estats->error_bytes_received_lo = 0;
3994         estats->etherstatsoverrsizepkts_hi = 0;
3995         estats->etherstatsoverrsizepkts_lo = 0;
3996         estats->no_buff_discard_hi = 0;
3997         estats->no_buff_discard_lo = 0;
3998
3999         for_each_queue(bp, i) {
4000                 struct bnx2x_fastpath *fp = &bp->fp[i];
4001                 int cl_id = fp->cl_id;
4002                 struct tstorm_per_client_stats *tclient =
4003                                 &stats->tstorm_common.client_statistics[cl_id];
4004                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4005                 struct ustorm_per_client_stats *uclient =
4006                                 &stats->ustorm_common.client_statistics[cl_id];
4007                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4008                 struct xstorm_per_client_stats *xclient =
4009                                 &stats->xstorm_common.client_statistics[cl_id];
4010                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4011                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4012                 u32 diff;
4013
4014                 /* are storm stats valid? */
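                 /* each storm echoes the drv_counter it last serviced;
                  * since bp->stats_counter was post-incremented when the
                  * query was posted, echoed counter + 1 must equal it
                  */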
4015                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4016                                                         bp->stats_counter) {
4017                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4018                            "  xstorm counter (%d) != stats_counter (%d)\n",
4019                            i, xclient->stats_counter, bp->stats_counter);
4020                         return -1;
4021                 }
4022                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4023                                                         bp->stats_counter) {
4024                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4025                            "  tstorm counter (%d) != stats_counter (%d)\n",
4026                            i, tclient->stats_counter, bp->stats_counter);
4027                         return -2;
4028                 }
4029                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4030                                                         bp->stats_counter) {
4031                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4032                            "  ustorm counter (%d) != stats_counter (%d)\n",
4033                            i, uclient->stats_counter, bp->stats_counter);
4034                         return -4;
4035                 }
4036
4037                 qstats->total_bytes_received_hi =
4038                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4039                 qstats->total_bytes_received_lo =
4040                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4041
4042                 ADD_64(qstats->total_bytes_received_hi,
4043                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4044                        qstats->total_bytes_received_lo,
4045                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4046
4047                 ADD_64(qstats->total_bytes_received_hi,
4048                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4049                        qstats->total_bytes_received_lo,
4050                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4051
4052                 qstats->valid_bytes_received_hi =
4053                                         qstats->total_bytes_received_hi;
4054                 qstats->valid_bytes_received_lo =
4055                                         qstats->total_bytes_received_lo;
4056
4057                 qstats->error_bytes_received_hi =
4058                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4059                 qstats->error_bytes_received_lo =
4060                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4061
4062                 ADD_64(qstats->total_bytes_received_hi,
4063                        qstats->error_bytes_received_hi,
4064                        qstats->total_bytes_received_lo,
4065                        qstats->error_bytes_received_lo);
4066
4067                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4068                                         total_unicast_packets_received);
4069                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4070                                         total_multicast_packets_received);
4071                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4072                                         total_broadcast_packets_received);
4073                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4074                                         etherstatsoverrsizepkts);
4075                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4076
4077                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4078                                         total_unicast_packets_received);
4079                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4080                                         total_multicast_packets_received);
4081                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4082                                         total_broadcast_packets_received);
4083                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4084                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4085                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4086
4087                 qstats->total_bytes_transmitted_hi =
4088                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4089                 qstats->total_bytes_transmitted_lo =
4090                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4091
4092                 ADD_64(qstats->total_bytes_transmitted_hi,
4093                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4094                        qstats->total_bytes_transmitted_lo,
4095                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4096
4097                 ADD_64(qstats->total_bytes_transmitted_hi,
4098                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4099                        qstats->total_bytes_transmitted_lo,
4100                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4101
4102                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4103                                         total_unicast_packets_transmitted);
4104                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4105                                         total_multicast_packets_transmitted);
4106                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4107                                         total_broadcast_packets_transmitted);
4108
4109                 old_tclient->checksum_discard = tclient->checksum_discard;
4110                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4111
4112                 ADD_64(fstats->total_bytes_received_hi,
4113                        qstats->total_bytes_received_hi,
4114                        fstats->total_bytes_received_lo,
4115                        qstats->total_bytes_received_lo);
4116                 ADD_64(fstats->total_bytes_transmitted_hi,
4117                        qstats->total_bytes_transmitted_hi,
4118                        fstats->total_bytes_transmitted_lo,
4119                        qstats->total_bytes_transmitted_lo);
4120                 ADD_64(fstats->total_unicast_packets_received_hi,
4121                        qstats->total_unicast_packets_received_hi,
4122                        fstats->total_unicast_packets_received_lo,
4123                        qstats->total_unicast_packets_received_lo);
4124                 ADD_64(fstats->total_multicast_packets_received_hi,
4125                        qstats->total_multicast_packets_received_hi,
4126                        fstats->total_multicast_packets_received_lo,
4127                        qstats->total_multicast_packets_received_lo);
4128                 ADD_64(fstats->total_broadcast_packets_received_hi,
4129                        qstats->total_broadcast_packets_received_hi,
4130                        fstats->total_broadcast_packets_received_lo,
4131                        qstats->total_broadcast_packets_received_lo);
4132                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4133                        qstats->total_unicast_packets_transmitted_hi,
4134                        fstats->total_unicast_packets_transmitted_lo,
4135                        qstats->total_unicast_packets_transmitted_lo);
4136                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4137                        qstats->total_multicast_packets_transmitted_hi,
4138                        fstats->total_multicast_packets_transmitted_lo,
4139                        qstats->total_multicast_packets_transmitted_lo);
4140                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4141                        qstats->total_broadcast_packets_transmitted_hi,
4142                        fstats->total_broadcast_packets_transmitted_lo,
4143                        qstats->total_broadcast_packets_transmitted_lo);
4144                 ADD_64(fstats->valid_bytes_received_hi,
4145                        qstats->valid_bytes_received_hi,
4146                        fstats->valid_bytes_received_lo,
4147                        qstats->valid_bytes_received_lo);
4148
4149                 ADD_64(estats->error_bytes_received_hi,
4150                        qstats->error_bytes_received_hi,
4151                        estats->error_bytes_received_lo,
4152                        qstats->error_bytes_received_lo);
4153                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4154                        qstats->etherstatsoverrsizepkts_hi,
4155                        estats->etherstatsoverrsizepkts_lo,
4156                        qstats->etherstatsoverrsizepkts_lo);
4157                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4158                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4159         }
4160
4161         ADD_64(fstats->total_bytes_received_hi,
4162                estats->rx_stat_ifhcinbadoctets_hi,
4163                fstats->total_bytes_received_lo,
4164                estats->rx_stat_ifhcinbadoctets_lo);
4165
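        /* The memcpy below assumes bnx2x_eth_stats mirrors the counter
         * layout of host_func_stats starting at total_bytes_received_hi;
         * the "- 2*sizeof(u32)" trims the host_func_stats_start/_end
         * marker words that frame the structure (layout per bnx2x.h).
         */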
4166         memcpy(estats, &(fstats->total_bytes_received_hi),
4167                sizeof(struct host_func_stats) - 2*sizeof(u32));
4168
4169         ADD_64(estats->etherstatsoverrsizepkts_hi,
4170                estats->rx_stat_dot3statsframestoolong_hi,
4171                estats->etherstatsoverrsizepkts_lo,
4172                estats->rx_stat_dot3statsframestoolong_lo);
4173         ADD_64(estats->error_bytes_received_hi,
4174                estats->rx_stat_ifhcinbadoctets_hi,
4175                estats->error_bytes_received_lo,
4176                estats->rx_stat_ifhcinbadoctets_lo);
4177
4178         if (bp->port.pmf) {
4179                 estats->mac_filter_discard =
4180                                 le32_to_cpu(tport->mac_filter_discard);
4181                 estats->xxoverflow_discard =
4182                                 le32_to_cpu(tport->xxoverflow_discard);
4183                 estats->brb_truncate_discard =
4184                                 le32_to_cpu(tport->brb_truncate_discard);
4185                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4186         }
4187
4188         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4189
4190         bp->stats_pending = 0;
4191
4192         return 0;
4193 }
4194
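/*
 * A note on the accumulation above: ADD_64() and the UPDATE_EXTEND_*STAT()
 * helpers (from bnx2x.h) keep 64-bit counters as {hi, lo} u32 pairs.
 * ADD_64(s_hi, a_hi, s_lo, a_lo) is roughly:
 *
 *	s_lo += a_lo;
 *	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);
 *
 * where the comparison detects wrap of the low half and carries into the
 * high half, so per-queue qstats roll up into the per-function fstats
 * without a native u64.
 */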
4195 static void bnx2x_net_stats_update(struct bnx2x *bp)
4196 {
4197         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4198         struct net_device_stats *nstats = &bp->dev->stats;
4199         int i;
4200
4201         nstats->rx_packets =
4202                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4203                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4204                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4205
4206         nstats->tx_packets =
4207                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4208                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4209                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4210
4211         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4212
4213         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4214
4215         nstats->rx_dropped = estats->mac_discard;
4216         for_each_queue(bp, i)
4217                 nstats->rx_dropped +=
4218                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4219
4220         nstats->tx_dropped = 0;
4221
4222         nstats->multicast =
4223                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4224
4225         nstats->collisions =
4226                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4227
4228         nstats->rx_length_errors =
4229                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4230                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4231         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4232                                  bnx2x_hilo(&estats->brb_truncate_hi);
4233         nstats->rx_crc_errors =
4234                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4235         nstats->rx_frame_errors =
4236                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4237         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4238         nstats->rx_missed_errors = estats->xxoverflow_discard;
4239
4240         nstats->rx_errors = nstats->rx_length_errors +
4241                             nstats->rx_over_errors +
4242                             nstats->rx_crc_errors +
4243                             nstats->rx_frame_errors +
4244                             nstats->rx_fifo_errors +
4245                             nstats->rx_missed_errors;
4246
4247         nstats->tx_aborted_errors =
4248                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4249                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4250         nstats->tx_carrier_errors =
4251                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4252         nstats->tx_fifo_errors = 0;
4253         nstats->tx_heartbeat_errors = 0;
4254         nstats->tx_window_errors = 0;
4255
4256         nstats->tx_errors = nstats->tx_aborted_errors +
4257                             nstats->tx_carrier_errors +
4258             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4259 }
4260
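/*
 * bnx2x_hilo(), used throughout bnx2x_net_stats_update() above, folds a
 * {hi, lo} counter pair into a long; per bnx2x.h it is roughly
 *
 *	u32 lo = *(hiref + 1);
 *	return (BITS_PER_LONG == 64) ? HILO_U64(*hiref, lo) : lo;
 *
 * meaning that on 32-bit kernels only the low 32 bits of each counter
 * reach net_device_stats.
 */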
4261 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4262 {
4263         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4264         int i;
4265
4266         estats->driver_xoff = 0;
4267         estats->rx_err_discard_pkt = 0;
4268         estats->rx_skb_alloc_failed = 0;
4269         estats->hw_csum_err = 0;
4270         for_each_queue(bp, i) {
4271                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4272
4273                 estats->driver_xoff += qstats->driver_xoff;
4274                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4275                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4276                 estats->hw_csum_err += qstats->hw_csum_err;
4277         }
4278 }
4279
4280 static void bnx2x_stats_update(struct bnx2x *bp)
4281 {
4282         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4283
4284         if (*stats_comp != DMAE_COMP_VAL)
4285                 return;
4286
4287         if (bp->port.pmf)
4288                 bnx2x_hw_stats_update(bp);
4289
4290         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4291                 BNX2X_ERR("storm stats were not updated for 3 consecutive cycles\n");
4292                 bnx2x_panic();
4293                 return;
4294         }
4295
4296         bnx2x_net_stats_update(bp);
4297         bnx2x_drv_stats_update(bp);
4298
4299         if (bp->msglevel & NETIF_MSG_TIMER) {
4300                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4301                 struct bnx2x_fastpath *fp0_tx = bp->fp;
4302                 struct tstorm_per_client_stats *old_tclient =
4303                                                         &bp->fp->old_tclient;
4304                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4305                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4306                 struct net_device_stats *nstats = &bp->dev->stats;
4307                 int i;
4308
4309                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4310                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4311                                   "  tx pkt (%lx)\n",
4312                        bnx2x_tx_avail(fp0_tx),
4313                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4314                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4315                                   "  rx pkt (%lx)\n",
4316                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4317                              fp0_rx->rx_comp_cons),
4318                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4319                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4320                                   "brb truncate %u\n",
4321                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4322                        qstats->driver_xoff,
4323                        estats->brb_drop_lo, estats->brb_truncate_lo);
4324                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4325                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4326                         "mac_discard %u  mac_filter_discard %u  "
4327                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4328                         "ttl0_discard %u\n",
4329                        le32_to_cpu(old_tclient->checksum_discard),
4330                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4331                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4332                        estats->mac_discard, estats->mac_filter_discard,
4333                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4334                        le32_to_cpu(old_tclient->ttl0_discard));
4335
4336                 for_each_queue(bp, i) {
4337                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4338                                bnx2x_fp(bp, i, tx_pkt),
4339                                bnx2x_fp(bp, i, rx_pkt),
4340                                bnx2x_fp(bp, i, rx_calls));
4341                 }
4342         }
4343
4344         bnx2x_hw_stats_post(bp);
4345         bnx2x_storm_stats_post(bp);
4346 }
4347
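/*
 * The functions below program the DMAE (DMA engine) by hand.  As used
 * here, a dmae_command describes a single copy: GRC addresses and 'len'
 * are in dwords (hence the ">> 2"), and completion is signalled by the
 * engine writing 'comp_val' to 'comp_addr' - either a loader GO register,
 * which kicks the next chained command, or the slowpath stats_comp word
 * that bnx2x_stats_comp() polls for DMAE_COMP_VAL.
 */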
4348 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4349 {
4350         struct dmae_command *dmae;
4351         u32 opcode;
4352         int loader_idx = PMF_DMAE_C(bp);
4353         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4354
4355         bp->executer_idx = 0;
4356
4357         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4358                   DMAE_CMD_C_ENABLE |
4359                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4360 #ifdef __BIG_ENDIAN
4361                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4362 #else
4363                   DMAE_CMD_ENDIANITY_DW_SWAP |
4364 #endif
4365                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4366                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4367
4368         if (bp->port.port_stx) {
4369
4370                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4371                 if (bp->func_stx)
4372                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4373                 else
4374                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4375                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4376                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4377                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4378                 dmae->dst_addr_hi = 0;
4379                 dmae->len = sizeof(struct host_port_stats) >> 2;
4380                 if (bp->func_stx) {
4381                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4382                         dmae->comp_addr_hi = 0;
4383                         dmae->comp_val = 1;
4384                 } else {
4385                         dmae->comp_addr_lo =
4386                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4387                         dmae->comp_addr_hi =
4388                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4389                         dmae->comp_val = DMAE_COMP_VAL;
4390
4391                         *stats_comp = 0;
4392                 }
4393         }
4394
4395         if (bp->func_stx) {
4396
4397                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4398                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4399                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4400                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4401                 dmae->dst_addr_lo = bp->func_stx >> 2;
4402                 dmae->dst_addr_hi = 0;
4403                 dmae->len = sizeof(struct host_func_stats) >> 2;
4404                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4405                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406                 dmae->comp_val = DMAE_COMP_VAL;
4407
4408                 *stats_comp = 0;
4409         }
4410 }
4411
4412 static void bnx2x_stats_stop(struct bnx2x *bp)
4413 {
4414         int update = 0;
4415
4416         bnx2x_stats_comp(bp);
4417
4418         if (bp->port.pmf)
4419                 update = (bnx2x_hw_stats_update(bp) == 0);
4420
4421         update |= (bnx2x_storm_stats_update(bp) == 0);
4422
4423         if (update) {
4424                 bnx2x_net_stats_update(bp);
4425
4426                 if (bp->port.pmf)
4427                         bnx2x_port_stats_stop(bp);
4428
4429                 bnx2x_hw_stats_post(bp);
4430                 bnx2x_stats_comp(bp);
4431         }
4432 }
4433
4434 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4435 {
4436 }
4437
4438 static const struct {
4439         void (*action)(struct bnx2x *bp);
4440         enum bnx2x_stats_state next_state;
4441 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4442 /* state        event   */
4443 {
4444 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4445 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4446 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4447 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4448 },
4449 {
4450 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4451 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4452 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4453 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4454 }
4455 };
4456
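/*
 * Example walk through the table above: STATS_EVENT_LINK_UP in
 * STATS_STATE_DISABLED runs bnx2x_stats_start() and moves to
 * STATS_STATE_ENABLED; a later STATS_EVENT_STOP runs bnx2x_stats_stop()
 * and returns to STATS_STATE_DISABLED.
 */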
4457 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4458 {
4459         enum bnx2x_stats_state state = bp->stats_state;
4460
4461         bnx2x_stats_stm[state][event].action(bp);
4462         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4463
4464         /* Make sure the state change is visible before proceeding */
4465         smp_wmb();
4466
4467         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4468                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4469                    state, event, bp->stats_state);
4470 }
4471
4472 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4473 {
4474         struct dmae_command *dmae;
4475         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4476
4477         /* sanity */
4478         if (!bp->port.pmf || !bp->port.port_stx) {
4479                 BNX2X_ERR("BUG!\n");
4480                 return;
4481         }
4482
4483         bp->executer_idx = 0;
4484
4485         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4486         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4487                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4488                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4489 #ifdef __BIG_ENDIAN
4490                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4491 #else
4492                         DMAE_CMD_ENDIANITY_DW_SWAP |
4493 #endif
4494                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4495                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4496         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4497         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4498         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4499         dmae->dst_addr_hi = 0;
4500         dmae->len = sizeof(struct host_port_stats) >> 2;
4501         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4502         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4503         dmae->comp_val = DMAE_COMP_VAL;
4504
4505         *stats_comp = 0;
4506         bnx2x_hw_stats_post(bp);
4507         bnx2x_stats_comp(bp);
4508 }
4509
4510 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4511 {
4512         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4513         int port = BP_PORT(bp);
4514         int func;
4515         u32 func_stx;
4516
4517         /* sanity */
4518         if (!bp->port.pmf || !bp->func_stx) {
4519                 BNX2X_ERR("BUG!\n");
4520                 return;
4521         }
4522
4523         /* save our func_stx */
4524         func_stx = bp->func_stx;
4525
4526         for (vn = VN_0; vn < vn_max; vn++) {
4527                 func = 2*vn + port;
4528
4529                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4530                 bnx2x_func_stats_init(bp);
4531                 bnx2x_hw_stats_post(bp);
4532                 bnx2x_stats_comp(bp);
4533         }
4534
4535         /* restore our func_stx */
4536         bp->func_stx = func_stx;
4537 }
4538
4539 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4540 {
4541         struct dmae_command *dmae = &bp->stats_dmae;
4542         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4543
4544         /* sanity */
4545         if (!bp->func_stx) {
4546                 BNX2X_ERR("BUG!\n");
4547                 return;
4548         }
4549
4550         bp->executer_idx = 0;
4551         memset(dmae, 0, sizeof(struct dmae_command));
4552
4553         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4554                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4555                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4556 #ifdef __BIG_ENDIAN
4557                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4558 #else
4559                         DMAE_CMD_ENDIANITY_DW_SWAP |
4560 #endif
4561                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4562                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4563         dmae->src_addr_lo = bp->func_stx >> 2;
4564         dmae->src_addr_hi = 0;
4565         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4566         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4567         dmae->len = sizeof(struct host_func_stats) >> 2;
4568         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4569         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4570         dmae->comp_val = DMAE_COMP_VAL;
4571
4572         *stats_comp = 0;
4573         bnx2x_hw_stats_post(bp);
4574         bnx2x_stats_comp(bp);
4575 }
4576
4577 static void bnx2x_stats_init(struct bnx2x *bp)
4578 {
4579         int port = BP_PORT(bp);
4580         int func = BP_FUNC(bp);
4581         int i;
4582
4583         bp->stats_pending = 0;
4584         bp->executer_idx = 0;
4585         bp->stats_counter = 0;
4586
4587         /* port and func stats for management */
4588         if (!BP_NOMCP(bp)) {
4589                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4590                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4591
4592         } else {
4593                 bp->port.port_stx = 0;
4594                 bp->func_stx = 0;
4595         }
4596         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4597            bp->port.port_stx, bp->func_stx);
4598
4599         /* port stats */
4600         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4601         bp->port.old_nig_stats.brb_discard =
4602                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4603         bp->port.old_nig_stats.brb_truncate =
4604                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4605         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4606                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4607         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4608                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4609
4610         /* function stats */
4611         for_each_queue(bp, i) {
4612                 struct bnx2x_fastpath *fp = &bp->fp[i];
4613
4614                 memset(&fp->old_tclient, 0,
4615                        sizeof(struct tstorm_per_client_stats));
4616                 memset(&fp->old_uclient, 0,
4617                        sizeof(struct ustorm_per_client_stats));
4618                 memset(&fp->old_xclient, 0,
4619                        sizeof(struct xstorm_per_client_stats));
4620                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4621         }
4622
4623         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4624         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4625
4626         bp->stats_state = STATS_STATE_DISABLED;
4627
4628         if (bp->port.pmf) {
4629                 if (bp->port.port_stx)
4630                         bnx2x_port_stats_base_init(bp);
4631
4632                 if (bp->func_stx)
4633                         bnx2x_func_stats_base_init(bp);
4634
4635         } else if (bp->func_stx)
4636                 bnx2x_func_stats_base_update(bp);
4637 }
4638
4639 static void bnx2x_timer(unsigned long data)
4640 {
4641         struct bnx2x *bp = (struct bnx2x *) data;
4642
4643         if (!netif_running(bp->dev))
4644                 return;
4645
4646         if (atomic_read(&bp->intr_sem) != 0)
4647                 goto timer_restart;
4648
4649         if (poll) {
4650                 struct bnx2x_fastpath *fp = &bp->fp[0];
4651                 int rc;
4652
4653                 bnx2x_tx_int(fp);
4654                 rc = bnx2x_rx_int(fp, 1000);
4655         }
4656
4657         if (!BP_NOMCP(bp)) {
4658                 int func = BP_FUNC(bp);
4659                 u32 drv_pulse;
4660                 u32 mcp_pulse;
4661
4662                 ++bp->fw_drv_pulse_wr_seq;
4663                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4664                 /* TBD - add SYSTEM_TIME */
4665                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4666                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4667
4668                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4669                              MCP_PULSE_SEQ_MASK);
4670                 /* The delta between driver pulse and mcp response
4671                  * should be 1 (before mcp response) or 0 (after mcp response)
4672                  */
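                /* e.g. a drv_pulse of 0x0010 is in sync with an mcp_pulse
                 * of 0x0010 or 0x000f
                 */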
4673                 if ((drv_pulse != mcp_pulse) &&
4674                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4675                         /* someone lost a heartbeat... */
4676                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4677                                   drv_pulse, mcp_pulse);
4678                 }
4679         }
4680
4681         if (bp->state == BNX2X_STATE_OPEN)
4682                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4683
4684 timer_restart:
4685         mod_timer(&bp->timer, jiffies + bp->current_interval);
4686 }
4687
4688 /* end of Statistics */
4689
4690 /* nic init */
4691
4692 /*
4693  * nic init service functions
4694  */
4695
4696 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4697 {
4698         int port = BP_PORT(bp);
4699
4700         /* "CSTORM" */
4701         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4702                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4703                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4704         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4705                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4706                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4707 }
4708
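/*
 * Each status block lives in host memory and is written by the chip with
 * fresh index values.  bnx2x_init_sb() below hands the CSTORM the block's
 * DMA address (as U64_LO/U64_HI halves) and then writes 1 into every
 * per-index HC_DISABLE slot, leaving all indices disabled until
 * bnx2x_update_coalesce() re-enables the ones it programs.
 */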
4709 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4710                           dma_addr_t mapping, int sb_id)
4711 {
4712         int port = BP_PORT(bp);
4713         int func = BP_FUNC(bp);
4714         int index;
4715         u64 section;
4716
4717         /* USTORM */
4718         section = ((u64)mapping) + offsetof(struct host_status_block,
4719                                             u_status_block);
4720         sb->u_status_block.status_block_id = sb_id;
4721
4722         REG_WR(bp, BAR_CSTRORM_INTMEM +
4723                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4724         REG_WR(bp, BAR_CSTRORM_INTMEM +
4725                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4726                U64_HI(section));
4727         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4728                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4729
4730         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4731                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4732                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4733
4734         /* CSTORM */
4735         section = ((u64)mapping) + offsetof(struct host_status_block,
4736                                             c_status_block);
4737         sb->c_status_block.status_block_id = sb_id;
4738
4739         REG_WR(bp, BAR_CSTRORM_INTMEM +
4740                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4741         REG_WR(bp, BAR_CSTRORM_INTMEM +
4742                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4743                U64_HI(section));
4744         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4745                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4746
4747         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4748                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4749                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4750
4751         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4752 }
4753
4754 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4755 {
4756         int func = BP_FUNC(bp);
4757
4758         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4759                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4760                         sizeof(struct tstorm_def_status_block)/4);
4761         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4762                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4763                         sizeof(struct cstorm_def_status_block_u)/4);
4764         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4765                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4766                         sizeof(struct cstorm_def_status_block_c)/4);
4767         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4768                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4769                         sizeof(struct xstorm_def_status_block)/4);
4770 }
4771
4772 static void bnx2x_init_def_sb(struct bnx2x *bp,
4773                               struct host_def_status_block *def_sb,
4774                               dma_addr_t mapping, int sb_id)
4775 {
4776         int port = BP_PORT(bp);
4777         int func = BP_FUNC(bp);
4778         int index, val, reg_offset;
4779         u64 section;
4780
4781         /* ATTN */
4782         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4783                                             atten_status_block);
4784         def_sb->atten_status_block.status_block_id = sb_id;
4785
4786         bp->attn_state = 0;
4787
4788         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4789                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4790
4791         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4792                 bp->attn_group[index].sig[0] = REG_RD(bp,
4793                                                      reg_offset + 0x10*index);
4794                 bp->attn_group[index].sig[1] = REG_RD(bp,
4795                                                reg_offset + 0x4 + 0x10*index);
4796                 bp->attn_group[index].sig[2] = REG_RD(bp,
4797                                                reg_offset + 0x8 + 0x10*index);
4798                 bp->attn_group[index].sig[3] = REG_RD(bp,
4799                                                reg_offset + 0xc + 0x10*index);
4800         }
4801
4802         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4803                              HC_REG_ATTN_MSG0_ADDR_L);
4804
4805         REG_WR(bp, reg_offset, U64_LO(section));
4806         REG_WR(bp, reg_offset + 4, U64_HI(section));
4807
4808         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4809
4810         val = REG_RD(bp, reg_offset);
4811         val |= sb_id;
4812         REG_WR(bp, reg_offset, val);
4813
4814         /* USTORM */
4815         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816                                             u_def_status_block);
4817         def_sb->u_def_status_block.status_block_id = sb_id;
4818
4819         REG_WR(bp, BAR_CSTRORM_INTMEM +
4820                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4821         REG_WR(bp, BAR_CSTRORM_INTMEM +
4822                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4823                U64_HI(section));
4824         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4825                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4826
4827         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4828                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4829                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4830
4831         /* CSTORM */
4832         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4833                                             c_def_status_block);
4834         def_sb->c_def_status_block.status_block_id = sb_id;
4835
4836         REG_WR(bp, BAR_CSTRORM_INTMEM +
4837                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4838         REG_WR(bp, BAR_CSTRORM_INTMEM +
4839                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4840                U64_HI(section));
4841         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4842                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4843
4844         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4845                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4846                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4847
4848         /* TSTORM */
4849         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4850                                             t_def_status_block);
4851         def_sb->t_def_status_block.status_block_id = sb_id;
4852
4853         REG_WR(bp, BAR_TSTRORM_INTMEM +
4854                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4855         REG_WR(bp, BAR_TSTRORM_INTMEM +
4856                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4857                U64_HI(section));
4858         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4859                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4860
4861         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4862                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4863                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4864
4865         /* XSTORM */
4866         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4867                                             x_def_status_block);
4868         def_sb->x_def_status_block.status_block_id = sb_id;
4869
4870         REG_WR(bp, BAR_XSTRORM_INTMEM +
4871                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4872         REG_WR(bp, BAR_XSTRORM_INTMEM +
4873                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4874                U64_HI(section));
4875         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4876                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4877
4878         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4879                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4880                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4881
4882         bp->stats_pending = 0;
4883         bp->set_mac_pending = 0;
4884
4885         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4886 }
4887
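/*
 * Coalescing note: the timeout registers below are written as rx_ticks or
 * tx_ticks divided by 4*BNX2X_BTR, i.e. in units of four baseline ticks
 * (scale inferred from the BTR constant); when the quotient is zero the
 * matching HC_DISABLE flag is set back to 1 and coalescing stays off for
 * that index.
 */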
4888 static void bnx2x_update_coalesce(struct bnx2x *bp)
4889 {
4890         int port = BP_PORT(bp);
4891         int i;
4892
4893         for_each_queue(bp, i) {
4894                 int sb_id = bp->fp[i].sb_id;
4895
4896                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4897                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4898                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4899                                                       U_SB_ETH_RX_CQ_INDEX),
4900                         bp->rx_ticks/(4 * BNX2X_BTR));
4901                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4902                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4903                                                        U_SB_ETH_RX_CQ_INDEX),
4904                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4905
4906                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4907                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4908                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4909                                                       C_SB_ETH_TX_CQ_INDEX),
4910                         bp->tx_ticks/(4 * BNX2X_BTR));
4911                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4912                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4913                                                        C_SB_ETH_TX_CQ_INDEX),
4914                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4915         }
4916 }
4917
4918 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4919                                        struct bnx2x_fastpath *fp, int last)
4920 {
4921         int i;
4922
4923         for (i = 0; i < last; i++) {
4924                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4925                 struct sk_buff *skb = rx_buf->skb;
4926
4927                 if (skb == NULL) {
4928                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4929                         continue;
4930                 }
4931
4932                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4933                         pci_unmap_single(bp->pdev,
4934                                          pci_unmap_addr(rx_buf, mapping),
4935                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4936
4937                 dev_kfree_skb(skb);
4938                 rx_buf->skb = NULL;
4939         }
4940 }
4941
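/*
 * Ring layout used below: each ring spans several BCM_PAGE_SIZE pages and
 * the final slot(s) of every page are reserved as "next page" elements
 * holding the DMA address of the following page - two slots in the SGE
 * and RX BD rings (indices RX_SGE_CNT * i - 2 and RX_DESC_CNT * i - 2)
 * and one in the CQ ring (RCQ_DESC_CNT * i - 1).  The NEXT_*_IDX()
 * producer helpers presumably skip over these reserved slots.
 */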
4942 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4943 {
4944         int func = BP_FUNC(bp);
4945         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4946                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4947         u16 ring_prod, cqe_ring_prod;
4948         int i, j;
4949
4950         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4951         DP(NETIF_MSG_IFUP,
4952            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4953
4954         if (bp->flags & TPA_ENABLE_FLAG) {
4955
4956                 for_each_queue(bp, j) {
4957                         struct bnx2x_fastpath *fp = &bp->fp[j];
4958
4959                         for (i = 0; i < max_agg_queues; i++) {
4960                                 fp->tpa_pool[i].skb =
4961                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4962                                 if (!fp->tpa_pool[i].skb) {
4963                                         BNX2X_ERR("Failed to allocate TPA "
4964                                                   "skb pool for queue[%d] - "
4965                                                   "disabling TPA on this "
4966                                                   "queue!\n", j);
4967                                         bnx2x_free_tpa_pool(bp, fp, i);
4968                                         fp->disable_tpa = 1;
4969                                         break;
4970                                 }
4971                                 pci_unmap_addr_set((struct sw_rx_bd *)
4972                                                         &fp->tpa_pool[i],
4973                                                    mapping, 0);
4974                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4975                         }
4976                 }
4977         }
4978
4979         for_each_queue(bp, j) {
4980                 struct bnx2x_fastpath *fp = &bp->fp[j];
4981
4982                 fp->rx_bd_cons = 0;
4983                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4984                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4985
4986                 /* "next page" elements initialization */
4987                 /* SGE ring */
4988                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4989                         struct eth_rx_sge *sge;
4990
4991                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4992                         sge->addr_hi =
4993                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4994                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4995                         sge->addr_lo =
4996                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4997                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4998                 }
4999
5000                 bnx2x_init_sge_ring_bit_mask(fp);
5001
5002                 /* RX BD ring */
5003                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5004                         struct eth_rx_bd *rx_bd;
5005
5006                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5007                         rx_bd->addr_hi =
5008                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5009                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5010                         rx_bd->addr_lo =
5011                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5012                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5013                 }
5014
5015                 /* CQ ring */
5016                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5017                         struct eth_rx_cqe_next_page *nextpg;
5018
5019                         nextpg = (struct eth_rx_cqe_next_page *)
5020                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5021                         nextpg->addr_hi =
5022                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5023                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5024                         nextpg->addr_lo =
5025                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5026                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5027                 }
5028
5029                 /* Allocate SGEs and initialize the ring elements */
5030                 for (i = 0, ring_prod = 0;
5031                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5032
5033                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5034                                 BNX2X_ERR("was only able to allocate "
5035                                           "%d rx sges\n", i);
5036                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5037                                 /* Cleanup already allocated elements */
5038                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5039                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5040                                 fp->disable_tpa = 1;
5041                                 ring_prod = 0;
5042                                 break;
5043                         }
5044                         ring_prod = NEXT_SGE_IDX(ring_prod);
5045                 }
5046                 fp->rx_sge_prod = ring_prod;
5047
5048                 /* Allocate BDs and initialize BD ring */
5049                 fp->rx_comp_cons = 0;
5050                 cqe_ring_prod = ring_prod = 0;
5051                 for (i = 0; i < bp->rx_ring_size; i++) {
5052                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5053                                 BNX2X_ERR("was only able to allocate "
5054                                           "%d rx skbs on queue[%d]\n", i, j);
5055                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5056                                 break;
5057                         }
5058                         ring_prod = NEXT_RX_IDX(ring_prod);
5059                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5060                         WARN_ON(ring_prod <= i);
5061                 }
5062
5063                 fp->rx_bd_prod = ring_prod;
5064                 /* must not have more available CQEs than BDs */
5065                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5066                                        cqe_ring_prod);
5067                 fp->rx_pkt = fp->rx_calls = 0;
5068
5069                 /* Warning!
5070                  * This will generate an interrupt (to the TSTORM);
5071                  * it must only be done after the chip is initialized.
5072                  */
5073                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5074                                      fp->rx_sge_prod);
5075                 if (j != 0)
5076                         continue;
5077
5078                 REG_WR(bp, BAR_USTRORM_INTMEM +
5079                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5080                        U64_LO(fp->rx_comp_mapping));
5081                 REG_WR(bp, BAR_USTRORM_INTMEM +
5082                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5083                        U64_HI(fp->rx_comp_mapping));
5084         }
5085 }
5086
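/*
 * The Tx BD rings are chained the same way: the last descriptor of each
 * page (TX_DESC_CNT * i - 1) is an eth_tx_next_bd holding the DMA address
 * of the next page.
 */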
5087 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5088 {
5089         int i, j;
5090
5091         for_each_queue(bp, j) {
5092                 struct bnx2x_fastpath *fp = &bp->fp[j];
5093
5094                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5095                         struct eth_tx_next_bd *tx_next_bd =
5096                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5097
5098                         tx_next_bd->addr_hi =
5099                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5100                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5101                         tx_next_bd->addr_lo =
5102                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5103                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5104                 }
5105
5106                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5107                 fp->tx_db.data.zero_fill1 = 0;
5108                 fp->tx_db.data.prod = 0;
5109
5110                 fp->tx_pkt_prod = 0;
5111                 fp->tx_pkt_cons = 0;
5112                 fp->tx_bd_prod = 0;
5113                 fp->tx_bd_cons = 0;
5114                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5115                 fp->tx_pkt = 0;
5116         }
5117 }
5118
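/*
 * The slowpath queue (SPQ) set up below carries slowpath (ramrod)
 * commands: BDs live between bp->spq and bp->spq_last_bd, bp->spq_left is
 * a MAX_SPQ_PENDING credit counter, and the producer index is mirrored to
 * the XSTORM through the SPQ_PROD register written here.
 */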
5119 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5120 {
5121         int func = BP_FUNC(bp);
5122
5123         spin_lock_init(&bp->spq_lock);
5124
5125         bp->spq_left = MAX_SPQ_PENDING;
5126         bp->spq_prod_idx = 0;
5127         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5128         bp->spq_prod_bd = bp->spq;
5129         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5130
5131         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5132                U64_LO(bp->spq_mapping));
5133         REG_WR(bp,
5134                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5135                U64_HI(bp->spq_mapping));
5136
5137         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5138                bp->spq_prod_idx);
5139 }
5140
5141 static void bnx2x_init_context(struct bnx2x *bp)
5142 {
5143         int i;
5144
5145         /* Rx */
5146         for_each_queue(bp, i) {
5147                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5148                 struct bnx2x_fastpath *fp = &bp->fp[i];
5149                 u8 cl_id = fp->cl_id;
5150
5151                 context->ustorm_st_context.common.sb_index_numbers =
5152                                                 BNX2X_RX_SB_INDEX_NUM;
5153                 context->ustorm_st_context.common.clientId = cl_id;
5154                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5155                 context->ustorm_st_context.common.flags =
5156                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5157                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5158                 context->ustorm_st_context.common.statistics_counter_id =
5159                                                 cl_id;
5160                 context->ustorm_st_context.common.mc_alignment_log_size =
5161                                                 BNX2X_RX_ALIGN_SHIFT;
5162                 context->ustorm_st_context.common.bd_buff_size =
5163                                                 bp->rx_buf_size;
5164                 context->ustorm_st_context.common.bd_page_base_hi =
5165                                                 U64_HI(fp->rx_desc_mapping);
5166                 context->ustorm_st_context.common.bd_page_base_lo =
5167                                                 U64_LO(fp->rx_desc_mapping);
5168                 if (!fp->disable_tpa) {
5169                         context->ustorm_st_context.common.flags |=
5170                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5171                         context->ustorm_st_context.common.sge_buff_size =
5172                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5173                                          (u32)0xffff);
5174                         context->ustorm_st_context.common.sge_page_base_hi =
5175                                                 U64_HI(fp->rx_sge_mapping);
5176                         context->ustorm_st_context.common.sge_page_base_lo =
5177                                                 U64_LO(fp->rx_sge_mapping);
5178
5179                         context->ustorm_st_context.common.max_sges_for_packet =
5180                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5181                         context->ustorm_st_context.common.max_sges_for_packet =
5182                                 ((context->ustorm_st_context.common.
5183                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5184                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5185                 }
5186
5187                 context->ustorm_ag_context.cdu_usage =
5188                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5189                                                CDU_REGION_NUMBER_UCM_AG,
5190                                                ETH_CONNECTION_TYPE);
5191
5192                 context->xstorm_ag_context.cdu_reserved =
5193                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5194                                                CDU_REGION_NUMBER_XCM_AG,
5195                                                ETH_CONNECTION_TYPE);
5196         }
5197
5198         /* Tx */
5199         for_each_queue(bp, i) {
5200                 struct bnx2x_fastpath *fp = &bp->fp[i];
5201                 struct eth_context *context =
5202                         bnx2x_sp(bp, context[i].eth);
5203
5204                 context->cstorm_st_context.sb_index_number =
5205                                                 C_SB_ETH_TX_CQ_INDEX;
5206                 context->cstorm_st_context.status_block_id = fp->sb_id;
5207
5208                 context->xstorm_st_context.tx_bd_page_base_hi =
5209                                                 U64_HI(fp->tx_desc_mapping);
5210                 context->xstorm_st_context.tx_bd_page_base_lo =
5211                                                 U64_LO(fp->tx_desc_mapping);
5212                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5213                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5214         }
5215 }
5216
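/*
 * The RSS indirection table below is filled round-robin: entry i maps to
 * client id bp->fp->cl_id + (i % bp->num_queues), so with e.g. four
 * queues the TSTORM_INDIRECTION_TABLE_SIZE entries repeat
 * cl_id+0 .. cl_id+3.
 */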
5217 static void bnx2x_init_ind_table(struct bnx2x *bp)
5218 {
5219         int func = BP_FUNC(bp);
5220         int i;
5221
5222         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5223                 return;
5224
5225         DP(NETIF_MSG_IFUP,
5226            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5227         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5228                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5229                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5230                         bp->fp->cl_id + (i % bp->num_queues));
5231 }
5232
5233 static void bnx2x_set_client_config(struct bnx2x *bp)
5234 {
5235         struct tstorm_eth_client_config tstorm_client = {0};
5236         int port = BP_PORT(bp);
5237         int i;
5238
5239         tstorm_client.mtu = bp->dev->mtu;
5240         tstorm_client.config_flags =
5241                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5242                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5243 #ifdef BCM_VLAN
5244         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5245                 tstorm_client.config_flags |=
5246                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5247                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5248         }
5249 #endif
5250
5251         for_each_queue(bp, i) {
5252                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5253
5254                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5255                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5256                        ((u32 *)&tstorm_client)[0]);
5257                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5258                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5259                        ((u32 *)&tstorm_client)[1]);
5260         }
5261
5262         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5263            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5264 }
5265
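/*
 * bnx2x_set_storm_rx_mode() below translates the device rx mode into
 * per-client drop_all/accept_all masks for the TSTORM MAC filter (e.g.
 * PROMISC sets ucast/mcast/bcast accept_all) and into an NIG LLH mask
 * selecting which packet classes the hardware passes up to the host.
 */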
5266 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5267 {
5268         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5269         int mode = bp->rx_mode;
5270         int mask = bp->rx_mode_cl_mask;
5271         int func = BP_FUNC(bp);
5272         int port = BP_PORT(bp);
5273         int i;
5274         /* All but management unicast packets should pass to the host as well */
5275         u32 llh_mask =
5276                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5277                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5278                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5279                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5280
5281         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5282
5283         switch (mode) {
5284         case BNX2X_RX_MODE_NONE: /* no Rx */
5285                 tstorm_mac_filter.ucast_drop_all = mask;
5286                 tstorm_mac_filter.mcast_drop_all = mask;
5287                 tstorm_mac_filter.bcast_drop_all = mask;
5288                 break;
5289
5290         case BNX2X_RX_MODE_NORMAL:
5291                 tstorm_mac_filter.bcast_accept_all = mask;
5292                 break;
5293
5294         case BNX2X_RX_MODE_ALLMULTI:
5295                 tstorm_mac_filter.mcast_accept_all = mask;
5296                 tstorm_mac_filter.bcast_accept_all = mask;
5297                 break;
5298
5299         case BNX2X_RX_MODE_PROMISC:
5300                 tstorm_mac_filter.ucast_accept_all = mask;
5301                 tstorm_mac_filter.mcast_accept_all = mask;
5302                 tstorm_mac_filter.bcast_accept_all = mask;
5303                 /* pass management unicast packets as well */
5304                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5305                 break;
5306
5307         default:
5308                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5309                 break;
5310         }
5311
5312         REG_WR(bp,
5313                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5314                llh_mask);
5315
5316         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5317                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5318                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5319                        ((u32 *)&tstorm_mac_filter)[i]);
5320
5321 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5322                    ((u32 *)&tstorm_mac_filter)[i]); */
5323         }
5324
5325         if (mode != BNX2X_RX_MODE_NONE)
5326                 bnx2x_set_client_config(bp);
5327 }
5328
5329 static void bnx2x_init_internal_common(struct bnx2x *bp)
5330 {
5331         int i;
5332
5333         /* Zero this manually as its initialization is
5334            currently missing in the initTool */
5335         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5336                 REG_WR(bp, BAR_USTRORM_INTMEM +
5337                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5338 }
5339
5340 static void bnx2x_init_internal_port(struct bnx2x *bp)
5341 {
5342         int port = BP_PORT(bp);
5343
5344         REG_WR(bp,
5345                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5346         REG_WR(bp,
5347                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5348         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5349         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5350 }
5351
5352 static void bnx2x_init_internal_func(struct bnx2x *bp)
5353 {
5354         struct tstorm_eth_function_common_config tstorm_config = {0};
5355         struct stats_indication_flags stats_flags = {0};
5356         int port = BP_PORT(bp);
5357         int func = BP_FUNC(bp);
5358         int i, j;
5359         u32 offset;
5360         u16 max_agg_size;
5361
5362         if (is_multi(bp)) {
5363                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5364                 tstorm_config.rss_result_mask = MULTI_MASK;
5365         }
5366
5367         /* Enable TPA if needed */
5368         if (bp->flags & TPA_ENABLE_FLAG)
5369                 tstorm_config.config_flags |=
5370                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5371
5372         if (IS_E1HMF(bp))
5373                 tstorm_config.config_flags |=
5374                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5375
5376         tstorm_config.leading_client_id = BP_L_ID(bp);
5377
5378         REG_WR(bp, BAR_TSTRORM_INTMEM +
5379                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5380                (*(u32 *)&tstorm_config));
5381
5382         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5383         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5384         bnx2x_set_storm_rx_mode(bp);
5385
5386         for_each_queue(bp, i) {
5387                 u8 cl_id = bp->fp[i].cl_id;
5388
5389                 /* reset xstorm per client statistics */
5390                 offset = BAR_XSTRORM_INTMEM +
5391                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5392                 for (j = 0;
5393                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5394                         REG_WR(bp, offset + j*4, 0);
5395
5396                 /* reset tstorm per client statistics */
5397                 offset = BAR_TSTRORM_INTMEM +
5398                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5399                 for (j = 0;
5400                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5401                         REG_WR(bp, offset + j*4, 0);
5402
5403                 /* reset ustorm per client statistics */
5404                 offset = BAR_USTRORM_INTMEM +
5405                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5406                 for (j = 0;
5407                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5408                         REG_WR(bp, offset + j*4, 0);
5409         }
5410
5411         /* Init statistics related context */
5412         stats_flags.collect_eth = 1;
5413
5414         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5415                ((u32 *)&stats_flags)[0]);
5416         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5417                ((u32 *)&stats_flags)[1]);
5418
5419         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5420                ((u32 *)&stats_flags)[0]);
5421         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5422                ((u32 *)&stats_flags)[1]);
5423
5424         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5425                ((u32 *)&stats_flags)[0]);
5426         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5427                ((u32 *)&stats_flags)[1]);
5428
5429         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5430                ((u32 *)&stats_flags)[0]);
5431         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5432                ((u32 *)&stats_flags)[1]);
5433
5434         REG_WR(bp, BAR_XSTRORM_INTMEM +
5435                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5436                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5437         REG_WR(bp, BAR_XSTRORM_INTMEM +
5438                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5439                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5440
5441         REG_WR(bp, BAR_TSTRORM_INTMEM +
5442                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5443                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5444         REG_WR(bp, BAR_TSTRORM_INTMEM +
5445                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5446                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5447
5448         REG_WR(bp, BAR_USTRORM_INTMEM +
5449                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5450                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5451         REG_WR(bp, BAR_USTRORM_INTMEM +
5452                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5453                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5454
5455         if (CHIP_IS_E1H(bp)) {
5456                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5457                         IS_E1HMF(bp));
5458                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5459                         IS_E1HMF(bp));
5460                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5461                         IS_E1HMF(bp));
5462                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5463                         IS_E1HMF(bp));
5464
5465                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5466                          bp->e1hov);
5467         }
5468
5469         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
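             /* e.g. assuming 4KiB SGE pages and PAGES_PER_SGE == 1, this is
                min(8 * 4096 * 1, 0xffff) = 0x8000 bytes per aggregation
                (illustrative values; the real ones are build-dependent) */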
5470         max_agg_size =
5471                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5472                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5473                     (u32)0xffff);
5474         for_each_queue(bp, i) {
5475                 struct bnx2x_fastpath *fp = &bp->fp[i];
5476
5477                 REG_WR(bp, BAR_USTRORM_INTMEM +
5478                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5479                        U64_LO(fp->rx_comp_mapping));
5480                 REG_WR(bp, BAR_USTRORM_INTMEM +
5481                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5482                        U64_HI(fp->rx_comp_mapping));
5483
5484                 /* Next page */
5485                 REG_WR(bp, BAR_USTRORM_INTMEM +
5486                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5487                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5488                 REG_WR(bp, BAR_USTRORM_INTMEM +
5489                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5490                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5491
5492                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5493                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5494                          max_agg_size);
5495         }
5496
5497         /* dropless flow control */
5498         if (CHIP_IS_E1H(bp)) {
5499                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5500
5501                 rx_pause.bd_thr_low = 250;
5502                 rx_pause.cqe_thr_low = 250;
5503                 rx_pause.cos = 1;
5504                 rx_pause.sge_thr_low = 0;
5505                 rx_pause.bd_thr_high = 350;
5506                 rx_pause.cqe_thr_high = 350;
5507                 rx_pause.sge_thr_high = 0;
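                     /* the low/high pairs presumably form a hysteresis in
                        ring-entry units: pause asserted as free entries fall
                        below *_thr_low and released above *_thr_high; SGE
                        thresholds stay 0 unless TPA is active (see below) */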
5508
5509                 for_each_queue(bp, i) {
5510                         struct bnx2x_fastpath *fp = &bp->fp[i];
5511
5512                         if (!fp->disable_tpa) {
5513                                 rx_pause.sge_thr_low = 150;
5514                                 rx_pause.sge_thr_high = 250;
5515                         }
5516
5517
5518                         offset = BAR_USTRORM_INTMEM +
5519                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5520                                                                    fp->cl_id);
5521                         for (j = 0;
5522                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5523                              j++)
5524                                 REG_WR(bp, offset + j*4,
5525                                        ((u32 *)&rx_pause)[j]);
5526                 }
5527         }
5528
5529         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5530
5531         /* Init rate shaping and fairness contexts */
5532         if (IS_E1HMF(bp)) {
5533                 int vn;
5534
5535         /* There is no active link during init;
5536            until link is up, assume a 10Gbps link rate */
5537                 bp->link_vars.line_speed = SPEED_10000;
5538                 bnx2x_init_port_minmax(bp);
5539
5540                 if (!BP_NOMCP(bp))
5541                         bp->mf_config =
5542                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5543                 bnx2x_calc_vn_weight_sum(bp);
5544
5545                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5546                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5547
5548                 /* Enable rate shaping and fairness */
5549                 bp->cmng.flags.cmng_enables |=
5550                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5551
5552         } else {
5553                 /* rate shaping and fairness are disabled */
5554                 DP(NETIF_MSG_IFUP,
5555                    "single function mode  minmax will be disabled\n");
5556         }
5557
5558
5559         /* Store it to internal memory */
5560         if (bp->port.pmf)
5561                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5562                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5563                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5564                                ((u32 *)(&bp->cmng))[i]);
5565 }
5566
5567 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5568 {
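             /* The load_code from the MCP encodes how much init this driver
              * instance owns: the first driver on the chip does COMMON +
              * PORT + FUNCTION, the first on a port does PORT + FUNCTION,
              * any later function only FUNCTION - hence the deliberate
              * fall-throughs below */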
5569         switch (load_code) {
5570         case FW_MSG_CODE_DRV_LOAD_COMMON:
5571                 bnx2x_init_internal_common(bp);
5572                 /* no break */
5573
5574         case FW_MSG_CODE_DRV_LOAD_PORT:
5575                 bnx2x_init_internal_port(bp);
5576                 /* no break */
5577
5578         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5579                 bnx2x_init_internal_func(bp);
5580                 break;
5581
5582         default:
5583                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5584                 break;
5585         }
5586 }
5587
5588 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5589 {
5590         int i;
5591
5592         for_each_queue(bp, i) {
5593                 struct bnx2x_fastpath *fp = &bp->fp[i];
5594
5595                 fp->bp = bp;
5596                 fp->state = BNX2X_FP_STATE_CLOSED;
5597                 fp->index = i;
5598                 fp->cl_id = BP_L_ID(bp) + i;
5599 #ifdef BCM_CNIC
5600                 fp->sb_id = fp->cl_id + 1;
5601 #else
5602                 fp->sb_id = fp->cl_id;
5603 #endif
5604                 DP(NETIF_MSG_IFUP,
5605                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5606                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5607                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5608                               fp->sb_id);
5609                 bnx2x_update_fpsb_idx(fp);
5610         }
5611
5612         /* ensure status block indices were read */
5613         rmb();
5614
5615
5616         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5617                           DEF_SB_ID);
5618         bnx2x_update_dsb_idx(bp);
5619         bnx2x_update_coalesce(bp);
5620         bnx2x_init_rx_rings(bp);
5621         bnx2x_init_tx_ring(bp);
5622         bnx2x_init_sp_ring(bp);
5623         bnx2x_init_context(bp);
5624         bnx2x_init_internal(bp, load_code);
5625         bnx2x_init_ind_table(bp);
5626         bnx2x_stats_init(bp);
5627
5628         /* At this point, we are ready for interrupts */
5629         atomic_set(&bp->intr_sem, 0);
5630
5631         /* flush all before enabling interrupts */
5632         mb();
5633         mmiowb();
5634
5635         bnx2x_int_enable(bp);
5636
5637         /* Check for SPIO5 */
5638         bnx2x_attn_int_deasserted0(bp,
5639                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5640                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5641 }
5642
5643 /* end of nic init */
5644
5645 /*
5646  * gzip service functions
5647  */
5648
5649 static int bnx2x_gunzip_init(struct bnx2x *bp)
5650 {
5651         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5652                                               &bp->gunzip_mapping);
5653         if (bp->gunzip_buf == NULL)
5654                 goto gunzip_nomem1;
5655
5656         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5657         if (bp->strm == NULL)
5658                 goto gunzip_nomem2;
5659
5660         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5661                                       GFP_KERNEL);
5662         if (bp->strm->workspace == NULL)
5663                 goto gunzip_nomem3;
5664
5665         return 0;
5666
5667 gunzip_nomem3:
5668         kfree(bp->strm);
5669         bp->strm = NULL;
5670
5671 gunzip_nomem2:
5672         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5673                             bp->gunzip_mapping);
5674         bp->gunzip_buf = NULL;
5675
5676 gunzip_nomem1:
5677         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5678                " decompression\n", bp->dev->name);
5679         return -ENOMEM;
5680 }
5681
5682 static void bnx2x_gunzip_end(struct bnx2x *bp)
5683 {
5684         kfree(bp->strm->workspace);
5685
5686         kfree(bp->strm);
5687         bp->strm = NULL;
5688
5689         if (bp->gunzip_buf) {
5690                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5691                                     bp->gunzip_mapping);
5692                 bp->gunzip_buf = NULL;
5693         }
5694 }
5695
5696 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5697 {
5698         int n, rc;
5699
5700         /* check gzip header */
5701         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5702                 BNX2X_ERR("Bad gzip header\n");
5703                 return -EINVAL;
5704         }
5705
5706         n = 10;
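             /* a gzip member starts with a 10-byte fixed header (RFC 1952):
                ID1 ID2 (0x1f 0x8b), CM (8 = deflate), FLG, 4-byte MTIME,
                XFL and OS; if FLG.FNAME is set, a NUL-terminated original
                file name follows and is skipped below before the raw
                deflate data is handed to zlib */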
5707
5708 #define FNAME                           0x8
5709
5710         if (zbuf[3] & FNAME)
5711                 while ((zbuf[n++] != 0) && (n < len));
5712
5713         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5714         bp->strm->avail_in = len - n;
5715         bp->strm->next_out = bp->gunzip_buf;
5716         bp->strm->avail_out = FW_BUF_SIZE;
5717
5718         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5719         if (rc != Z_OK)
5720                 return rc;
5721
5722         rc = zlib_inflate(bp->strm, Z_FINISH);
5723         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5724                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5725                        bp->dev->name, bp->strm->msg);
5726
5727         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5728         if (bp->gunzip_outlen & 0x3)
5729                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5730                                     " gunzip_outlen (%d) not aligned\n",
5731                        bp->dev->name, bp->gunzip_outlen);
5732         bp->gunzip_outlen >>= 2;
5733
5734         zlib_inflateEnd(bp->strm);
5735
5736         if (rc == Z_STREAM_END)
5737                 return 0;
5738
5739         return rc;
5740 }
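/* Typical call sequence for the helpers above (a sketch; fw_blob, fw_len
 * and process() are placeholders, not names from this file):
 *
 *	if (bnx2x_gunzip_init(bp) == 0) {
 *		if (bnx2x_gunzip(bp, fw_blob, fw_len) == 0)
 *			process(bp->gunzip_buf);   // gunzip_outlen 32-bit words
 *		bnx2x_gunzip_end(bp);
 *	}
 */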
5741
5742 /* nic load/unload */
5743
5744 /*
5745  * General service functions
5746  */
5747
5748 /* send a NIG loopback debug packet */
5749 static void bnx2x_lb_pckt(struct bnx2x *bp)
5750 {
5751         u32 wb_write[3];
5752
5753         /* Ethernet source and destination addresses */
5754         wb_write[0] = 0x55555555;
5755         wb_write[1] = 0x55555555;
5756         wb_write[2] = 0x20;             /* SOP */
5757         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5758
5759         /* NON-IP protocol */
5760         wb_write[0] = 0x09000000;
5761         wb_write[1] = 0x55555555;
5762         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5763         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5764 }
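/* The two 3-word bursts above (SOP, then EOP) emit a single 16-byte (0x10)
 * frame; bnx2x_int_mem_test() below relies on this when it polls the NIG
 * octet counter for 0x10 (one frame) and 0xb0 (eleven frames). */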
5765
5766 /* Some of the internal memories
5767  * are not directly readable from the driver;
5768  * to test them we send debug packets.
5769  */
5770 static int bnx2x_int_mem_test(struct bnx2x *bp)
5771 {
5772         int factor;
5773         int count, i;
5774         u32 val = 0;
5775
5776         if (CHIP_REV_IS_FPGA(bp))
5777                 factor = 120;
5778         else if (CHIP_REV_IS_EMUL(bp))
5779                 factor = 200;
5780         else
5781                 factor = 1;
5782
5783         DP(NETIF_MSG_HW, "start part1\n");
5784
5785         /* Disable inputs of parser neighbor blocks */
5786         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5787         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5788         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5789         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5790
5791         /*  Write 0 to parser credits for CFC search request */
5792         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5793
5794         /* send Ethernet packet */
5795         bnx2x_lb_pckt(bp);
5796
5797         /* TODO: do we need to reset the NIG statistics? */
5798         /* Wait until NIG register shows 1 packet of size 0x10 */
5799         count = 1000 * factor;
5800         while (count) {
5801
5802                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5803                 val = *bnx2x_sp(bp, wb_data[0]);
5804                 if (val == 0x10)
5805                         break;
5806
5807                 msleep(10);
5808                 count--;
5809         }
5810         if (val != 0x10) {
5811                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5812                 return -1;
5813         }
5814
5815         /* Wait until PRS register shows 1 packet */
5816         count = 1000 * factor;
5817         while (count) {
5818                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5819                 if (val == 1)
5820                         break;
5821
5822                 msleep(10);
5823                 count--;
5824         }
5825         if (val != 0x1) {
5826                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5827                 return -2;
5828         }
5829
5830         /* Reset and init BRB, PRS */
5831         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5832         msleep(50);
5833         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5834         msleep(50);
5835         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5836         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5837
5838         DP(NETIF_MSG_HW, "part2\n");
5839
5840         /* Disable inputs of parser neighbor blocks */
5841         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5842         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5843         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5844         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5845
5846         /* Write 0 to parser credits for CFC search request */
5847         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5848
5849         /* send 10 Ethernet packets */
5850         for (i = 0; i < 10; i++)
5851                 bnx2x_lb_pckt(bp);
5852
5853         /* Wait until NIG register shows 10 + 1 packets
5854            with a total size of 11*0x10 = 0xb0 */
5855         count = 1000 * factor;
5856         while (count) {
5857
5858                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5859                 val = *bnx2x_sp(bp, wb_data[0]);
5860                 if (val == 0xb0)
5861                         break;
5862
5863                 msleep(10);
5864                 count--;
5865         }
5866         if (val != 0xb0) {
5867                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5868                 return -3;
5869         }
5870
5871         /* the PRS register should now show 2 packets */
5872         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5873         if (val != 2)
5874                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5875
5876         /* Write 1 to parser credits for CFC search request */
5877         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5878
5879         /* Wait until PRS register shows 3 packets */
5880         msleep(10 * factor);
5881         /* then re-read the PRS packet counter */
5882         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5883         if (val != 3)
5884                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5885
5886         /* clear NIG EOP FIFO */
5887         for (i = 0; i < 11; i++)
5888                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5889         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5890         if (val != 1) {
5891                 BNX2X_ERR("clear of NIG failed\n");
5892                 return -4;
5893         }
5894
5895         /* Reset and init BRB, PRS, NIG */
5896         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5897         msleep(50);
5898         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5899         msleep(50);
5900         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5901         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5902 #ifndef BCM_CNIC
5903         /* set NIC mode */
5904         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5905 #endif
5906
5907         /* Enable inputs of parser neighbor blocks */
5908         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5909         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5910         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5911         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5912
5913         DP(NETIF_MSG_HW, "done\n");
5914
5915         return 0; /* OK */
5916 }
5917
5918 static void enable_blocks_attention(struct bnx2x *bp)
5919 {
5920         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5921         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5922         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5923         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5924         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5925         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5926         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5927         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5928         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5929 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5930 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5931         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5932         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5933         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5934 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5935 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5936         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5937         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5938         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5939         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5940 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5941 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5942         if (CHIP_REV_IS_FPGA(bp))
5943                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5944         else
5945                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5946         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5947         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5948         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5949 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5950 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5951         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5952         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5953 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5954         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5955 }
5956
5957
5958 static void bnx2x_reset_common(struct bnx2x *bp)
5959 {
5960         /* reset_common */
5961         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5962                0xd3ffff7f);
5963         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5964 }
5965
5966 static void bnx2x_init_pxp(struct bnx2x *bp)
5967 {
5968         u16 devctl;
5969         int r_order, w_order;
5970
5971         pci_read_config_word(bp->pdev,
5972                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5973         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5974         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5975         if (bp->mrrs == -1)
5976                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5977         else {
5978                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5979                 r_order = bp->mrrs;
5980         }
5981
5982         bnx2x_init_pxp_arb(bp, r_order, w_order);
5983 }
5984
5985 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5986 {
5987         u32 val;
5988         u8 port;
5989         u8 is_required = 0;
5990
5991         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5992               SHARED_HW_CFG_FAN_FAILURE_MASK;
5993
5994         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5995                 is_required = 1;
5996
5997         /*
5998          * The fan failure mechanism is usually related to the PHY type since
5999          * the power consumption of the board is affected by the PHY. Currently,
6000          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6001          */
6002         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6003                 for (port = PORT_0; port < PORT_MAX; port++) {
6004                         u32 phy_type =
6005                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6006                                          external_phy_config) &
6007                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6008                         is_required |=
6009                                 ((phy_type ==
6010                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6011                                  (phy_type ==
6012                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6013                                  (phy_type ==
6014                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6015                 }
6016
6017         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6018
6019         if (is_required == 0)
6020                 return;
6021
6022         /* Fan failure is indicated by SPIO 5 */
6023         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6024                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6025
6026         /* set to active low mode */
6027         val = REG_RD(bp, MISC_REG_SPIO_INT);
6028         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6029                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6030         REG_WR(bp, MISC_REG_SPIO_INT, val);
6031
6032         /* enable interrupt to signal the IGU */
6033         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6034         val |= (1 << MISC_REGISTERS_SPIO_5);
6035         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6036 }
6037
6038 static int bnx2x_init_common(struct bnx2x *bp)
6039 {
6040         u32 val, i;
6041 #ifdef BCM_CNIC
6042         u32 wb_write[2];
6043 #endif
6044
6045         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6046
6047         bnx2x_reset_common(bp);
6048         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6049         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6050
6051         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6052         if (CHIP_IS_E1H(bp))
6053                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6054
6055         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6056         msleep(30);
6057         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6058
6059         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6060         if (CHIP_IS_E1(bp)) {
6061                 /* enable HW interrupt from PXP on USDM overflow
6062                    bit 16 on INT_MASK_0 */
6063                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6064         }
6065
6066         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6067         bnx2x_init_pxp(bp);
6068
6069 #ifdef __BIG_ENDIAN
6070         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6071         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6072         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6073         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6074         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6075         /* make sure this value is 0 */
6076         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6077
6078 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6079         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6080         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6081         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6082         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6083 #endif
6084
6085         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6086 #ifdef BCM_CNIC
6087         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6088         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6089         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6090 #endif
6091
6092         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6093                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6094
6095         /* let the HW do its magic ... */
6096         msleep(100);
6097         /* finish PXP init */
6098         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6099         if (val != 1) {
6100                 BNX2X_ERR("PXP2 CFG failed\n");
6101                 return -EBUSY;
6102         }
6103         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6104         if (val != 1) {
6105                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6106                 return -EBUSY;
6107         }
6108
6109         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6110         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6111
6112         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6113
6114         /* clean the DMAE memory */
6115         bp->dmae_ready = 1;
6116         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6117
6118         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6119         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6120         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6121         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6122
6123         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6124         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6125         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6126         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6127
6128         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6129
6130 #ifdef BCM_CNIC
6131         wb_write[0] = 0;
6132         wb_write[1] = 0;
6133         for (i = 0; i < 64; i++) {
6134                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6135                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6136
6137                 if (CHIP_IS_E1H(bp)) {
6138                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6139                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6140                                           wb_write, 2);
6141                 }
6142         }
6143 #endif
6144         /* soft reset pulse */
6145         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6146         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6147
6148 #ifdef BCM_CNIC
6149         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6150 #endif
6151
6152         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6153         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6154         if (!CHIP_REV_IS_SLOW(bp)) {
6155                 /* enable hw interrupt from doorbell Q */
6156                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6157         }
6158
6159         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6160         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6161         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6162 #ifndef BCM_CNIC
6163         /* set NIC mode */
6164         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6165 #endif
6166         if (CHIP_IS_E1H(bp))
6167                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6168
6169         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6170         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6171         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6172         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6173
6174         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6175         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6176         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6177         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6178
6179         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6180         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6181         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6182         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6183
6184         /* sync semi rtc */
6185         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6186                0x80000000);
6187         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6188                0x80000000);
6189
6190         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6191         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6192         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6193
6194         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6195         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6196                 REG_WR(bp, i, 0xc0cac01a);
6197                 /* TODO: replace with something meaningful */
6198         }
6199         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6200 #ifdef BCM_CNIC
6201         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6202         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6203         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6204         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6205         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6206         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6207         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6208         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6209         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6210         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6211 #endif
6212         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6213
6214         if (sizeof(union cdu_context) != 1024)
6215                 /* we currently assume that a context is 1024 bytes */
6216                 printk(KERN_ALERT PFX "please adjust the size of"
6217                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6218
6219         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6220         val = (4 << 24) + (0 << 12) + 1024;
6221         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
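             /* the 1024 term matches the 1024-byte cdu_context assumption
                checked above; the other two fields are presumably CDU
                sizing parameters (an inference, not documented here) */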
6222
6223         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6224         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6225         /* enable context validation interrupt from CFC */
6226         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6227
6228         /* set the thresholds to prevent CFC/CDU race */
6229         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6230
6231         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6232         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6233
6234         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6235         /* Reset PCIE errors for debug */
6236         REG_WR(bp, 0x2814, 0xffffffff);
6237         REG_WR(bp, 0x3820, 0xffffffff);
6238
6239         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6240         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6241         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6242         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6243
6244         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6245         if (CHIP_IS_E1H(bp)) {
6246                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6247                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6248         }
6249
6250         if (CHIP_REV_IS_SLOW(bp))
6251                 msleep(200);
6252
6253         /* finish CFC init */
6254         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6255         if (val != 1) {
6256                 BNX2X_ERR("CFC LL_INIT failed\n");
6257                 return -EBUSY;
6258         }
6259         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6260         if (val != 1) {
6261                 BNX2X_ERR("CFC AC_INIT failed\n");
6262                 return -EBUSY;
6263         }
6264         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6265         if (val != 1) {
6266                 BNX2X_ERR("CFC CAM_INIT failed\n");
6267                 return -EBUSY;
6268         }
6269         REG_WR(bp, CFC_REG_DEBUG0, 0);
6270
6271         /* read NIG statistic
6272            to see if this is our first bring-up since power-up */
6273         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6274         val = *bnx2x_sp(bp, wb_data[0]);
6275
6276         /* do internal memory self test */
6277         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6278                 BNX2X_ERR("internal mem self test failed\n");
6279                 return -EBUSY;
6280         }
6281
6282         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6283         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6284         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6285         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6286         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6287                 bp->port.need_hw_lock = 1;
6288                 break;
6289
6290         default:
6291                 break;
6292         }
6293
6294         bnx2x_setup_fan_failure_detection(bp);
6295
6296         /* clear PXP2 attentions */
6297         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6298
6299         enable_blocks_attention(bp);
6300
6301         if (!BP_NOMCP(bp)) {
6302                 bnx2x_acquire_phy_lock(bp);
6303                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6304                 bnx2x_release_phy_lock(bp);
6305         } else
6306                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6307
6308         return 0;
6309 }
6310
6311 static int bnx2x_init_port(struct bnx2x *bp)
6312 {
6313         int port = BP_PORT(bp);
6314         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6315         u32 low, high;
6316         u32 val;
6317
6318         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6319
6320         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6321
6322         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6323         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6324
6325         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6326         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6327         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6328         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6329
6330 #ifdef BCM_CNIC
6331         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6332
6333         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6334         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6335         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6336 #endif
6337         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6338
6339         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6340         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6341                 /* no pause for emulation and FPGA */
6342                 low = 0;
6343                 high = 513;
6344         } else {
6345                 if (IS_E1HMF(bp))
6346                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6347                 else if (bp->dev->mtu > 4096) {
6348                         if (bp->flags & ONE_PORT_FLAG)
6349                                 low = 160;
6350                         else {
6351                                 val = bp->dev->mtu;
6352                                 /* (24*1024 + val*4)/256 */
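                                     /* e.g. mtu 9000: (24576 + 36000)/256
                                        = 236.6, and 96 + 140 + 1 = 237 */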
6353                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6354                         }
6355                 } else
6356                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6357                 high = low + 56;        /* 14*1024/256 */
6358         }
6359         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6360         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6361
6362
6363         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6364
6365         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6366         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6367         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6368         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6369
6370         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6371         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6372         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6373         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6374
6375         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6376         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6377
6378         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6379
6380         /* configure PBF to work without PAUSE mtu 9000 */
6381         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6382
6383         /* update threshold */
6384         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6385         /* update init credit */
6386         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
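             /* 9040 is the 9000-byte mtu rounded up to a 16-byte multiple
                (565 * 16); the /16 scaling suggests both the threshold and
                the credit are in 16-byte units (an inference, not from the
                original comments) */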
6387
6388         /* probe changes */
6389         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6390         msleep(5);
6391         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6392
6393 #ifdef BCM_CNIC
6394         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6395 #endif
6396         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6397         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6398
6399         if (CHIP_IS_E1(bp)) {
6400                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6401                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6402         }
6403         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6404
6405         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6406         /* init aeu_mask_attn_func_0/1:
6407          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6408          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6409          *             bits 4-7 are used for "per vn group attention" */
6410         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6411                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6412
6413         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6414         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6415         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6416         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6417         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6418
6419         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6420
6421         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6422
6423         if (CHIP_IS_E1H(bp)) {
6424                 /* 0x2 disable e1hov, 0x1 enable */
6425                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6426                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6427
6428                 {
6429                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6430                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6431                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6432                 }
6433         }
6434
6435         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6436         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6437
6438         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6439         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6440                 {
6441                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6442
6443                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6444                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6445
6446                 /* The GPIO should be swapped if the swap register is
6447                    set and active */
6448                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6449                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6450
6451                 /* Select function upon port-swap configuration */
6452                 if (port == 0) {
6453                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6454                         aeu_gpio_mask = (swap_val && swap_override) ?
6455                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6456                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6457                 } else {
6458                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6459                         aeu_gpio_mask = (swap_val && swap_override) ?
6460                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6461                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6462                 }
6463                 val = REG_RD(bp, offset);
6464                 /* add GPIO3 to group */
6465                 val |= aeu_gpio_mask;
6466                 REG_WR(bp, offset, val);
6467                 }
6468                 break;
6469
6470         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6471         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6472                 /* add SPIO 5 to group 0 */
6473                 {
6474                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6475                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6476                 val = REG_RD(bp, reg_addr);
6477                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6478                 REG_WR(bp, reg_addr, val);
6479                 }
6480                 break;
6481
6482         default:
6483                 break;
6484         }
6485
6486         bnx2x__link_reset(bp);
6487
6488         return 0;
6489 }
6490
6491 #define ILT_PER_FUNC            (768/2)
6492 #define FUNC_ILT_BASE(func)     ((func) * ILT_PER_FUNC)
6493 /* the phys address is shifted right 12 bits and a 1=valid bit
6494    is added at the 53rd bit;
6495    then, since this is a wide register(TM),
6496    we split it into two 32-bit writes
6497  */
6498 #define ONCHIP_ADDR1(x)         ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
6499 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)(x) >> 44)))
6500 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
6501 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
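/* e.g. for a 64-bit DMA address of 0x1234567000 (illustrative value):
 *	ONCHIP_ADDR1 = 0x01234567	(address bits 12..43)
 *	ONCHIP_ADDR2 = 0x00100000	(valid bit set, bits 44..63 zero)
 */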
6502
6503 #ifdef BCM_CNIC
6504 #define CNIC_ILT_LINES          127
6505 #define CNIC_CTX_PER_ILT        16
6506 #else
6507 #define CNIC_ILT_LINES          0
6508 #endif
6509
6510 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6511 {
6512         int reg;
6513
6514         if (CHIP_IS_E1H(bp))
6515                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6516         else /* E1 */
6517                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6518
6519         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6520 }
6521
6522 static int bnx2x_init_func(struct bnx2x *bp)
6523 {
6524         int port = BP_PORT(bp);
6525         int func = BP_FUNC(bp);
6526         u32 addr, val;
6527         int i;
6528
6529         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6530
6531         /* set MSI reconfigure capability */
6532         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6533         val = REG_RD(bp, addr);
6534         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6535         REG_WR(bp, addr, val);
6536
6537         i = FUNC_ILT_BASE(func);
6538
6539         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6540         if (CHIP_IS_E1H(bp)) {
6541                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6542                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6543         } else /* E1 */
6544                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6545                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6546
6547 #ifdef BCM_CNIC
6548         i += 1 + CNIC_ILT_LINES;
6549         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6550         if (CHIP_IS_E1(bp))
6551                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6552         else {
6553                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6554                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6555         }
6556
6557         i++;
6558         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6559         if (CHIP_IS_E1(bp))
6560                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6561         else {
6562                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6563                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6564         }
6565
6566         i++;
6567         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6568         if (CHIP_IS_E1(bp))
6569                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6570         else {
6571                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6572                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6573         }
6574
6575         /* tell the searcher where the T2 table is */
6576         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6577
6578         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6579                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6580
6581         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6582                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6583                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6584
6585         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6586 #endif
6587
6588         if (CHIP_IS_E1H(bp)) {
6589                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6590                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6591                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6592                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6593                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6594                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6595                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6596                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6597                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6598
6599                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6600                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6601         }
6602
6603         /* HC init per function */
6604         if (CHIP_IS_E1H(bp)) {
6605                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6606
6607                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6608                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6609         }
6610         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6611
6612         /* Reset PCIE errors for debug */
6613         REG_WR(bp, 0x2114, 0xffffffff);
6614         REG_WR(bp, 0x2120, 0xffffffff);
6615
6616         return 0;
6617 }
6618
6619 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6620 {
6621         int i, rc = 0;
6622
6623         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6624            BP_FUNC(bp), load_code);
6625
6626         bp->dmae_ready = 0;
6627         mutex_init(&bp->dmae_mutex);
6628         rc = bnx2x_gunzip_init(bp);
6629         if (rc)
6630                 return rc;
6631
6632         switch (load_code) {
6633         case FW_MSG_CODE_DRV_LOAD_COMMON:
6634                 rc = bnx2x_init_common(bp);
6635                 if (rc)
6636                         goto init_hw_err;
6637                 /* no break */
6638
6639         case FW_MSG_CODE_DRV_LOAD_PORT:
6640                 bp->dmae_ready = 1;
6641                 rc = bnx2x_init_port(bp);
6642                 if (rc)
6643                         goto init_hw_err;
6644                 /* no break */
6645
6646         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6647                 bp->dmae_ready = 1;
6648                 rc = bnx2x_init_func(bp);
6649                 if (rc)
6650                         goto init_hw_err;
6651                 break;
6652
6653         default:
6654                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6655                 break;
6656         }
6657
6658         if (!BP_NOMCP(bp)) {
6659                 int func = BP_FUNC(bp);
6660
6661                 bp->fw_drv_pulse_wr_seq =
6662                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6663                                  DRV_PULSE_SEQ_MASK);
6664                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6665         }
6666
6667         /* this needs to be done before gunzip end */
6668         bnx2x_zero_def_sb(bp);
6669         for_each_queue(bp, i)
6670                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6671 #ifdef BCM_CNIC
6672         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6673 #endif
6674
6675 init_hw_err:
6676         bnx2x_gunzip_end(bp);
6677
6678         return rc;
6679 }
6680
6681 static void bnx2x_free_mem(struct bnx2x *bp)
6682 {
6683
6684 #define BNX2X_PCI_FREE(x, y, size) \
6685         do { \
6686                 if (x) { \
6687                         pci_free_consistent(bp->pdev, size, x, y); \
6688                         x = NULL; \
6689                         y = 0; \
6690                 } \
6691         } while (0)
6692
6693 #define BNX2X_FREE(x) \
6694         do { \
6695                 if (x) { \
6696                         vfree(x); \
6697                         x = NULL; \
6698                 } \
6699         } while (0)
6700
6701         int i;
6702
6703         /* fastpath */
6704         /* Common */
6705         for_each_queue(bp, i) {
6706
6707                 /* status blocks */
6708                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6709                                bnx2x_fp(bp, i, status_blk_mapping),
6710                                sizeof(struct host_status_block));
6711         }
6712         /* Rx */
6713         for_each_queue(bp, i) {
6714
6715                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6716                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6717                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6718                                bnx2x_fp(bp, i, rx_desc_mapping),
6719                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6720
6721                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6722                                bnx2x_fp(bp, i, rx_comp_mapping),
6723                                sizeof(struct eth_fast_path_rx_cqe) *
6724                                NUM_RCQ_BD);
6725
6726                 /* SGE ring */
6727                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6728                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6729                                bnx2x_fp(bp, i, rx_sge_mapping),
6730                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6731         }
6732         /* Tx */
6733         for_each_queue(bp, i) {
6734
6735                 /* fastpath tx rings: tx_buf tx_desc */
6736                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6737                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6738                                bnx2x_fp(bp, i, tx_desc_mapping),
6739                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6740         }
6741         /* end of fastpath */
6742
6743         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6744                        sizeof(struct host_def_status_block));
6745
6746         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6747                        sizeof(struct bnx2x_slowpath));
6748
6749 #ifdef BCM_CNIC
6750         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6751         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6752         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6753         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6754         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6755                        sizeof(struct host_status_block));
6756 #endif
6757         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6758
6759 #undef BNX2X_PCI_FREE
6760 #undef BNX2X_FREE
6761 }
6762
6763 static int bnx2x_alloc_mem(struct bnx2x *bp)
6764 {
6765
6766 #define BNX2X_PCI_ALLOC(x, y, size) \
6767         do { \
6768                 x = pci_alloc_consistent(bp->pdev, size, y); \
6769                 if (x == NULL) \
6770                         goto alloc_mem_err; \
6771                 memset(x, 0, size); \
6772         } while (0)
6773
6774 #define BNX2X_ALLOC(x, size) \
6775         do { \
6776                 x = vmalloc(size); \
6777                 if (x == NULL) \
6778                         goto alloc_mem_err; \
6779                 memset(x, 0, size); \
6780         } while (0)
6781
6782         int i;
6783
6784         /* fastpath */
6785         /* Common */
6786         for_each_queue(bp, i) {
6787                 bnx2x_fp(bp, i, bp) = bp;
6788
6789                 /* status blocks */
6790                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6791                                 &bnx2x_fp(bp, i, status_blk_mapping),
6792                                 sizeof(struct host_status_block));
6793         }
6794         /* Rx */
6795         for_each_queue(bp, i) {
6796
6797                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6798                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6799                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6800                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6801                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6802                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6803
6804                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6805                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6806                                 sizeof(struct eth_fast_path_rx_cqe) *
6807                                 NUM_RCQ_BD);
6808
6809                 /* SGE ring */
6810                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6811                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6812                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6813                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6814                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6815         }
6816         /* Tx */
6817         for_each_queue(bp, i) {
6818
6819                 /* fastpath tx rings: tx_buf tx_desc */
6820                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6821                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6822                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6823                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6824                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6825         }
6826         /* end of fastpath */
6827
6828         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6829                         sizeof(struct host_def_status_block));
6830
6831         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6832                         sizeof(struct bnx2x_slowpath));
6833
6834 #ifdef BCM_CNIC
6835         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6836
6837         /* allocate searcher T2 table
6838            we allocate 1/4 of alloc num for T2
6839            (which is not entered into the ILT) */
6840         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6841
6842         /* Initialize T2 (for 1024 connections) */
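        /* each 64-byte T2 element holds an 8-byte pointer at offset 56;
         * the loop chains every element to the physical address of the
         * next one, so the searcher can walk the table as a linked list */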
6843         for (i = 0; i < 16*1024; i += 64)
6844                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6845
6846         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6847         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6848
6849         /* QM queues (128*MAX_CONN) */
6850         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6851
6852         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6853                         sizeof(struct host_status_block));
6854 #endif
6855
6856         /* Slow path ring */
6857         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6858
6859         return 0;
6860
6861 alloc_mem_err:
6862         bnx2x_free_mem(bp);
6863         return -ENOMEM;
6864
6865 #undef BNX2X_PCI_ALLOC
6866 #undef BNX2X_ALLOC
6867 }
6868
6869 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6870 {
6871         int i;
6872
6873         for_each_queue(bp, i) {
6874                 struct bnx2x_fastpath *fp = &bp->fp[i];
6875
6876                 u16 bd_cons = fp->tx_bd_cons;
6877                 u16 sw_prod = fp->tx_pkt_prod;
6878                 u16 sw_cons = fp->tx_pkt_cons;
6879
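                /* walk the ring from the sw consumer to the sw producer,
                 * releasing every packet still pending transmission */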
6880                 while (sw_cons != sw_prod) {
6881                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6882                         sw_cons++;
6883                 }
6884         }
6885 }
6886
6887 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6888 {
6889         int i, j;
6890
6891         for_each_queue(bp, j) {
6892                 struct bnx2x_fastpath *fp = &bp->fp[j];
6893
6894                 for (i = 0; i < NUM_RX_BD; i++) {
6895                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6896                         struct sk_buff *skb = rx_buf->skb;
6897
6898                         if (skb == NULL)
6899                                 continue;
6900
6901                         pci_unmap_single(bp->pdev,
6902                                          pci_unmap_addr(rx_buf, mapping),
6903                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6904
6905                         rx_buf->skb = NULL;
6906                         dev_kfree_skb(skb);
6907                 }
6908                 if (!fp->disable_tpa)
6909                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6910                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6911                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6912         }
6913 }
6914
6915 static void bnx2x_free_skbs(struct bnx2x *bp)
6916 {
6917         bnx2x_free_tx_skbs(bp);
6918         bnx2x_free_rx_skbs(bp);
6919 }
6920
6921 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6922 {
6923         int i, offset = 1;
6924
6925         free_irq(bp->msix_table[0].vector, bp->dev);
6926         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6927            bp->msix_table[0].vector);
6928
6929 #ifdef BCM_CNIC
6930         offset++;
6931 #endif
6932         for_each_queue(bp, i) {
6933                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6934                    "state %x\n", i, bp->msix_table[i + offset].vector,
6935                    bnx2x_fp(bp, i, state));
6936
6937                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6938         }
6939 }
6940
6941 static void bnx2x_free_irq(struct bnx2x *bp)
6942 {
6943         if (bp->flags & USING_MSIX_FLAG) {
6944                 bnx2x_free_msix_irqs(bp);
6945                 pci_disable_msix(bp->pdev);
6946                 bp->flags &= ~USING_MSIX_FLAG;
6947
6948         } else if (bp->flags & USING_MSI_FLAG) {
6949                 free_irq(bp->pdev->irq, bp->dev);
6950                 pci_disable_msi(bp->pdev);
6951                 bp->flags &= ~USING_MSI_FLAG;
6952
6953         } else
6954                 free_irq(bp->pdev->irq, bp->dev);
6955 }
6956
6957 static int bnx2x_enable_msix(struct bnx2x *bp)
6958 {
6959         int i, rc, offset = 1;
6960         int igu_vec = 0;
6961
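        /* MSI-X vector layout: entry 0 is the slowpath interrupt, an
         * optional CNIC entry follows under BCM_CNIC, and the fastpath
         * queues take one entry each after that ("offset" tracks where
         * the fastpath entries begin) */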
6962         bp->msix_table[0].entry = igu_vec;
6963         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6964
6965 #ifdef BCM_CNIC
6966         igu_vec = BP_L_ID(bp) + offset;
6967         bp->msix_table[1].entry = igu_vec;
6968         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6969         offset++;
6970 #endif
6971         for_each_queue(bp, i) {
6972                 igu_vec = BP_L_ID(bp) + offset + i;
6973                 bp->msix_table[i + offset].entry = igu_vec;
6974                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6975                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6976         }
6977
6978         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6979                              BNX2X_NUM_QUEUES(bp) + offset);
6980         if (rc) {
6981                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6982                 return rc;
6983         }
6984
6985         bp->flags |= USING_MSIX_FLAG;
6986
6987         return 0;
6988 }
6989
6990 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6991 {
6992         int i, rc, offset = 1;
6993
6994         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6995                          bp->dev->name, bp->dev);
6996         if (rc) {
6997                 BNX2X_ERR("request sp irq failed\n");
6998                 return -EBUSY;
6999         }
7000
7001 #ifdef BCM_CNIC
7002         offset++;
7003 #endif
7004         for_each_queue(bp, i) {
7005                 struct bnx2x_fastpath *fp = &bp->fp[i];
7006                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7007                          bp->dev->name, i);
7008
7009                 rc = request_irq(bp->msix_table[i + offset].vector,
7010                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7011                 if (rc) {
7012                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7013                         bnx2x_free_msix_irqs(bp);
7014                         return -EBUSY;
7015                 }
7016
7017                 fp->state = BNX2X_FP_STATE_IRQ;
7018         }
7019
7020         i = BNX2X_NUM_QUEUES(bp);
7021         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
7022                " ... fp[%d] %d\n",
7023                bp->dev->name, bp->msix_table[0].vector,
7024                0, bp->msix_table[offset].vector,
7025                i - 1, bp->msix_table[offset + i - 1].vector);
7026
7027         return 0;
7028 }
7029
7030 static int bnx2x_enable_msi(struct bnx2x *bp)
7031 {
7032         int rc;
7033
7034         rc = pci_enable_msi(bp->pdev);
7035         if (rc) {
7036                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7037                 return -1;
7038         }
7039         bp->flags |= USING_MSI_FLAG;
7040
7041         return 0;
7042 }
7043
7044 static int bnx2x_req_irq(struct bnx2x *bp)
7045 {
7046         unsigned long flags;
7047         int rc;
7048
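        /* MSI interrupts are never shared, so IRQF_SHARED is needed
         * only in the legacy INTx case */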
7049         if (bp->flags & USING_MSI_FLAG)
7050                 flags = 0;
7051         else
7052                 flags = IRQF_SHARED;
7053
7054         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7055                          bp->dev->name, bp->dev);
7056         if (!rc)
7057                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7058
7059         return rc;
7060 }
7061
7062 static void bnx2x_napi_enable(struct bnx2x *bp)
7063 {
7064         int i;
7065
7066         for_each_queue(bp, i)
7067                 napi_enable(&bnx2x_fp(bp, i, napi));
7068 }
7069
7070 static void bnx2x_napi_disable(struct bnx2x *bp)
7071 {
7072         int i;
7073
7074         for_each_queue(bp, i)
7075                 napi_disable(&bnx2x_fp(bp, i, napi));
7076 }
7077
7078 static void bnx2x_netif_start(struct bnx2x *bp)
7079 {
7080         int intr_sem;
7081
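        /* bp->intr_sem acts as a disable count: only the decrement that
         * brings it to zero may re-enable NAPI and HW interrupts */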
7082         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7083         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7084
7085         if (intr_sem) {
7086                 if (netif_running(bp->dev)) {
7087                         bnx2x_napi_enable(bp);
7088                         bnx2x_int_enable(bp);
7089                         if (bp->state == BNX2X_STATE_OPEN)
7090                                 netif_tx_wake_all_queues(bp->dev);
7091                 }
7092         }
7093 }
7094
7095 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7096 {
7097         bnx2x_int_disable_sync(bp, disable_hw);
7098         bnx2x_napi_disable(bp);
7099         netif_tx_disable(bp->dev);
7100 }
7101
7102 /*
7103  * Init service functions
7104  */
7105
7106 /**
7107  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7108  *
7109  * @param bp driver descriptor
7110  * @param set set or clear an entry (1 or 0)
7111  * @param mac pointer to a buffer containing a MAC
7112  * @param cl_bit_vec bit vector of clients to register a MAC for
7113  * @param cam_offset offset in a CAM to use
7114  * @param with_bcast set broadcast MAC as well
7115  */
7116 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7117                                       u32 cl_bit_vec, u8 cam_offset,
7118                                       u8 with_bcast)
7119 {
7120         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7121         int port = BP_PORT(bp);
7122
7123         /* CAM allocation
7124          * unicasts 0-31:port0 32-63:port1
7125          * multicast 64-127:port0 128-191:port1
7126          */
7127         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7128         config->hdr.offset = cam_offset;
7129         config->hdr.client_id = 0xff;
7130         config->hdr.reserved1 = 0;
7131
7132         /* primary MAC */
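        /* the 6-byte MAC is split into three 16-bit CAM fields; swab16()
         * puts each byte pair into the byte order the CAM expects */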
7133         config->config_table[0].cam_entry.msb_mac_addr =
7134                                         swab16(*(u16 *)&mac[0]);
7135         config->config_table[0].cam_entry.middle_mac_addr =
7136                                         swab16(*(u16 *)&mac[2]);
7137         config->config_table[0].cam_entry.lsb_mac_addr =
7138                                         swab16(*(u16 *)&mac[4]);
7139         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7140         if (set)
7141                 config->config_table[0].target_table_entry.flags = 0;
7142         else
7143                 CAM_INVALIDATE(config->config_table[0]);
7144         config->config_table[0].target_table_entry.clients_bit_vector =
7145                                                 cpu_to_le32(cl_bit_vec);
7146         config->config_table[0].target_table_entry.vlan_id = 0;
7147
7148         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7149            (set ? "setting" : "clearing"),
7150            config->config_table[0].cam_entry.msb_mac_addr,
7151            config->config_table[0].cam_entry.middle_mac_addr,
7152            config->config_table[0].cam_entry.lsb_mac_addr);
7153
7154         /* broadcast */
7155         if (with_bcast) {
7156                 config->config_table[1].cam_entry.msb_mac_addr =
7157                         cpu_to_le16(0xffff);
7158                 config->config_table[1].cam_entry.middle_mac_addr =
7159                         cpu_to_le16(0xffff);
7160                 config->config_table[1].cam_entry.lsb_mac_addr =
7161                         cpu_to_le16(0xffff);
7162                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7163                 if (set)
7164                         config->config_table[1].target_table_entry.flags =
7165                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7166                 else
7167                         CAM_INVALIDATE(config->config_table[1]);
7168                 config->config_table[1].target_table_entry.clients_bit_vector =
7169                                                         cpu_to_le32(cl_bit_vec);
7170                 config->config_table[1].target_table_entry.vlan_id = 0;
7171         }
7172
7173         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7174                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7175                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7176 }
7177
7178 /**
7179  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7180  *
7181  * @param bp driver descriptor
7182  * @param set set or clear an entry (1 or 0)
7183  * @param mac pointer to a buffer containing a MAC
7184  * @param cl_bit_vec bit vector of clients to register a MAC for
7185  * @param cam_offset offset in a CAM to use
7186  */
7187 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7188                                        u32 cl_bit_vec, u8 cam_offset)
7189 {
7190         struct mac_configuration_cmd_e1h *config =
7191                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7192
7193         config->hdr.length = 1;
7194         config->hdr.offset = cam_offset;
7195         config->hdr.client_id = 0xff;
7196         config->hdr.reserved1 = 0;
7197
7198         /* primary MAC */
7199         config->config_table[0].msb_mac_addr =
7200                                         swab16(*(u16 *)&mac[0]);
7201         config->config_table[0].middle_mac_addr =
7202                                         swab16(*(u16 *)&mac[2]);
7203         config->config_table[0].lsb_mac_addr =
7204                                         swab16(*(u16 *)&mac[4]);
7205         config->config_table[0].clients_bit_vector =
7206                                         cpu_to_le32(cl_bit_vec);
7207         config->config_table[0].vlan_id = 0;
7208         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7209         if (set)
7210                 config->config_table[0].flags = BP_PORT(bp);
7211         else
7212                 config->config_table[0].flags =
7213                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7214
7215         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7216            (set ? "setting" : "clearing"),
7217            config->config_table[0].msb_mac_addr,
7218            config->config_table[0].middle_mac_addr,
7219            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7220
7221         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7222                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7223                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7224 }
7225
7226 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7227                              int *state_p, int poll)
7228 {
7229         /* can take a while if any port is running */
7230         int cnt = 5000;
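        /* with msleep(1) per iteration this amounts to a timeout on the
         * order of five seconds (longer under scheduling pressure) */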
7231
7232         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7233            poll ? "polling" : "waiting", state, idx);
7234
7235         might_sleep();
7236         while (cnt--) {
7237                 if (poll) {
7238                         bnx2x_rx_int(bp->fp, 10);
7239                         /* if index is different from 0
7240                          * the reply for some commands will
7241                          * be on the non default queue
7242                          */
7243                         if (idx)
7244                                 bnx2x_rx_int(&bp->fp[idx], 10);
7245                 }
7246
7247                 mb(); /* state is changed by bnx2x_sp_event() */
7248                 if (*state_p == state) {
7249 #ifdef BNX2X_STOP_ON_ERROR
7250                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7251 #endif
7252                         return 0;
7253                 }
7254
7255                 msleep(1);
7256
7257                 if (bp->panic)
7258                         return -EIO;
7259         }
7260
7261         /* timeout! */
7262         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7263                   poll ? "polling" : "waiting", state, idx);
7264 #ifdef BNX2X_STOP_ON_ERROR
7265         bnx2x_panic();
7266 #endif
7267
7268         return -EBUSY;
7269 }
7270
7271 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7272 {
7273         bp->set_mac_pending++;
7274         smp_wmb();
7275
7276         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7277                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7278
7279         /* Wait for a completion */
7280         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7281 }
7282
7283 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7284 {
7285         bp->set_mac_pending++;
7286         smp_wmb();
7287
7288         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7289                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7290                                   1);
7291
7292         /* Wait for a completion */
7293         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7294 }
7295
7296 #ifdef BCM_CNIC
7297 /**
7298  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7299  * MAC(s). This function will wait until the ramrod completion
7300  * returns.
7301  *
7302  * @param bp driver handle
7303  * @param set set or clear the CAM entry
7304  *
7305  * @return 0 on success, -ENODEV if the ramrod doesn't return.
7306  */
7307 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7308 {
7309         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7310
7311         bp->set_mac_pending++;
7312         smp_wmb();
7313
7314         /* Send a SET_MAC ramrod */
7315         if (CHIP_IS_E1(bp))
7316                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7317                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7318                                   1);
7319         else
7320                 /* CAM allocation for E1H
7321                  * unicasts: by func number
7322                  * multicast: 20+FUNC*20, 20 each
7323                  */
7324                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7325                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7326
7327         /* Wait for a completion when setting */
7328         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7329
7330         return 0;
7331 }
7332 #endif
7333
7334 static int bnx2x_setup_leading(struct bnx2x *bp)
7335 {
7336         int rc;
7337
7338         /* reset IGU state */
7339         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7340
7341         /* SETUP ramrod */
7342         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7343
7344         /* Wait for completion */
7345         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7346
7347         return rc;
7348 }
7349
7350 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7351 {
7352         struct bnx2x_fastpath *fp = &bp->fp[index];
7353
7354         /* reset IGU state */
7355         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7356
7357         /* SETUP ramrod */
7358         fp->state = BNX2X_FP_STATE_OPENING;
7359         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7360                       fp->cl_id, 0);
7361
7362         /* Wait for completion */
7363         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7364                                  &(fp->state), 0);
7365 }
7366
7367 static int bnx2x_poll(struct napi_struct *napi, int budget);
7368
7369 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7370 {
7371
7372         switch (bp->multi_mode) {
7373         case ETH_RSS_MODE_DISABLED:
7374                 bp->num_queues = 1;
7375                 break;
7376
7377         case ETH_RSS_MODE_REGULAR:
7378                 if (num_queues)
7379                         bp->num_queues = min_t(u32, num_queues,
7380                                                   BNX2X_MAX_QUEUES(bp));
7381                 else
7382                         bp->num_queues = min_t(u32, num_online_cpus(),
7383                                                   BNX2X_MAX_QUEUES(bp));
7384                 break;
7385
7387         default:
7388                 bp->num_queues = 1;
7389                 break;
7390         }
7391 }
7392
7393 static int bnx2x_set_num_queues(struct bnx2x *bp)
7394 {
7395         int rc = 0;
7396
7397         switch (int_mode) {
7398         case INT_MODE_INTx:
7399         case INT_MODE_MSI:
7400                 bp->num_queues = 1;
7401                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7402                 break;
7403
7404         case INT_MODE_MSIX:
7405         default:
7406                 /* Set number of queues according to bp->multi_mode value */
7407                 bnx2x_set_num_queues_msix(bp);
7408
7409                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7410                    bp->num_queues);
7411
7412                 /* if we can't use MSI-X we only need one fp,
7413                  * so try to enable MSI-X with the requested number of fp's
7414                  * and fall back to MSI or legacy INTx with one fp
7415                  */
7416                 rc = bnx2x_enable_msix(bp);
7417                 if (rc)
7418                         /* failed to enable MSI-X */
7419                         bp->num_queues = 1;
7420                 break;
7421         }
7422         bp->dev->real_num_tx_queues = bp->num_queues;
7423         return rc;
7424 }
7425
7426 #ifdef BCM_CNIC
7427 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7428 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7429 #endif
7430
7431 /* must be called with rtnl_lock */
7432 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7433 {
7434         u32 load_code;
7435         int i, rc;
7436
7437 #ifdef BNX2X_STOP_ON_ERROR
7438         if (unlikely(bp->panic))
7439                 return -EPERM;
7440 #endif
7441
7442         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7443
7444         rc = bnx2x_set_num_queues(bp);
7445
7446         if (bnx2x_alloc_mem(bp))
7447                 return -ENOMEM;
7448
7449         for_each_queue(bp, i)
7450                 bnx2x_fp(bp, i, disable_tpa) =
7451                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7452
7453         for_each_queue(bp, i)
7454                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7455                                bnx2x_poll, 128);
7456
7457         bnx2x_napi_enable(bp);
7458
7459         if (bp->flags & USING_MSIX_FLAG) {
7460                 rc = bnx2x_req_msix_irqs(bp);
7461                 if (rc) {
7462                         pci_disable_msix(bp->pdev);
7463                         goto load_error1;
7464                 }
7465         } else {
7466                 /* Fall back to INTx if we failed to enable MSI-X due to
7467                    lack of memory (in bnx2x_set_num_queues()) */
7468                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7469                         bnx2x_enable_msi(bp);
7470                 bnx2x_ack_int(bp);
7471                 rc = bnx2x_req_irq(bp);
7472                 if (rc) {
7473                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7474                         if (bp->flags & USING_MSI_FLAG)
7475                                 pci_disable_msi(bp->pdev);
7476                         goto load_error1;
7477                 }
7478                 if (bp->flags & USING_MSI_FLAG) {
7479                         bp->dev->irq = bp->pdev->irq;
7480                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7481                                bp->dev->name, bp->pdev->irq);
7482                 }
7483         }
7484
7485         /* Send LOAD_REQUEST command to MCP
7486            Returns the type of LOAD command:
7487            if it is the first port to be initialized,
7488            common blocks should be initialized, otherwise - not
7489         */
7490         if (!BP_NOMCP(bp)) {
7491                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7492                 if (!load_code) {
7493                         BNX2X_ERR("MCP response failure, aborting\n");
7494                         rc = -EBUSY;
7495                         goto load_error2;
7496                 }
7497                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7498                         rc = -EBUSY; /* other port in diagnostic mode */
7499                         goto load_error2;
7500                 }
7501
7502         } else {
7503                 int port = BP_PORT(bp);
7504
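                /* with no MCP to arbitrate, static counters mimic it:
                 * load_count[0] counts loads on the whole device,
                 * load_count[1 + port] counts loads per port, and the
                 * first load at each level picks the matching LOAD code */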
7505                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7506                    load_count[0], load_count[1], load_count[2]);
7507                 load_count[0]++;
7508                 load_count[1 + port]++;
7509                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7510                    load_count[0], load_count[1], load_count[2]);
7511                 if (load_count[0] == 1)
7512                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7513                 else if (load_count[1 + port] == 1)
7514                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7515                 else
7516                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7517         }
7518
7519         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7520             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7521                 bp->port.pmf = 1;
7522         else
7523                 bp->port.pmf = 0;
7524         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7525
7526         /* Initialize HW */
7527         rc = bnx2x_init_hw(bp, load_code);
7528         if (rc) {
7529                 BNX2X_ERR("HW init failed, aborting\n");
7530                 goto load_error2;
7531         }
7532
7533         /* Setup NIC internals and enable interrupts */
7534         bnx2x_nic_init(bp, load_code);
7535
7536         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7537             (bp->common.shmem2_base))
7538                 SHMEM2_WR(bp, dcc_support,
7539                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7540                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7541
7542         /* Send LOAD_DONE command to MCP */
7543         if (!BP_NOMCP(bp)) {
7544                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7545                 if (!load_code) {
7546                         BNX2X_ERR("MCP response failure, aborting\n");
7547                         rc = -EBUSY;
7548                         goto load_error3;
7549                 }
7550         }
7551
7552         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7553
7554         rc = bnx2x_setup_leading(bp);
7555         if (rc) {
7556                 BNX2X_ERR("Setup leading failed!\n");
7557 #ifndef BNX2X_STOP_ON_ERROR
7558                 goto load_error3;
7559 #else
7560                 bp->panic = 1;
7561                 return -EBUSY;
7562 #endif
7563         }
7564
7565         if (CHIP_IS_E1H(bp))
7566                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7567                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7568                         bp->flags |= MF_FUNC_DIS;
7569                 }
7570
7571         if (bp->state == BNX2X_STATE_OPEN) {
7572 #ifdef BCM_CNIC
7573                 /* Enable Timer scan */
7574                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7575 #endif
7576                 for_each_nondefault_queue(bp, i) {
7577                         rc = bnx2x_setup_multi(bp, i);
7578                         if (rc)
7579 #ifdef BCM_CNIC
7580                                 goto load_error4;
7581 #else
7582                                 goto load_error3;
7583 #endif
7584                 }
7585
7586                 if (CHIP_IS_E1(bp))
7587                         bnx2x_set_eth_mac_addr_e1(bp, 1);
7588                 else
7589                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
7590 #ifdef BCM_CNIC
7591                 /* Set iSCSI L2 MAC */
7592                 mutex_lock(&bp->cnic_mutex);
7593                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7594                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7595                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7596                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
7597                                       CNIC_SB_ID(bp));
7598                 }
7599                 mutex_unlock(&bp->cnic_mutex);
7600 #endif
7601         }
7602
7603         if (bp->port.pmf)
7604                 bnx2x_initial_phy_init(bp, load_mode);
7605
7606         /* Start fast path */
7607         switch (load_mode) {
7608         case LOAD_NORMAL:
7609                 if (bp->state == BNX2X_STATE_OPEN) {
7610                         /* Tx queues should only be re-enabled */
7611                         netif_tx_wake_all_queues(bp->dev);
7612                 }
7613                 /* Initialize the receive filter. */
7614                 bnx2x_set_rx_mode(bp->dev);
7615                 break;
7616
7617         case LOAD_OPEN:
7618                 netif_tx_start_all_queues(bp->dev);
7619                 if (bp->state != BNX2X_STATE_OPEN)
7620                         netif_tx_disable(bp->dev);
7621                 /* Initialize the receive filter. */
7622                 bnx2x_set_rx_mode(bp->dev);
7623                 break;
7624
7625         case LOAD_DIAG:
7626                 /* Initialize the receive filter. */
7627                 bnx2x_set_rx_mode(bp->dev);
7628                 bp->state = BNX2X_STATE_DIAG;
7629                 break;
7630
7631         default:
7632                 break;
7633         }
7634
7635         if (!bp->port.pmf)
7636                 bnx2x__link_status_update(bp);
7637
7638         /* start the timer */
7639         mod_timer(&bp->timer, jiffies + bp->current_interval);
7640
7641 #ifdef BCM_CNIC
7642         bnx2x_setup_cnic_irq_info(bp);
7643         if (bp->state == BNX2X_STATE_OPEN)
7644                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7645 #endif
7646
7647         return 0;
7648
7649 #ifdef BCM_CNIC
7650 load_error4:
7651         /* Disable Timer scan */
7652         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7653 #endif
7654 load_error3:
7655         bnx2x_int_disable_sync(bp, 1);
7656         if (!BP_NOMCP(bp)) {
7657                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7658                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7659         }
7660         bp->port.pmf = 0;
7661         /* Free SKBs, SGEs, TPA pool and driver internals */
7662         bnx2x_free_skbs(bp);
7663         for_each_queue(bp, i)
7664                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7665 load_error2:
7666         /* Release IRQs */
7667         bnx2x_free_irq(bp);
7668 load_error1:
7669         bnx2x_napi_disable(bp);
7670         for_each_queue(bp, i)
7671                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7672         bnx2x_free_mem(bp);
7673
7674         return rc;
7675 }
7676
7677 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7678 {
7679         struct bnx2x_fastpath *fp = &bp->fp[index];
7680         int rc;
7681
7682         /* halt the connection */
7683         fp->state = BNX2X_FP_STATE_HALTING;
7684         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7685
7686         /* Wait for completion */
7687         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7688                                &(fp->state), 1);
7689         if (rc) /* timeout */
7690                 return rc;
7691
7692         /* delete cfc entry */
7693         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7694
7695         /* Wait for completion */
7696         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7697                                &(fp->state), 1);
7698         return rc;
7699 }
7700
7701 static int bnx2x_stop_leading(struct bnx2x *bp)
7702 {
7703         __le16 dsb_sp_prod_idx;
7704         /* if the other port is handling traffic,
7705            this can take a lot of time */
7706         int cnt = 500;
7707         int rc;
7708
7709         might_sleep();
7710
7711         /* Send HALT ramrod */
7712         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7713         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7714
7715         /* Wait for completion */
7716         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7717                                &(bp->fp[0].state), 1);
7718         if (rc) /* timeout */
7719                 return rc;
7720
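        /* snapshot the default status block producer; the PORT_DEL
         * completion is detected below by watching it advance */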
7721         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7722
7723         /* Send PORT_DELETE ramrod */
7724         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7725
7726         /* Wait for completion to arrive on default status block;
7727            we are going to reset the chip anyway,
7728            so there is not much to do if this times out
7729          */
7730         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7731                 if (!cnt) {
7732                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7733                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7734                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7735 #ifdef BNX2X_STOP_ON_ERROR
7736                         bnx2x_panic();
7737 #endif
7738                         rc = -EBUSY;
7739                         break;
7740                 }
7741                 cnt--;
7742                 msleep(1);
7743                 rmb(); /* Refresh the dsb_sp_prod */
7744         }
7745         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7746         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7747
7748         return rc;
7749 }
7750
7751 static void bnx2x_reset_func(struct bnx2x *bp)
7752 {
7753         int port = BP_PORT(bp);
7754         int func = BP_FUNC(bp);
7755         int base, i;
7756
7757         /* Configure IGU */
7758         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7759         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7760
7761 #ifdef BCM_CNIC
7762         /* Disable Timer scan */
7763         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7764         /*
7765          * Wait for at least 10ms and up to 2 second for the timers scan to
7766          * complete
7767          */
7768         for (i = 0; i < 200; i++) {
7769                 msleep(10);
7770                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7771                         break;
7772         }
7773 #endif
7774         /* Clear ILT */
7775         base = FUNC_ILT_BASE(func);
7776         for (i = base; i < base + ILT_PER_FUNC; i++)
7777                 bnx2x_ilt_wr(bp, i, 0);
7778 }
7779
7780 static void bnx2x_reset_port(struct bnx2x *bp)
7781 {
7782         int port = BP_PORT(bp);
7783         u32 val;
7784
7785         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7786
7787         /* Do not rcv packets to BRB */
7788         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7789         /* Do not direct rcv packets that are not for MCP to the BRB */
7790         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7791                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7792
7793         /* Configure AEU */
7794         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7795
7796         msleep(100);
7797         /* Check for BRB port occupancy */
7798         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7799         if (val)
7800                 DP(NETIF_MSG_IFDOWN,
7801                    "BRB1 is not empty  %d blocks are occupied\n", val);
7802
7803         /* TODO: Close Doorbell port? */
7804 }
7805
7806 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7807 {
7808         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7809            BP_FUNC(bp), reset_code);
7810
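        /* mirror image of the load cascade: UNLOAD_COMMON resets the
         * port, the function and the common blocks, UNLOAD_PORT the
         * port and the function, UNLOAD_FUNCTION only the function */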
7811         switch (reset_code) {
7812         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7813                 bnx2x_reset_port(bp);
7814                 bnx2x_reset_func(bp);
7815                 bnx2x_reset_common(bp);
7816                 break;
7817
7818         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7819                 bnx2x_reset_port(bp);
7820                 bnx2x_reset_func(bp);
7821                 break;
7822
7823         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7824                 bnx2x_reset_func(bp);
7825                 break;
7826
7827         default:
7828                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7829                 break;
7830         }
7831 }
7832
7833 /* must be called with rtnl_lock */
7834 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7835 {
7836         int port = BP_PORT(bp);
7837         u32 reset_code = 0;
7838         int i, cnt, rc;
7839
7840 #ifdef BCM_CNIC
7841         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7842 #endif
7843         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7844
7845         /* Set "drop all" */
7846         bp->rx_mode = BNX2X_RX_MODE_NONE;
7847         bnx2x_set_storm_rx_mode(bp);
7848
7849         /* Disable HW interrupts, NAPI and Tx */
7850         bnx2x_netif_stop(bp, 1);
7851
7852         del_timer_sync(&bp->timer);
7853         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7854                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7855         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7856
7857         /* Release IRQs */
7858         bnx2x_free_irq(bp);
7859
7860         /* Wait until tx fastpath tasks complete */
7861         for_each_queue(bp, i) {
7862                 struct bnx2x_fastpath *fp = &bp->fp[i];
7863
7864                 cnt = 1000;
7865                 while (bnx2x_has_tx_work_unload(fp)) {
7866
7867                         bnx2x_tx_int(fp);
7868                         if (!cnt) {
7869                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7870                                           i);
7871 #ifdef BNX2X_STOP_ON_ERROR
7872                                 bnx2x_panic();
7873                                 return -EBUSY;
7874 #else
7875                                 break;
7876 #endif
7877                         }
7878                         cnt--;
7879                         msleep(1);
7880                 }
7881         }
7882         /* Give HW time to discard old tx messages */
7883         msleep(1);
7884
7885         if (CHIP_IS_E1(bp)) {
7886                 struct mac_configuration_cmd *config =
7887                                                 bnx2x_sp(bp, mcast_config);
7888
7889                 bnx2x_set_eth_mac_addr_e1(bp, 0);
7890
7891                 for (i = 0; i < config->hdr.length; i++)
7892                         CAM_INVALIDATE(config->config_table[i]);
7893
7894                 config->hdr.length = i;
7895                 if (CHIP_REV_IS_SLOW(bp))
7896                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7897                 else
7898                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7899                 config->hdr.client_id = bp->fp->cl_id;
7900                 config->hdr.reserved1 = 0;
7901
7902                 bp->set_mac_pending++;
7903                 smp_wmb();
7904
7905                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7906                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7907                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7908
7909         } else { /* E1H */
7910                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7911
7912                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7913
7914                 for (i = 0; i < MC_HASH_SIZE; i++)
7915                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7916
7917                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7918         }
7919 #ifdef BCM_CNIC
7920         /* Clear iSCSI L2 MAC */
7921         mutex_lock(&bp->cnic_mutex);
7922         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7923                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7924                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7925         }
7926         mutex_unlock(&bp->cnic_mutex);
7927 #endif
7928
7929         if (unload_mode == UNLOAD_NORMAL)
7930                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7931
7932         else if (bp->flags & NO_WOL_FLAG)
7933                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7934
7935         else if (bp->wol) {
7936                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7937                 u8 *mac_addr = bp->dev->dev_addr;
7938                 u32 val;
7939                 /* The mac address is written to entries 1-4 to
7940                    preserve entry 0 which is used by the PMF */
7941                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7942
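                /* the 6-byte MAC is split across two MAC_MATCH registers:
                 * bytes 0-1 go into the first, bytes 2-5 into the second */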
7943                 val = (mac_addr[0] << 8) | mac_addr[1];
7944                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7945
7946                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7947                       (mac_addr[4] << 8) | mac_addr[5];
7948                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7949
7950                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7951
7952         } else
7953                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7954
7955         /* Close multi and leading connections
7956            Completions for ramrods are collected in a synchronous way */
7957         for_each_nondefault_queue(bp, i)
7958                 if (bnx2x_stop_multi(bp, i))
7959                         goto unload_error;
7960
7961         rc = bnx2x_stop_leading(bp);
7962         if (rc) {
7963                 BNX2X_ERR("Stop leading failed!\n");
7964 #ifdef BNX2X_STOP_ON_ERROR
7965                 return -EBUSY;
7966 #else
7967                 goto unload_error;
7968 #endif
7969         }
7970
7971 unload_error:
7972         if (!BP_NOMCP(bp))
7973                 reset_code = bnx2x_fw_command(bp, reset_code);
7974         else {
7975                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7976                    load_count[0], load_count[1], load_count[2]);
7977                 load_count[0]--;
7978                 load_count[1 + port]--;
7979                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7980                    load_count[0], load_count[1], load_count[2]);
7981                 if (load_count[0] == 0)
7982                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7983                 else if (load_count[1 + port] == 0)
7984                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7985                 else
7986                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7987         }
7988
7989         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7990             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7991                 bnx2x__link_reset(bp);
7992
7993         /* Reset the chip */
7994         bnx2x_reset_chip(bp, reset_code);
7995
7996         /* Report UNLOAD_DONE to MCP */
7997         if (!BP_NOMCP(bp))
7998                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7999
8000         bp->port.pmf = 0;
8001
8002         /* Free SKBs, SGEs, TPA pool and driver internals */
8003         bnx2x_free_skbs(bp);
8004         for_each_queue(bp, i)
8005                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8006         for_each_queue(bp, i)
8007                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8008         bnx2x_free_mem(bp);
8009
8010         bp->state = BNX2X_STATE_CLOSED;
8011
8012         netif_carrier_off(bp->dev);
8013
8014         return 0;
8015 }
8016
8017 static void bnx2x_reset_task(struct work_struct *work)
8018 {
8019         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8020
8021 #ifdef BNX2X_STOP_ON_ERROR
8022         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8023                   " so reset not done to allow debug dump,\n"
8024                   " you will need to reboot when done\n");
8025         return;
8026 #endif
8027
8028         rtnl_lock();
8029
8030         if (!netif_running(bp->dev))
8031                 goto reset_task_exit;
8032
8033         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8034         bnx2x_nic_load(bp, LOAD_NORMAL);
8035
8036 reset_task_exit:
8037         rtnl_unlock();
8038 }
8039
8040 /* end of nic load/unload */
8041
8042 /* ethtool_ops */
8043
8044 /*
8045  * Init service functions
8046  */
8047
8048 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8049 {
8050         switch (func) {
8051         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8052         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8053         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8054         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8055         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8056         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8057         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8058         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8059         default:
8060                 BNX2X_ERR("Unsupported function index: %d\n", func);
8061                 return (u32)(-1);
8062         }
8063 }
8064
8065 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8066 {
8067         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8068
8069         /* Flush all outstanding writes */
8070         mmiowb();
8071
8072         /* Pretend to be function 0 */
8073         REG_WR(bp, reg, 0);
8074         /* Flush the GRC transaction (in the chip) */
8075         new_val = REG_RD(bp, reg);
8076         if (new_val != 0) {
8077                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8078                           new_val);
8079                 BUG();
8080         }
8081
8082         /* From now on we are in the "like-E1" mode */
8083         bnx2x_int_disable(bp);
8084
8085         /* Flush all outstanding writes */
8086         mmiowb();
8087
8088         /* Restore the original function settings */
8089         REG_WR(bp, reg, orig_func);
8090         new_val = REG_RD(bp, reg);
8091         if (new_val != orig_func) {
8092                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8093                           orig_func, new_val);
8094                 BUG();
8095         }
8096 }
8097
8098 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8099 {
8100         if (CHIP_IS_E1H(bp))
8101                 bnx2x_undi_int_disable_e1h(bp, func);
8102         else
8103                 bnx2x_int_disable(bp);
8104 }
8105
8106 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8107 {
8108         u32 val;
8109
8110         /* Check if there is any driver already loaded */
8111         val = REG_RD(bp, MISC_REG_UNPREPARED);
8112         if (val == 0x1) {
8113                 /* Check if it is the UNDI driver
8114                  * UNDI driver initializes CID offset for normal bell to 0x7
8115                  */
8116                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8117                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8118                 if (val == 0x7) {
8119                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8120                         /* save our func */
8121                         int func = BP_FUNC(bp);
8122                         u32 swap_en;
8123                         u32 swap_val;
8124
8125                         /* clear the UNDI indication */
8126                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8127
8128                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
8129
8130                         /* try unload UNDI on port 0 */
8131                         bp->func = 0;
8132                         bp->fw_seq =
8133                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8134                                 DRV_MSG_SEQ_NUMBER_MASK);
8135                         reset_code = bnx2x_fw_command(bp, reset_code);
8136
8137                         /* if UNDI is loaded on the other port */
8138                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8139
8140                                 /* send "DONE" for previous unload */
8141                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8142
8143                                 /* unload UNDI on port 1 */
8144                                 bp->func = 1;
8145                                 bp->fw_seq =
8146                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8147                                         DRV_MSG_SEQ_NUMBER_MASK);
8148                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8149
8150                                 bnx2x_fw_command(bp, reset_code);
8151                         }
8152
8153                         /* now it's safe to release the lock */
8154                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8155
8156                         bnx2x_undi_int_disable(bp, func);
8157
8158                         /* close input traffic and wait for it */
8159                         /* Do not rcv packets to BRB */
8160                         REG_WR(bp,
8161                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8162                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8163                         /* Do not direct rcv packets that are not for MCP to
8164                          * the BRB */
8165                         REG_WR(bp,
8166                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8167                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8168                         /* clear AEU */
8169                         REG_WR(bp,
8170                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8171                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8172                         msleep(10);
8173
8174                         /* save NIG port swap info */
8175                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8176                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8177                         /* reset device */
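                        /* the CLEAR masks put most blocks into reset; the
                         * NIG alone is taken back out just below so the
                         * saved port-swap strapping can be restored */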
8178                         REG_WR(bp,
8179                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8180                                0xd3ffffff);
8181                         REG_WR(bp,
8182                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8183                                0x1403);
8184                         /* take the NIG out of reset and restore swap values */
8185                         REG_WR(bp,
8186                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8187                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
8188                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8189                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8190
8191                         /* send unload done to the MCP */
8192                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8193
8194                         /* restore our func and fw_seq */
8195                         bp->func = func;
8196                         bp->fw_seq =
8197                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8198                                 DRV_MSG_SEQ_NUMBER_MASK);
8199
8200                 } else
8201                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8202         }
8203 }
8204
8205 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8206 {
8207         u32 val, val2, val3, val4, id;
8208         u16 pmc;
8209
8210         /* Get the chip revision id and number. */
8211         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8212         val = REG_RD(bp, MISC_REG_CHIP_NUM);
8213         id = ((val & 0xffff) << 16);
8214         val = REG_RD(bp, MISC_REG_CHIP_REV);
8215         id |= ((val & 0xf) << 12);
8216         val = REG_RD(bp, MISC_REG_CHIP_METAL);
8217         id |= ((val & 0xff) << 4);
8218         val = REG_RD(bp, MISC_REG_BOND_ID);
8219         id |= (val & 0xf);
8220         bp->common.chip_id = id;
8221         bp->link_params.chip_id = bp->common.chip_id;
8222         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8223
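	/*
	 * Single-port detection: keyed off chip_id bit 0 or the strap
	 * readings at register 0x2874 (the exact strap semantics are
	 * chip-specific).
	 */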
8224         val = (REG_RD(bp, 0x2874) & 0x55);
8225         if ((bp->common.chip_id & 0x1) ||
8226             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8227                 bp->flags |= ONE_PORT_FLAG;
8228                 BNX2X_DEV_INFO("single port device\n");
8229         }
8230
8231         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8232         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8233                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
8234         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8235                        bp->common.flash_size, bp->common.flash_size);
8236
8237         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8238         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8239         bp->link_params.shmem_base = bp->common.shmem_base;
8240         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8241                        bp->common.shmem_base, bp->common.shmem2_base);
8242
8243         if (!bp->common.shmem_base ||
8244             (bp->common.shmem_base < 0xA0000) ||
8245             (bp->common.shmem_base >= 0xC0000)) {
8246                 BNX2X_DEV_INFO("MCP not active\n");
8247                 bp->flags |= NO_MCP_FLAG;
8248                 return;
8249         }
8250
8251         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8252         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8253                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8254                 BNX2X_ERR("BAD MCP validity signature\n");
8255
8256         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8257         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8258
8259         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8260                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8261                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8262
8263         bp->link_params.feature_config_flags = 0;
8264         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8265         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8266                 bp->link_params.feature_config_flags |=
8267                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8268         else
8269                 bp->link_params.feature_config_flags &=
8270                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8271
8272         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8273         bp->common.bc_ver = val;
8274         BNX2X_DEV_INFO("bc_ver %X\n", val);
8275         if (val < BNX2X_BC_VER) {
8276                 /* for now, only warn;
8277                  * later we may need to enforce this */
8278                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8279                           " please upgrade BC\n", BNX2X_BC_VER, val);
8280         }
8281         bp->link_params.feature_config_flags |=
8282                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8283                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8284
8285         if (BP_E1HVN(bp) == 0) {
8286                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8287                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8288         } else {
8289                 /* no WOL capability for E1HVN != 0 */
8290                 bp->flags |= NO_WOL_FLAG;
8291         }
8292         BNX2X_DEV_INFO("%sWoL capable\n",
8293                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8294
8295         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8296         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8297         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8298         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8299
8300         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8301                val, val2, val3, val4);
8302 }
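
/*
 * Illustrative sketch (not used by the driver): hypothetical helpers that
 * decode the chip_id assembled in bnx2x_get_common_hwinfo() above, using
 * the field layout noted there (num:16-31, rev:12-15, metal:4-11,
 * bond_id:0-3).
 */
static inline u16 bnx2x_chip_id_num(u32 chip_id)   { return chip_id >> 16; }
static inline u8 bnx2x_chip_id_rev(u32 chip_id)    { return (chip_id >> 12) & 0xf; }
static inline u8 bnx2x_chip_id_metal(u32 chip_id)  { return (chip_id >> 4) & 0xff; }
static inline u8 bnx2x_chip_id_bond(u32 chip_id)   { return chip_id & 0xf; }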
8303
8304 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8305                                                     u32 switch_cfg)
8306 {
8307         int port = BP_PORT(bp);
8308         u32 ext_phy_type;
8309
8310         switch (switch_cfg) {
8311         case SWITCH_CFG_1G:
8312                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8313
8314                 ext_phy_type =
8315                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8316                 switch (ext_phy_type) {
8317                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8318                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8319                                        ext_phy_type);
8320
8321                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8322                                                SUPPORTED_10baseT_Full |
8323                                                SUPPORTED_100baseT_Half |
8324                                                SUPPORTED_100baseT_Full |
8325                                                SUPPORTED_1000baseT_Full |
8326                                                SUPPORTED_2500baseX_Full |
8327                                                SUPPORTED_TP |
8328                                                SUPPORTED_FIBRE |
8329                                                SUPPORTED_Autoneg |
8330                                                SUPPORTED_Pause |
8331                                                SUPPORTED_Asym_Pause);
8332                         break;
8333
8334                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8335                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8336                                        ext_phy_type);
8337
8338                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8339                                                SUPPORTED_10baseT_Full |
8340                                                SUPPORTED_100baseT_Half |
8341                                                SUPPORTED_100baseT_Full |
8342                                                SUPPORTED_1000baseT_Full |
8343                                                SUPPORTED_TP |
8344                                                SUPPORTED_FIBRE |
8345                                                SUPPORTED_Autoneg |
8346                                                SUPPORTED_Pause |
8347                                                SUPPORTED_Asym_Pause);
8348                         break;
8349
8350                 default:
8351                         BNX2X_ERR("NVRAM config error. "
8352                                   "BAD SerDes ext_phy_config 0x%x\n",
8353                                   bp->link_params.ext_phy_config);
8354                         return;
8355                 }
8356
8357                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8358                                            port*0x10);
8359                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8360                 break;
8361
8362         case SWITCH_CFG_10G:
8363                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8364
8365                 ext_phy_type =
8366                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8367                 switch (ext_phy_type) {
8368                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8369                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8370                                        ext_phy_type);
8371
8372                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8373                                                SUPPORTED_10baseT_Full |
8374                                                SUPPORTED_100baseT_Half |
8375                                                SUPPORTED_100baseT_Full |
8376                                                SUPPORTED_1000baseT_Full |
8377                                                SUPPORTED_2500baseX_Full |
8378                                                SUPPORTED_10000baseT_Full |
8379                                                SUPPORTED_TP |
8380                                                SUPPORTED_FIBRE |
8381                                                SUPPORTED_Autoneg |
8382                                                SUPPORTED_Pause |
8383                                                SUPPORTED_Asym_Pause);
8384                         break;
8385
8386                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8387                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8388                                        ext_phy_type);
8389
8390                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8391                                                SUPPORTED_1000baseT_Full |
8392                                                SUPPORTED_FIBRE |
8393                                                SUPPORTED_Autoneg |
8394                                                SUPPORTED_Pause |
8395                                                SUPPORTED_Asym_Pause);
8396                         break;
8397
8398                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8399                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8400                                        ext_phy_type);
8401
8402                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8403                                                SUPPORTED_2500baseX_Full |
8404                                                SUPPORTED_1000baseT_Full |
8405                                                SUPPORTED_FIBRE |
8406                                                SUPPORTED_Autoneg |
8407                                                SUPPORTED_Pause |
8408                                                SUPPORTED_Asym_Pause);
8409                         break;
8410
8411                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8412                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8413                                        ext_phy_type);
8414
8415                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8416                                                SUPPORTED_FIBRE |
8417                                                SUPPORTED_Pause |
8418                                                SUPPORTED_Asym_Pause);
8419                         break;
8420
8421                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8422                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8423                                        ext_phy_type);
8424
8425                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8426                                                SUPPORTED_1000baseT_Full |
8427                                                SUPPORTED_FIBRE |
8428                                                SUPPORTED_Pause |
8429                                                SUPPORTED_Asym_Pause);
8430                         break;
8431
8432                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8433                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8434                                        ext_phy_type);
8435
8436                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8437                                                SUPPORTED_1000baseT_Full |
8438                                                SUPPORTED_Autoneg |
8439                                                SUPPORTED_FIBRE |
8440                                                SUPPORTED_Pause |
8441                                                SUPPORTED_Asym_Pause);
8442                         break;
8443
8444                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8445                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8446                                        ext_phy_type);
8447
8448                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8449                                                SUPPORTED_1000baseT_Full |
8450                                                SUPPORTED_Autoneg |
8451                                                SUPPORTED_FIBRE |
8452                                                SUPPORTED_Pause |
8453                                                SUPPORTED_Asym_Pause);
8454                         break;
8455
8456                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8457                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8458                                        ext_phy_type);
8459
8460                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8461                                                SUPPORTED_TP |
8462                                                SUPPORTED_Autoneg |
8463                                                SUPPORTED_Pause |
8464                                                SUPPORTED_Asym_Pause);
8465                         break;
8466
8467                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8468                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8469                                        ext_phy_type);
8470
8471                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8472                                                SUPPORTED_10baseT_Full |
8473                                                SUPPORTED_100baseT_Half |
8474                                                SUPPORTED_100baseT_Full |
8475                                                SUPPORTED_1000baseT_Full |
8476                                                SUPPORTED_10000baseT_Full |
8477                                                SUPPORTED_TP |
8478                                                SUPPORTED_Autoneg |
8479                                                SUPPORTED_Pause |
8480                                                SUPPORTED_Asym_Pause);
8481                         break;
8482
8483                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8484                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8485                                   bp->link_params.ext_phy_config);
8486                         break;
8487
8488                 default:
8489                         BNX2X_ERR("NVRAM config error. "
8490                                   "BAD XGXS ext_phy_config 0x%x\n",
8491                                   bp->link_params.ext_phy_config);
8492                         return;
8493                 }
8494
8495                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8496                                            port*0x18);
8497                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8498
8499                 break;
8500
8501         default:
8502                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8503                           bp->port.link_config);
8504                 return;
8505         }
8506         bp->link_params.phy_addr = bp->port.phy_addr;
8507
8508         /* mask what we support according to speed_cap_mask */
8509         if (!(bp->link_params.speed_cap_mask &
8510                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8511                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8512
8513         if (!(bp->link_params.speed_cap_mask &
8514                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8515                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8516
8517         if (!(bp->link_params.speed_cap_mask &
8518                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8519                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8520
8521         if (!(bp->link_params.speed_cap_mask &
8522                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8523                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8524
8525         if (!(bp->link_params.speed_cap_mask &
8526                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8527                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8528                                         SUPPORTED_1000baseT_Full);
8529
8530         if (!(bp->link_params.speed_cap_mask &
8531                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8532                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8533
8534         if (!(bp->link_params.speed_cap_mask &
8535                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8536                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8537
8538         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8539 }
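
/*
 * Sketch: the speed_cap_mask filtering at the end of
 * bnx2x_link_settings_supported() written table-driven.  Purely
 * illustrative (bnx2x_mask_supported() is hypothetical); it uses the same
 * PORT_HW_CFG_* / SUPPORTED_* flags and is behaviorally equivalent to the
 * open-coded version above.
 */
static const struct {
	u32 cap;	/* PORT_HW_CFG_SPEED_CAPABILITY_D0_* bit */
	u32 supported;	/* SUPPORTED_* bits dropped when cap is absent */
} bnx2x_cap_to_supported[] = {
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF, SUPPORTED_10baseT_Half },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL, SUPPORTED_10baseT_Full },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF, SUPPORTED_100baseT_Half },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL, SUPPORTED_100baseT_Full },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G,
	  SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G, SUPPORTED_2500baseX_Full },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G, SUPPORTED_10000baseT_Full },
};

static void bnx2x_mask_supported(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_cap_to_supported); i++)
		if (!(bp->link_params.speed_cap_mask &
		      bnx2x_cap_to_supported[i].cap))
			bp->port.supported &=
				~bnx2x_cap_to_supported[i].supported;
}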
8540
8541 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8542 {
8543         bp->link_params.req_duplex = DUPLEX_FULL;
8544
8545         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8546         case PORT_FEATURE_LINK_SPEED_AUTO:
8547                 if (bp->port.supported & SUPPORTED_Autoneg) {
8548                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8549                         bp->port.advertising = bp->port.supported;
8550                 } else {
8551                         u32 ext_phy_type =
8552                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8553
8554                         if ((ext_phy_type ==
8555                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8556                             (ext_phy_type ==
8557                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8558                                 /* force 10G, no AN */
8559                                 bp->link_params.req_line_speed = SPEED_10000;
8560                                 bp->port.advertising =
8561                                                 (ADVERTISED_10000baseT_Full |
8562                                                  ADVERTISED_FIBRE);
8563                                 break;
8564                         }
8565                         BNX2X_ERR("NVRAM config error. "
8566                                   "Invalid link_config 0x%x"
8567                                   "  Autoneg not supported\n",
8568                                   bp->port.link_config);
8569                         return;
8570                 }
8571                 break;
8572
8573         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8574                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8575                         bp->link_params.req_line_speed = SPEED_10;
8576                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8577                                                 ADVERTISED_TP);
8578                 } else {
8579                         BNX2X_ERR("NVRAM config error. "
8580                                   "Invalid link_config 0x%x"
8581                                   "  speed_cap_mask 0x%x\n",
8582                                   bp->port.link_config,
8583                                   bp->link_params.speed_cap_mask);
8584                         return;
8585                 }
8586                 break;
8587
8588         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8589                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8590                         bp->link_params.req_line_speed = SPEED_10;
8591                         bp->link_params.req_duplex = DUPLEX_HALF;
8592                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8593                                                 ADVERTISED_TP);
8594                 } else {
8595                         BNX2X_ERR("NVRAM config error. "
8596                                   "Invalid link_config 0x%x"
8597                                   "  speed_cap_mask 0x%x\n",
8598                                   bp->port.link_config,
8599                                   bp->link_params.speed_cap_mask);
8600                         return;
8601                 }
8602                 break;
8603
8604         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8605                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8606                         bp->link_params.req_line_speed = SPEED_100;
8607                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8608                                                 ADVERTISED_TP);
8609                 } else {
8610                         BNX2X_ERR("NVRAM config error. "
8611                                   "Invalid link_config 0x%x"
8612                                   "  speed_cap_mask 0x%x\n",
8613                                   bp->port.link_config,
8614                                   bp->link_params.speed_cap_mask);
8615                         return;
8616                 }
8617                 break;
8618
8619         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8620                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8621                         bp->link_params.req_line_speed = SPEED_100;
8622                         bp->link_params.req_duplex = DUPLEX_HALF;
8623                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8624                                                 ADVERTISED_TP);
8625                 } else {
8626                         BNX2X_ERR("NVRAM config error. "
8627                                   "Invalid link_config 0x%x"
8628                                   "  speed_cap_mask 0x%x\n",
8629                                   bp->port.link_config,
8630                                   bp->link_params.speed_cap_mask);
8631                         return;
8632                 }
8633                 break;
8634
8635         case PORT_FEATURE_LINK_SPEED_1G:
8636                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8637                         bp->link_params.req_line_speed = SPEED_1000;
8638                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8639                                                 ADVERTISED_TP);
8640                 } else {
8641                         BNX2X_ERR("NVRAM config error. "
8642                                   "Invalid link_config 0x%x"
8643                                   "  speed_cap_mask 0x%x\n",
8644                                   bp->port.link_config,
8645                                   bp->link_params.speed_cap_mask);
8646                         return;
8647                 }
8648                 break;
8649
8650         case PORT_FEATURE_LINK_SPEED_2_5G:
8651                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8652                         bp->link_params.req_line_speed = SPEED_2500;
8653                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8654                                                 ADVERTISED_TP);
8655                 } else {
8656                         BNX2X_ERR("NVRAM config error. "
8657                                   "Invalid link_config 0x%x"
8658                                   "  speed_cap_mask 0x%x\n",
8659                                   bp->port.link_config,
8660                                   bp->link_params.speed_cap_mask);
8661                         return;
8662                 }
8663                 break;
8664
8665         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8666         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8667         case PORT_FEATURE_LINK_SPEED_10G_KR:
8668                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8669                         bp->link_params.req_line_speed = SPEED_10000;
8670                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8671                                                 ADVERTISED_FIBRE);
8672                 } else {
8673                         BNX2X_ERR("NVRAM config error. "
8674                                   "Invalid link_config 0x%x"
8675                                   "  speed_cap_mask 0x%x\n",
8676                                   bp->port.link_config,
8677                                   bp->link_params.speed_cap_mask);
8678                         return;
8679                 }
8680                 break;
8681
8682         default:
8683                 BNX2X_ERR("NVRAM config error. "
8684                           "BAD link speed link_config 0x%x\n",
8685                           bp->port.link_config);
8686                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8687                 bp->port.advertising = bp->port.supported;
8688                 break;
8689         }
8690
8691         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8692                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8693         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8694             !(bp->port.supported & SUPPORTED_Autoneg))
8695                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8696
8697         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8698                        "  advertising 0x%x\n",
8699                        bp->link_params.req_line_speed,
8700                        bp->link_params.req_duplex,
8701                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8702 }
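
/*
 * Summary of the NVRAM link_config -> request mapping above: AUTO selects
 * SPEED_AUTO_NEG and advertises the full supported mask (with a forced-10G
 * fallback for the AN-less 8705/8706 PHYs); the 10M/100M half/full entries
 * force the matching speed and duplex; the 1G, 2.5G and 10G entries force
 * full duplex only.
 */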
8703
8704 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8705 {
8706         mac_hi = cpu_to_be16(mac_hi);
8707         mac_lo = cpu_to_be32(mac_lo);
8708         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8709         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8710 }
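
/*
 * Worked example for bnx2x_set_mac_buf(): mac_hi = 0x0010 and
 * mac_lo = 0x18badf00 yield the address 00:10:18:ba:df:00 -- the
 * cpu_to_be*() conversions guarantee that the memcpy()s lay the bytes
 * out most-significant first regardless of host endianness.
 */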
8711
8712 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8713 {
8714         int port = BP_PORT(bp);
8715         u32 val, val2;
8716         u32 config;
8717         u16 i;
8718         u32 ext_phy_type;
8719
8720         bp->link_params.bp = bp;
8721         bp->link_params.port = port;
8722
8723         bp->link_params.lane_config =
8724                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8725         bp->link_params.ext_phy_config =
8726                 SHMEM_RD(bp,
8727                          dev_info.port_hw_config[port].external_phy_config);
8728         /* BCM8727_NOC => BCM8727 variant with no over-current detection */
8729         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8730             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8731                 bp->link_params.ext_phy_config &=
8732                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8733                 bp->link_params.ext_phy_config |=
8734                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8735                 bp->link_params.feature_config_flags |=
8736                         FEATURE_CONFIG_BCM8727_NOC;
8737         }
8738
8739         bp->link_params.speed_cap_mask =
8740                 SHMEM_RD(bp,
8741                          dev_info.port_hw_config[port].speed_capability_mask);
8742
8743         bp->port.link_config =
8744                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8745
8746         /* Get the 4-lane XGXS RX/TX config (two 16-bit lane values per 32-bit word) */
8747         for (i = 0; i < 2; i++) {
8748                 val = SHMEM_RD(bp,
8749                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8750                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8751                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8752
8753                 val = SHMEM_RD(bp,
8754                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8755                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8756                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8757         }
8758
8759         /* If the device is capable of WoL, set the default state according
8760          * to the HW
8761          */
8762         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8763         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8764                    (config & PORT_FEATURE_WOL_ENABLED));
8765
8766         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8767                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8768                        bp->link_params.lane_config,
8769                        bp->link_params.ext_phy_config,
8770                        bp->link_params.speed_cap_mask, bp->port.link_config);
8771
8772         bp->link_params.switch_cfg |= (bp->port.link_config &
8773                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8774         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8775
8776         bnx2x_link_settings_requested(bp);
8777
8778         /*
8779          * If connected directly, work with the internal PHY, otherwise, work
8780          * with the external PHY
8781          */
8782         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8783         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8784                 bp->mdio.prtad = bp->link_params.phy_addr;
8785
8786         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8787                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8788                 bp->mdio.prtad =
8789                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8790
8791         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8792         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8793         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8794         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8795         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8796
8797 #ifdef BCM_CNIC
8798         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8799         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8800         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8801 #endif
8802 }
8803
8804 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8805 {
8806         int func = BP_FUNC(bp);
8807         u32 val, val2;
8808         int rc = 0;
8809
8810         bnx2x_get_common_hwinfo(bp);
8811
8812         bp->e1hov = 0;
8813         bp->e1hmf = 0;
8814         if (CHIP_IS_E1H(bp)) {
8815                 bp->mf_config =
8816                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8817
8818                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8819                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8820                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8821                         bp->e1hmf = 1;
8822                 BNX2X_DEV_INFO("%s function mode\n",
8823                                IS_E1HMF(bp) ? "multi" : "single");
8824
8825                 if (IS_E1HMF(bp)) {
8826                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8827                                                                 e1hov_tag) &
8828                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8829                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8830                                 bp->e1hov = val;
8831                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8832                                                "(0x%04x)\n",
8833                                                func, bp->e1hov, bp->e1hov);
8834                         } else {
8835                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8836                                           "  aborting\n", func);
8837                                 rc = -EPERM;
8838                         }
8839                 } else {
8840                         if (BP_E1HVN(bp)) {
8841                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8842                                           "  aborting\n", BP_E1HVN(bp));
8843                                 rc = -EPERM;
8844                         }
8845                 }
8846         }
8847
8848         if (!BP_NOMCP(bp)) {
8849                 bnx2x_get_port_hwinfo(bp);
8850
8851                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8852                               DRV_MSG_SEQ_NUMBER_MASK);
8853                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8854         }
8855
8856         if (IS_E1HMF(bp)) {
8857                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8858                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8859                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8860                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8861                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8862                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8863                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8864                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8865                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8866                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8867                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8868                                ETH_ALEN);
8869                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8870                                ETH_ALEN);
8871                 }
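		/* Note: the byte extraction above is byte-for-byte
		 * equivalent to bnx2x_set_mac_buf(bp->dev->dev_addr,
		 * val, val2). */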
8872
8873                 return rc;
8874         }
8875
8876         if (BP_NOMCP(bp)) {
8877                 /* only supposed to happen on emulation/FPGA */
8878                 BNX2X_ERR("warning: random MAC workaround active\n");
8879                 random_ether_addr(bp->dev->dev_addr);
8880                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8881         }
8882
8883         return rc;
8884 }
8885
8886 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8887 {
8888         int func = BP_FUNC(bp);
8889         int timer_interval;
8890         int rc;
8891
8892         /* Disable interrupt handling until HW is initialized */
8893         atomic_set(&bp->intr_sem, 1);
8894         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8895
8896         mutex_init(&bp->port.phy_mutex);
8897         mutex_init(&bp->fw_mb_mutex);
8898 #ifdef BCM_CNIC
8899         mutex_init(&bp->cnic_mutex);
8900 #endif
8901
8902         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8903         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8904
8905         rc = bnx2x_get_hwinfo(bp);
8906
8907         /* need to reset chip if undi was active */
8908         if (!BP_NOMCP(bp))
8909                 bnx2x_undi_unload(bp);
8910
8911         if (CHIP_REV_IS_FPGA(bp))
8912                 printk(KERN_ERR PFX "FPGA detected\n");
8913
8914         if (BP_NOMCP(bp) && (func == 0))
8915                 printk(KERN_ERR PFX
8916                        "MCP disabled, must load devices in order!\n");
8917
8918         /* Set multi queue mode */
8919         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8920             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8921                 printk(KERN_ERR PFX
8922                       "Multi disabled since int_mode requested is not MSI-X\n");
8923                 multi_mode = ETH_RSS_MODE_DISABLED;
8924         }
8925         bp->multi_mode = multi_mode;
8926
8928         /* Set TPA flags */
8929         if (disable_tpa) {
8930                 bp->flags &= ~TPA_ENABLE_FLAG;
8931                 bp->dev->features &= ~NETIF_F_LRO;
8932         } else {
8933                 bp->flags |= TPA_ENABLE_FLAG;
8934                 bp->dev->features |= NETIF_F_LRO;
8935         }
8936
8937         if (CHIP_IS_E1(bp))
8938                 bp->dropless_fc = 0;
8939         else
8940                 bp->dropless_fc = dropless_fc;
8941
8942         bp->mrrs = mrrs;
8943
8944         bp->tx_ring_size = MAX_TX_AVAIL;
8945         bp->rx_ring_size = MAX_RX_AVAIL;
8946
8947         bp->rx_csum = 1;
8948
8949         /* round the coalescing timeouts down to the HW granularity of (4 * BNX2X_BTR) */
8950         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8951         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8952
8953         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8954         bp->current_interval = (poll ? poll : timer_interval);
8955
8956         init_timer(&bp->timer);
8957         bp->timer.expires = jiffies + bp->current_interval;
8958         bp->timer.data = (unsigned long) bp;
8959         bp->timer.function = bnx2x_timer;
8960
8961         return rc;
8962 }
8963
8964 /*
8965  * ethtool service functions
8966  */
8967
8968 /* All ethtool functions called with rtnl_lock */
8969
8970 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8971 {
8972         struct bnx2x *bp = netdev_priv(dev);
8973
8974         cmd->supported = bp->port.supported;
8975         cmd->advertising = bp->port.advertising;
8976
8977         if ((bp->state == BNX2X_STATE_OPEN) &&
8978             !(bp->flags & MF_FUNC_DIS) &&
8979             (bp->link_vars.link_up)) {
8980                 cmd->speed = bp->link_vars.line_speed;
8981                 cmd->duplex = bp->link_vars.duplex;
8982                 if (IS_E1HMF(bp)) {
8983                         u16 vn_max_rate;
8984
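			/* per-function max bandwidth is configured in
			 * units of 100 Mbps, e.g. a stored value of 25
			 * caps the reported speed at 2500 */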
8985                         vn_max_rate =
8986                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8987                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8988                         if (vn_max_rate < cmd->speed)
8989                                 cmd->speed = vn_max_rate;
8990                 }
8991         } else {
8992                 cmd->speed = -1;
8993                 cmd->duplex = -1;
8994         }
8995
8996         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8997                 u32 ext_phy_type =
8998                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8999
9000                 switch (ext_phy_type) {
9001                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9002                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9003                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9004                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9005                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9006                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9007                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9008                         cmd->port = PORT_FIBRE;
9009                         break;
9010
9011                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9012                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9013                         cmd->port = PORT_TP;
9014                         break;
9015
9016                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9017                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9018                                   bp->link_params.ext_phy_config);
9019                         break;
9020
9021                 default:
9022                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9023                            bp->link_params.ext_phy_config);
9024                         break;
9025                 }
9026         } else
9027                 cmd->port = PORT_TP;
9028
9029         cmd->phy_address = bp->mdio.prtad;
9030         cmd->transceiver = XCVR_INTERNAL;
9031
9032         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9033                 cmd->autoneg = AUTONEG_ENABLE;
9034         else
9035                 cmd->autoneg = AUTONEG_DISABLE;
9036
9037         cmd->maxtxpkt = 0;
9038         cmd->maxrxpkt = 0;
9039
9040         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9041            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9042            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9043            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9044            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9045            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9046            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9047
9048         return 0;
9049 }
9050
9051 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9052 {
9053         struct bnx2x *bp = netdev_priv(dev);
9054         u32 advertising;
9055
9056         if (IS_E1HMF(bp))
9057                 return 0;
9058
9059         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9060            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9061            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9062            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9063            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9064            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9065            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9066
9067         if (cmd->autoneg == AUTONEG_ENABLE) {
9068                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9069                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9070                         return -EINVAL;
9071                 }
9072
9073                 /* restrict the requested advertising to the supported modes */
9074                 cmd->advertising &= bp->port.supported;
9075
9076                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9077                 bp->link_params.req_duplex = DUPLEX_FULL;
9078                 bp->port.advertising |= (ADVERTISED_Autoneg |
9079                                          cmd->advertising);
9080
9081         } else { /* forced speed */
9082                 /* advertise the requested speed and duplex if supported */
9083                 switch (cmd->speed) {
9084                 case SPEED_10:
9085                         if (cmd->duplex == DUPLEX_FULL) {
9086                                 if (!(bp->port.supported &
9087                                       SUPPORTED_10baseT_Full)) {
9088                                         DP(NETIF_MSG_LINK,
9089                                            "10M full not supported\n");
9090                                         return -EINVAL;
9091                                 }
9092
9093                                 advertising = (ADVERTISED_10baseT_Full |
9094                                                ADVERTISED_TP);
9095                         } else {
9096                                 if (!(bp->port.supported &
9097                                       SUPPORTED_10baseT_Half)) {
9098                                         DP(NETIF_MSG_LINK,
9099                                            "10M half not supported\n");
9100                                         return -EINVAL;
9101                                 }
9102
9103                                 advertising = (ADVERTISED_10baseT_Half |
9104                                                ADVERTISED_TP);
9105                         }
9106                         break;
9107
9108                 case SPEED_100:
9109                         if (cmd->duplex == DUPLEX_FULL) {
9110                                 if (!(bp->port.supported &
9111                                                 SUPPORTED_100baseT_Full)) {
9112                                         DP(NETIF_MSG_LINK,
9113                                            "100M full not supported\n");
9114                                         return -EINVAL;
9115                                 }
9116
9117                                 advertising = (ADVERTISED_100baseT_Full |
9118                                                ADVERTISED_TP);
9119                         } else {
9120                                 if (!(bp->port.supported &
9121                                                 SUPPORTED_100baseT_Half)) {
9122                                         DP(NETIF_MSG_LINK,
9123                                            "100M half not supported\n");
9124                                         return -EINVAL;
9125                                 }
9126
9127                                 advertising = (ADVERTISED_100baseT_Half |
9128                                                ADVERTISED_TP);
9129                         }
9130                         break;
9131
9132                 case SPEED_1000:
9133                         if (cmd->duplex != DUPLEX_FULL) {
9134                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
9135                                 return -EINVAL;
9136                         }
9137
9138                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9139                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
9140                                 return -EINVAL;
9141                         }
9142
9143                         advertising = (ADVERTISED_1000baseT_Full |
9144                                        ADVERTISED_TP);
9145                         break;
9146
9147                 case SPEED_2500:
9148                         if (cmd->duplex != DUPLEX_FULL) {
9149                                 DP(NETIF_MSG_LINK,
9150                                    "2.5G half not supported\n");
9151                                 return -EINVAL;
9152                         }
9153
9154                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9155                                 DP(NETIF_MSG_LINK,
9156                                    "2.5G full not supported\n");
9157                                 return -EINVAL;
9158                         }
9159
9160                         advertising = (ADVERTISED_2500baseX_Full |
9161                                        ADVERTISED_TP);
9162                         break;
9163
9164                 case SPEED_10000:
9165                         if (cmd->duplex != DUPLEX_FULL) {
9166                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
9167                                 return -EINVAL;
9168                         }
9169
9170                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9171                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
9172                                 return -EINVAL;
9173                         }
9174
9175                         advertising = (ADVERTISED_10000baseT_Full |
9176                                        ADVERTISED_FIBRE);
9177                         break;
9178
9179                 default:
9180                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
9181                         return -EINVAL;
9182                 }
9183
9184                 bp->link_params.req_line_speed = cmd->speed;
9185                 bp->link_params.req_duplex = cmd->duplex;
9186                 bp->port.advertising = advertising;
9187         }
9188
9189         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9190            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
9191            bp->link_params.req_line_speed, bp->link_params.req_duplex,
9192            bp->port.advertising);
9193
9194         if (netif_running(dev)) {
9195                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9196                 bnx2x_link_set(bp);
9197         }
9198
9199         return 0;
9200 }
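
/*
 * Example: a (hypothetical) `ethtool -s ethX speed 10000 duplex full
 * autoneg off` reaches bnx2x_set_settings() with cmd->autoneg ==
 * AUTONEG_DISABLE, cmd->speed == SPEED_10000 and cmd->duplex ==
 * DUPLEX_FULL, taking the forced-speed branch above.
 */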
9201
9202 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9203 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9204
9205 static int bnx2x_get_regs_len(struct net_device *dev)
9206 {
9207         struct bnx2x *bp = netdev_priv(dev);
9208         int regdump_len = 0;
9209         int i;
9210
9211         if (CHIP_IS_E1(bp)) {
9212                 for (i = 0; i < REGS_COUNT; i++)
9213                         if (IS_E1_ONLINE(reg_addrs[i].info))
9214                                 regdump_len += reg_addrs[i].size;
9215
9216                 for (i = 0; i < WREGS_COUNT_E1; i++)
9217                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9218                                 regdump_len += wreg_addrs_e1[i].size *
9219                                         (1 + wreg_addrs_e1[i].read_regs_count);
9220
9221         } else { /* E1H */
9222                 for (i = 0; i < REGS_COUNT; i++)
9223                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9224                                 regdump_len += reg_addrs[i].size;
9225
9226                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9227                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9228                                 regdump_len += wreg_addrs_e1h[i].size *
9229                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9230         }
9231         regdump_len *= 4;
9232         regdump_len += sizeof(struct dump_hdr);
9233
9234         return regdump_len;
9235 }
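
/*
 * The dump length above is counted in 32-bit words: a plain register
 * block contributes its size, a wide-bus block contributes
 * size * (1 + read_regs_count); the sum is scaled to bytes (* 4) and
 * sizeof(struct dump_hdr) is added for the header that bnx2x_get_regs()
 * emits in front of the register values.
 */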
9236
9237 static void bnx2x_get_regs(struct net_device *dev,
9238                            struct ethtool_regs *regs, void *_p)
9239 {
9240         u32 *p = _p, i, j;
9241         struct bnx2x *bp = netdev_priv(dev);
9242         struct dump_hdr dump_hdr = {0};
9243
9244         regs->version = 0;
9245         memset(p, 0, regs->len);
9246
9247         if (!netif_running(bp->dev))
9248                 return;
9249
9250         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9251         dump_hdr.dump_sign = dump_sign_all;
9252         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9253         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9254         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9255         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9256         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9257
9258         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9259         p += dump_hdr.hdr_size + 1;
9260
9261         if (CHIP_IS_E1(bp)) {
9262                 for (i = 0; i < REGS_COUNT; i++)
9263                         if (IS_E1_ONLINE(reg_addrs[i].info))
9264                                 for (j = 0; j < reg_addrs[i].size; j++)
9265                                         *p++ = REG_RD(bp,
9266                                                       reg_addrs[i].addr + j*4);
9267
9268         } else { /* E1H */
9269                 for (i = 0; i < REGS_COUNT; i++)
9270                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9271                                 for (j = 0; j < reg_addrs[i].size; j++)
9272                                         *p++ = REG_RD(bp,
9273                                                       reg_addrs[i].addr + j*4);
9274         }
9275 }
9276
9277 #define PHY_FW_VER_LEN                  10
9278
9279 static void bnx2x_get_drvinfo(struct net_device *dev,
9280                               struct ethtool_drvinfo *info)
9281 {
9282         struct bnx2x *bp = netdev_priv(dev);
9283         u8 phy_fw_ver[PHY_FW_VER_LEN];
9284
9285         strcpy(info->driver, DRV_MODULE_NAME);
9286         strcpy(info->version, DRV_MODULE_VERSION);
9287
9288         phy_fw_ver[0] = '\0';
9289         if (bp->port.pmf) {
9290                 bnx2x_acquire_phy_lock(bp);
9291                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9292                                              (bp->state != BNX2X_STATE_CLOSED),
9293                                              phy_fw_ver, PHY_FW_VER_LEN);
9294                 bnx2x_release_phy_lock(bp);
9295         }
9296
9297         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9298                  (bp->common.bc_ver & 0xff0000) >> 16,
9299                  (bp->common.bc_ver & 0xff00) >> 8,
9300                  (bp->common.bc_ver & 0xff),
9301                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9302         strcpy(info->bus_info, pci_name(bp->pdev));
9303         info->n_stats = BNX2X_NUM_STATS;
9304         info->testinfo_len = BNX2X_NUM_TESTS;
9305         info->eedump_len = bp->common.flash_size;
9306         info->regdump_len = bnx2x_get_regs_len(dev);
9307 }
9308
9309 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9310 {
9311         struct bnx2x *bp = netdev_priv(dev);
9312
9313         if (bp->flags & NO_WOL_FLAG) {
9314                 wol->supported = 0;
9315                 wol->wolopts = 0;
9316         } else {
9317                 wol->supported = WAKE_MAGIC;
9318                 if (bp->wol)
9319                         wol->wolopts = WAKE_MAGIC;
9320                 else
9321                         wol->wolopts = 0;
9322         }
9323         memset(&wol->sopass, 0, sizeof(wol->sopass));
9324 }
9325
9326 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9327 {
9328         struct bnx2x *bp = netdev_priv(dev);
9329
9330         if (wol->wolopts & ~WAKE_MAGIC)
9331                 return -EINVAL;
9332
9333         if (wol->wolopts & WAKE_MAGIC) {
9334                 if (bp->flags & NO_WOL_FLAG)
9335                         return -EINVAL;
9336
9337                 bp->wol = 1;
9338         } else
9339                 bp->wol = 0;
9340
9341         return 0;
9342 }
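
/*
 * Example: a (hypothetical) `ethtool -s ethX wol g` requests WAKE_MAGIC
 * and is accepted unless NO_WOL_FLAG is set; `wol d` clears bp->wol; any
 * other wake option is rejected with -EINVAL.
 */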
9343
9344 static u32 bnx2x_get_msglevel(struct net_device *dev)
9345 {
9346         struct bnx2x *bp = netdev_priv(dev);
9347
9348         return bp->msglevel;
9349 }
9350
9351 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9352 {
9353         struct bnx2x *bp = netdev_priv(dev);
9354
9355         if (capable(CAP_NET_ADMIN))
9356                 bp->msglevel = level;
9357 }
9358
9359 static int bnx2x_nway_reset(struct net_device *dev)
9360 {
9361         struct bnx2x *bp = netdev_priv(dev);
9362
9363         if (!bp->port.pmf)
9364                 return 0;
9365
9366         if (netif_running(dev)) {
9367                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9368                 bnx2x_link_set(bp);
9369         }
9370
9371         return 0;
9372 }
9373
9374 static u32 bnx2x_get_link(struct net_device *dev)
9375 {
9376         struct bnx2x *bp = netdev_priv(dev);
9377
9378         if (bp->flags & MF_FUNC_DIS)
9379                 return 0;
9380
9381         return bp->link_vars.link_up;
9382 }
9383
9384 static int bnx2x_get_eeprom_len(struct net_device *dev)
9385 {
9386         struct bnx2x *bp = netdev_priv(dev);
9387
9388         return bp->common.flash_size;
9389 }
9390
9391 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9392 {
9393         int port = BP_PORT(bp);
9394         int count, i;
9395         u32 val = 0;
9396
9397         /* adjust timeout for emulation/FPGA */
9398         count = NVRAM_TIMEOUT_COUNT;
9399         if (CHIP_REV_IS_SLOW(bp))
9400                 count *= 100;
9401
9402         /* request access to nvram interface */
9403         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9404                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9405
9406         for (i = 0; i < count*10; i++) {
9407                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9408                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9409                         break;
9410
9411                 udelay(5);
9412         }
9413
9414         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9415                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9416                 return -EBUSY;
9417         }
9418
9419         return 0;
9420 }
9421
9422 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9423 {
9424         int port = BP_PORT(bp);
9425         int count, i;
9426         u32 val = 0;
9427
9428         /* adjust timeout for emulation/FPGA */
9429         count = NVRAM_TIMEOUT_COUNT;
9430         if (CHIP_REV_IS_SLOW(bp))
9431                 count *= 100;
9432
9433         /* relinquish nvram interface */
9434         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9435                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9436
9437         for (i = 0; i < count*10; i++) {
9438                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9439                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9440                         break;
9441
9442                 udelay(5);
9443         }
9444
9445         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9446                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9447                 return -EBUSY;
9448         }
9449
9450         return 0;
9451 }
9452
9453 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9454 {
9455         u32 val;
9456
9457         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9458
9459         /* enable both bits, even on read */
9460         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9461                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9462                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9463 }
9464
9465 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9466 {
9467         u32 val;
9468
9469         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9470
9471         /* disable both bits, even after read */
9472         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9473                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9474                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9475 }
9476
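/* Issue a single dword read through the MCP NVRAM interface: clear the
 * DONE bit, program the address, set DOIT and poll for DONE.
 */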
9477 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9478                                   u32 cmd_flags)
9479 {
9480         int count, i, rc;
9481         u32 val;
9482
9483         /* build the command word */
9484         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9485
9486         /* need to clear DONE bit separately */
9487         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9488
9489         /* address of the NVRAM to read from */
9490         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9491                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9492
9493         /* issue a read command */
9494         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9495
9496         /* adjust timeout for emulation/FPGA */
9497         count = NVRAM_TIMEOUT_COUNT;
9498         if (CHIP_REV_IS_SLOW(bp))
9499                 count *= 100;
9500
9501         /* wait for completion */
9502         *ret_val = 0;
9503         rc = -EBUSY;
9504         for (i = 0; i < count; i++) {
9505                 udelay(5);
9506                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9507
9508                 if (val & MCPR_NVM_COMMAND_DONE) {
9509                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9510                         /* we read nvram data in cpu order,
9511                          * but ethtool sees it as an array of bytes;
9512                          * converting to big-endian will do the work */
9513                         *ret_val = cpu_to_be32(val);
9514                         rc = 0;
9515                         break;
9516                 }
9517         }
9518
9519         return rc;
9520 }
9521
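/* Read a dword-aligned NVRAM region into ret_buf, wrapping the accesses
 * in a FIRST ... LAST command sequence.
 */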
9522 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9523                             int buf_size)
9524 {
9525         int rc;
9526         u32 cmd_flags;
9527         __be32 val;
9528
9529         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9530                 DP(BNX2X_MSG_NVM,
9531                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9532                    offset, buf_size);
9533                 return -EINVAL;
9534         }
9535
9536         if (offset + buf_size > bp->common.flash_size) {
9537                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9538                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9539                    offset, buf_size, bp->common.flash_size);
9540                 return -EINVAL;
9541         }
9542
9543         /* request access to nvram interface */
9544         rc = bnx2x_acquire_nvram_lock(bp);
9545         if (rc)
9546                 return rc;
9547
9548         /* enable access to nvram interface */
9549         bnx2x_enable_nvram_access(bp);
9550
9551         /* read the first word(s) */
9552         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9553         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9554                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9555                 memcpy(ret_buf, &val, 4);
9556
9557                 /* advance to the next dword */
9558                 offset += sizeof(u32);
9559                 ret_buf += sizeof(u32);
9560                 buf_size -= sizeof(u32);
9561                 cmd_flags = 0;
9562         }
9563
9564         if (rc == 0) {
9565                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9566                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9567                 memcpy(ret_buf, &val, 4);
9568         }
9569
9570         /* disable access to nvram interface */
9571         bnx2x_disable_nvram_access(bp);
9572         bnx2x_release_nvram_lock(bp);
9573
9574         return rc;
9575 }
9576
9577 static int bnx2x_get_eeprom(struct net_device *dev,
9578                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9579 {
9580         struct bnx2x *bp = netdev_priv(dev);
9581         int rc;
9582
9583         if (!netif_running(dev))
9584                 return -EAGAIN;
9585
9586         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9587            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9588            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9589            eeprom->len, eeprom->len);
9590
9591         /* parameters already validated in ethtool_get_eeprom */
9592
9593         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9594
9595         return rc;
9596 }
9597
9598 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9599                                    u32 cmd_flags)
9600 {
9601         int count, i, rc;
9602
9603         /* build the command word */
9604         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9605
9606         /* need to clear DONE bit separately */
9607         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9608
9609         /* write the data */
9610         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9611
9612         /* address of the NVRAM to write to */
9613         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9614                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9615
9616         /* issue the write command */
9617         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9618
9619         /* adjust timeout for emulation/FPGA */
9620         count = NVRAM_TIMEOUT_COUNT;
9621         if (CHIP_REV_IS_SLOW(bp))
9622                 count *= 100;
9623
9624         /* wait for completion */
9625         rc = -EBUSY;
9626         for (i = 0; i < count; i++) {
9627                 udelay(5);
9628                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9629                 if (val & MCPR_NVM_COMMAND_DONE) {
9630                         rc = 0;
9631                         break;
9632                 }
9633         }
9634
9635         return rc;
9636 }
9637
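/* bit offset of a byte within its naturally aligned dword */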
9638 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9639
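/* Write a single byte (ethtool's 1-byte accesses) by read-modify-writing
 * the aligned dword that contains it.
 */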
9640 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9641                               int buf_size)
9642 {
9643         int rc;
9644         u32 cmd_flags;
9645         u32 align_offset;
9646         __be32 val;
9647
9648         if (offset + buf_size > bp->common.flash_size) {
9649                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9650                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9651                    offset, buf_size, bp->common.flash_size);
9652                 return -EINVAL;
9653         }
9654
9655         /* request access to nvram interface */
9656         rc = bnx2x_acquire_nvram_lock(bp);
9657         if (rc)
9658                 return rc;
9659
9660         /* enable access to nvram interface */
9661         bnx2x_enable_nvram_access(bp);
9662
9663         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9664         align_offset = (offset & ~0x03);
9665         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9666
9667         if (rc == 0) {
9668                 val &= ~(0xff << BYTE_OFFSET(offset));
9669                 val |= (*data_buf << BYTE_OFFSET(offset));
9670
9671                 /* nvram data is returned as an array of bytes;
9672                  * convert it back to cpu order */
9673                 val = be32_to_cpu(val);
9674
9675                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9676                                              cmd_flags);
9677         }
9678
9679         /* disable access to nvram interface */
9680         bnx2x_disable_nvram_access(bp);
9681         bnx2x_release_nvram_lock(bp);
9682
9683         return rc;
9684 }
9685
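/* Write a dword-aligned buffer to NVRAM, raising the LAST/FIRST command
 * flags whenever a write reaches an NVRAM page boundary.
 */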
9686 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9687                              int buf_size)
9688 {
9689         int rc;
9690         u32 cmd_flags;
9691         u32 val;
9692         u32 written_so_far;
9693
9694         if (buf_size == 1)      /* ethtool */
9695                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9696
9697         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9698                 DP(BNX2X_MSG_NVM,
9699                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9700                    offset, buf_size);
9701                 return -EINVAL;
9702         }
9703
9704         if (offset + buf_size > bp->common.flash_size) {
9705                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9706                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9707                    offset, buf_size, bp->common.flash_size);
9708                 return -EINVAL;
9709         }
9710
9711         /* request access to nvram interface */
9712         rc = bnx2x_acquire_nvram_lock(bp);
9713         if (rc)
9714                 return rc;
9715
9716         /* enable access to nvram interface */
9717         bnx2x_enable_nvram_access(bp);
9718
9719         written_so_far = 0;
9720         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9721         while ((written_so_far < buf_size) && (rc == 0)) {
9722                 if (written_so_far == (buf_size - sizeof(u32)))
9723                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9724                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9725                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9726                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9727                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9728
9729                 memcpy(&val, data_buf, 4);
9730
9731                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9732
9733                 /* advance to the next dword */
9734                 offset += sizeof(u32);
9735                 data_buf += sizeof(u32);
9736                 written_so_far += sizeof(u32);
9737                 cmd_flags = 0;
9738         }
9739
9740         /* disable access to nvram interface */
9741         bnx2x_disable_nvram_access(bp);
9742         bnx2x_release_nvram_lock(bp);
9743
9744         return rc;
9745 }
9746
9747 static int bnx2x_set_eeprom(struct net_device *dev,
9748                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9749 {
9750         struct bnx2x *bp = netdev_priv(dev);
9751         int port = BP_PORT(bp);
9752         int rc = 0;
9753
9754         if (!netif_running(dev))
9755                 return -EAGAIN;
9756
9757         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9758            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9759            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9760            eeprom->len, eeprom->len);
9761
9762         /* parameters already validated in ethtool_set_eeprom */
9763
9764         /* PHY eeprom can be accessed only by the PMF */
9765         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9766             !bp->port.pmf)
9767                 return -EINVAL;
9768
9769         if (eeprom->magic == 0x50485950) {
9770                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9771                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9772
9773                 bnx2x_acquire_phy_lock(bp);
9774                 rc |= bnx2x_link_reset(&bp->link_params,
9775                                        &bp->link_vars, 0);
9776                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9777                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9778                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9779                                        MISC_REGISTERS_GPIO_HIGH, port);
9780                 bnx2x_release_phy_lock(bp);
9781                 bnx2x_link_report(bp);
9782
9783         } else if (eeprom->magic == 0x50485952) {
9784                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9785                 if (bp->state == BNX2X_STATE_OPEN) {
9786                         bnx2x_acquire_phy_lock(bp);
9787                         rc |= bnx2x_link_reset(&bp->link_params,
9788                                                &bp->link_vars, 1);
9789
9790                         rc |= bnx2x_phy_init(&bp->link_params,
9791                                              &bp->link_vars);
9792                         bnx2x_release_phy_lock(bp);
9793                         bnx2x_calc_fc_adv(bp);
9794                 }
9795         } else if (eeprom->magic == 0x50485943) {
9796                 /* 'PHYC' (0x50485943): PHY FW upgrade completed */
9797                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9798                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9799                         u8 ext_phy_addr =
9800                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9801
9802                         /* take the DSP out of download mode */
9803                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9804                                        MISC_REGISTERS_GPIO_LOW, port);
9805
9806                         bnx2x_acquire_phy_lock(bp);
9807
9808                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9809
9810                         /* wait 0.5 sec to allow it to run */
9811                         msleep(500);
9812                         bnx2x_ext_phy_hw_reset(bp, port);
9813                         msleep(500);
9814                         bnx2x_release_phy_lock(bp);
9815                 }
9816         } else
9817                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9818
9819         return rc;
9820 }
9821
9822 static int bnx2x_get_coalesce(struct net_device *dev,
9823                               struct ethtool_coalesce *coal)
9824 {
9825         struct bnx2x *bp = netdev_priv(dev);
9826
9827         memset(coal, 0, sizeof(struct ethtool_coalesce));
9828
9829         coal->rx_coalesce_usecs = bp->rx_ticks;
9830         coal->tx_coalesce_usecs = bp->tx_ticks;
9831
9832         return 0;
9833 }
9834
9835 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
9836 static int bnx2x_set_coalesce(struct net_device *dev,
9837                               struct ethtool_coalesce *coal)
9838 {
9839         struct bnx2x *bp = netdev_priv(dev);
9840
9841         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9842         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9843                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9844
9845         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9846         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9847                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9848
9849         if (netif_running(dev))
9850                 bnx2x_update_coalesce(bp);
9851
9852         return 0;
9853 }
9854
9855 static void bnx2x_get_ringparam(struct net_device *dev,
9856                                 struct ethtool_ringparam *ering)
9857 {
9858         struct bnx2x *bp = netdev_priv(dev);
9859
9860         ering->rx_max_pending = MAX_RX_AVAIL;
9861         ering->rx_mini_max_pending = 0;
9862         ering->rx_jumbo_max_pending = 0;
9863
9864         ering->rx_pending = bp->rx_ring_size;
9865         ering->rx_mini_pending = 0;
9866         ering->rx_jumbo_pending = 0;
9867
9868         ering->tx_max_pending = MAX_TX_AVAIL;
9869         ering->tx_pending = bp->tx_ring_size;
9870 }
9871
9872 static int bnx2x_set_ringparam(struct net_device *dev,
9873                                struct ethtool_ringparam *ering)
9874 {
9875         struct bnx2x *bp = netdev_priv(dev);
9876         int rc = 0;
9877
9878         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9879             (ering->tx_pending > MAX_TX_AVAIL) ||
9880             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9881                 return -EINVAL;
9882
9883         bp->rx_ring_size = ering->rx_pending;
9884         bp->tx_ring_size = ering->tx_pending;
9885
9886         if (netif_running(dev)) {
9887                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9888                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9889         }
9890
9891         return rc;
9892 }
9893
9894 static void bnx2x_get_pauseparam(struct net_device *dev,
9895                                  struct ethtool_pauseparam *epause)
9896 {
9897         struct bnx2x *bp = netdev_priv(dev);
9898
9899         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9900                            BNX2X_FLOW_CTRL_AUTO) &&
9901                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9902
9903         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9904                             BNX2X_FLOW_CTRL_RX);
9905         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9906                             BNX2X_FLOW_CTRL_TX);
9907
9908         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9909            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9910            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9911 }
9912
9913 static int bnx2x_set_pauseparam(struct net_device *dev,
9914                                 struct ethtool_pauseparam *epause)
9915 {
9916         struct bnx2x *bp = netdev_priv(dev);
9917
9918         if (IS_E1HMF(bp))
9919                 return 0;
9920
9921         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9922            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9923            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9924
9925         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9926
9927         if (epause->rx_pause)
9928                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9929
9930         if (epause->tx_pause)
9931                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9932
9933         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9934                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9935
9936         if (epause->autoneg) {
9937                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9938                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9939                         return -EINVAL;
9940                 }
9941
9942                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9943                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9944         }
9945
9946         DP(NETIF_MSG_LINK,
9947            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9948
9949         if (netif_running(dev)) {
9950                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9951                 bnx2x_link_set(bp);
9952         }
9953
9954         return 0;
9955 }
9956
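/* ethtool set_flags: only ETH_FLAG_LRO is handled here; it maps onto the
 * controller's TPA aggregation, and toggling it on a running interface
 * reloads the NIC.
 */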
9957 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9958 {
9959         struct bnx2x *bp = netdev_priv(dev);
9960         int changed = 0;
9961         int rc = 0;
9962
9963         /* TPA requires Rx CSUM offloading */
9964         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9965                 if (!(dev->features & NETIF_F_LRO)) {
9966                         dev->features |= NETIF_F_LRO;
9967                         bp->flags |= TPA_ENABLE_FLAG;
9968                         changed = 1;
9969                 }
9970
9971         } else if (dev->features & NETIF_F_LRO) {
9972                 dev->features &= ~NETIF_F_LRO;
9973                 bp->flags &= ~TPA_ENABLE_FLAG;
9974                 changed = 1;
9975         }
9976
9977         if (changed && netif_running(dev)) {
9978                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9979                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9980         }
9981
9982         return rc;
9983 }
9984
9985 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9986 {
9987         struct bnx2x *bp = netdev_priv(dev);
9988
9989         return bp->rx_csum;
9990 }
9991
9992 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9993 {
9994         struct bnx2x *bp = netdev_priv(dev);
9995         int rc = 0;
9996
9997         bp->rx_csum = data;
9998
9999         /* Disable TPA when Rx CSUM is disabled; otherwise all
10000            TPA'ed packets will be discarded due to a wrong TCP CSUM */
10001         if (!data) {
10002                 u32 flags = ethtool_op_get_flags(dev);
10003
10004                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10005         }
10006
10007         return rc;
10008 }
10009
10010 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10011 {
10012         if (data) {
10013                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10014                 dev->features |= NETIF_F_TSO6;
10015         } else {
10016                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10017                 dev->features &= ~NETIF_F_TSO6;
10018         }
10019
10020         return 0;
10021 }
10022
10023 static const struct {
10024         char string[ETH_GSTRING_LEN];
10025 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10026         { "register_test (offline)" },
10027         { "memory_test (offline)" },
10028         { "loopback_test (offline)" },
10029         { "nvram_test (online)" },
10030         { "interrupt_test (online)" },
10031         { "link_test (online)" },
10032         { "idle check (online)" }
10033 };
10034
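/* For each register in the table below, write a test pattern, read it
 * back through the writable-bits mask and restore the original value;
 * offset1 is the per-port stride of the register.
 */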
10035 static int bnx2x_test_registers(struct bnx2x *bp)
10036 {
10037         int idx, i, rc = -ENODEV;
10038         u32 wr_val = 0;
10039         int port = BP_PORT(bp);
10040         static const struct {
10041                 u32  offset0;
10042                 u32  offset1;
10043                 u32  mask;
10044         } reg_tbl[] = {
10045 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
10046                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
10047                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
10048                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
10049                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
10050                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
10051                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
10052                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
10053                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
10054                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
10055 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
10056                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
10057                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
10058                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
10059                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
10060                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10061                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
10062                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
10063                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
10064                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
10065 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
10066                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
10067                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
10068                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
10069                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
10070                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
10071                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
10072                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
10073                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
10074                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
10075 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
10076                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
10077                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
10078                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10079                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
10080                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10081                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
10082
10083                 { 0xffffffff, 0, 0x00000000 }
10084         };
10085
10086         if (!netif_running(bp->dev))
10087                 return rc;
10088
10089         /* Run the test twice, first writing 0x00000000 and then 0xffffffff,
10090            so that both polarities of every writable bit are covered */
10091         for (idx = 0; idx < 2; idx++) {
10092
10093                 switch (idx) {
10094                 case 0:
10095                         wr_val = 0;
10096                         break;
10097                 case 1:
10098                         wr_val = 0xffffffff;
10099                         break;
10100                 }
10101
10102                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10103                         u32 offset, mask, save_val, val;
10104
10105                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10106                         mask = reg_tbl[i].mask;
10107
10108                         save_val = REG_RD(bp, offset);
10109
10110                         REG_WR(bp, offset, wr_val);
10111                         val = REG_RD(bp, offset);
10112
10113                         /* Restore the original register's value */
10114                         REG_WR(bp, offset, save_val);
10115
10116                         /* verify the read value matches the expected value */
10117                         if ((val & mask) != (wr_val & mask))
10118                                 goto test_reg_exit;
10119                 }
10120         }
10121
10122         rc = 0;
10123
10124 test_reg_exit:
10125         return rc;
10126 }
10127
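/* Read back all words of the internal memories listed below, then make
 * sure no unexpected parity errors were latched (known-benign parity
 * bits are masked per chip revision).
 */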
10128 static int bnx2x_test_memory(struct bnx2x *bp)
10129 {
10130         int i, j, rc = -ENODEV;
10131         u32 val;
10132         static const struct {
10133                 u32 offset;
10134                 int size;
10135         } mem_tbl[] = {
10136                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
10137                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10138                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
10139                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
10140                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
10141                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
10142                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
10143
10144                 { 0xffffffff, 0 }
10145         };
10146         static const struct {
10147                 char *name;
10148                 u32 offset;
10149                 u32 e1_mask;
10150                 u32 e1h_mask;
10151         } prty_tbl[] = {
10152                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
10153                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
10154                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
10155                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
10156                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
10157                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
10158
10159                 { NULL, 0xffffffff, 0, 0 }
10160         };
10161
10162         if (!netif_running(bp->dev))
10163                 return rc;
10164
10165         /* Go through all the memories */
10166         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10167                 for (j = 0; j < mem_tbl[i].size; j++)
10168                         REG_RD(bp, mem_tbl[i].offset + j*4);
10169
10170         /* Check the parity status */
10171         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10172                 val = REG_RD(bp, prty_tbl[i].offset);
10173                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10174                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10175                         DP(NETIF_MSG_HW,
10176                            "%s is 0x%x\n", prty_tbl[i].name, val);
10177                         goto test_mem_exit;
10178                 }
10179         }
10180
10181         rc = 0;
10182
10183 test_mem_exit:
10184         return rc;
10185 }
10186
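/* When the link is expected to be up, poll bnx2x_link_test() for up to
 * ~10 seconds until it reports success.
 */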
10187 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10188 {
10189         int cnt = 1000;
10190
10191         if (link_up)
10192                 while (bnx2x_link_test(bp) && cnt--)
10193                         msleep(10);
10194 }
10195
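/* Build a self-addressed test packet, push it through queue 0 in the
 * requested loopback mode and verify that it comes back on the Rx ring
 * with the expected length and payload.
 */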
10196 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10197 {
10198         unsigned int pkt_size, num_pkts, i;
10199         struct sk_buff *skb;
10200         unsigned char *packet;
10201         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10202         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10203         u16 tx_start_idx, tx_idx;
10204         u16 rx_start_idx, rx_idx;
10205         u16 pkt_prod, bd_prod;
10206         struct sw_tx_bd *tx_buf;
10207         struct eth_tx_start_bd *tx_start_bd;
10208         struct eth_tx_parse_bd *pbd = NULL;
10209         dma_addr_t mapping;
10210         union eth_rx_cqe *cqe;
10211         u8 cqe_fp_flags;
10212         struct sw_rx_bd *rx_buf;
10213         u16 len;
10214         int rc = -ENODEV;
10215
10216         /* check the loopback mode */
10217         switch (loopback_mode) {
10218         case BNX2X_PHY_LOOPBACK:
10219                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10220                         return -EINVAL;
10221                 break;
10222         case BNX2X_MAC_LOOPBACK:
10223                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10224                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10225                 break;
10226         default:
10227                 return -EINVAL;
10228         }
10229
10230         /* prepare the loopback packet */
10231         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10232                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10233         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10234         if (!skb) {
10235                 rc = -ENOMEM;
10236                 goto test_loopback_exit;
10237         }
10238         packet = skb_put(skb, pkt_size);
10239         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10240         memset(packet + ETH_ALEN, 0, ETH_ALEN);
10241         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10242         for (i = ETH_HLEN; i < pkt_size; i++)
10243                 packet[i] = (unsigned char) (i & 0xff);
10244
10245         /* send the loopback packet */
10246         num_pkts = 0;
10247         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10248         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10249
10250         pkt_prod = fp_tx->tx_pkt_prod++;
10251         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10252         tx_buf->first_bd = fp_tx->tx_bd_prod;
10253         tx_buf->skb = skb;
10254         tx_buf->flags = 0;
10255
10256         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10257         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10258         mapping = pci_map_single(bp->pdev, skb->data,
10259                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10260         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10261         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10262         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10263         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10264         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10265         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10266         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10267                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10268
10269         /* turn on parsing and get a BD */
10270         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10271         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10272
10273         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10274
10275         wmb();
10276
10277         fp_tx->tx_db.data.prod += 2;
10278         barrier();
10279         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10280
10281         mmiowb();
10282
10283         num_pkts++;
10284         fp_tx->tx_bd_prod += 2; /* start + pbd */
10285
10286         udelay(100);
10287
10288         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10289         if (tx_idx != tx_start_idx + num_pkts)
10290                 goto test_loopback_exit;
10291
10292         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10293         if (rx_idx != rx_start_idx + num_pkts)
10294                 goto test_loopback_exit;
10295
10296         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10297         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10298         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10299                 goto test_loopback_rx_exit;
10300
10301         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10302         if (len != pkt_size)
10303                 goto test_loopback_rx_exit;
10304
10305         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10306         skb = rx_buf->skb;
10307         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10308         for (i = ETH_HLEN; i < pkt_size; i++)
10309                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10310                         goto test_loopback_rx_exit;
10311
10312         rc = 0;
10313
10314 test_loopback_rx_exit:
10315
10316         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10317         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10318         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10319         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10320
10321         /* Update producers */
10322         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10323                              fp_rx->rx_sge_prod);
10324
10325 test_loopback_exit:
10326         bp->link_params.loopback_mode = LOOPBACK_NONE;
10327
10328         return rc;
10329 }
10330
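/* Run the PHY and MAC loopback tests with the fastpath quiesced; returns
 * a bitmask of BNX2X_*_LOOPBACK_FAILED flags (0 on success).
 */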
10331 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10332 {
10333         int rc = 0, res;
10334
10335         if (!netif_running(bp->dev))
10336                 return BNX2X_LOOPBACK_FAILED;
10337
10338         bnx2x_netif_stop(bp, 1);
10339         bnx2x_acquire_phy_lock(bp);
10340
10341         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10342         if (res) {
10343                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10344                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10345         }
10346
10347         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10348         if (res) {
10349                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10350                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10351         }
10352
10353         bnx2x_release_phy_lock(bp);
10354         bnx2x_netif_start(bp);
10355
10356         return rc;
10357 }
10358
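/* running ether_crc_le() over a block that includes its stored CRC yields
 * this constant residual when the block is intact
 */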
10359 #define CRC32_RESIDUAL                  0xdebb20e3
10360
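/* Validate the NVRAM bootstrap magic value and the CRC of each
 * well-known directory section.
 */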
10361 static int bnx2x_test_nvram(struct bnx2x *bp)
10362 {
10363         static const struct {
10364                 int offset;
10365                 int size;
10366         } nvram_tbl[] = {
10367                 {     0,  0x14 }, /* bootstrap */
10368                 {  0x14,  0xec }, /* dir */
10369                 { 0x100, 0x350 }, /* manuf_info */
10370                 { 0x450,  0xf0 }, /* feature_info */
10371                 { 0x640,  0x64 }, /* upgrade_key_info */
10372                 { 0x6a4,  0x64 },
10373                 { 0x708,  0x70 }, /* manuf_key_info */
10374                 { 0x778,  0x70 },
10375                 {     0,     0 }
10376         };
10377         __be32 buf[0x350 / 4];
10378         u8 *data = (u8 *)buf;
10379         int i, rc;
10380         u32 magic, crc;
10381
10382         rc = bnx2x_nvram_read(bp, 0, data, 4);
10383         if (rc) {
10384                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10385                 goto test_nvram_exit;
10386         }
10387
10388         magic = be32_to_cpu(buf[0]);
10389         if (magic != 0x669955aa) {
10390                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10391                 rc = -ENODEV;
10392                 goto test_nvram_exit;
10393         }
10394
10395         for (i = 0; nvram_tbl[i].size; i++) {
10396
10397                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10398                                       nvram_tbl[i].size);
10399                 if (rc) {
10400                         DP(NETIF_MSG_PROBE,
10401                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10402                         goto test_nvram_exit;
10403                 }
10404
10405                 crc = ether_crc_le(nvram_tbl[i].size, data);
10406                 if (crc != CRC32_RESIDUAL) {
10407                         DP(NETIF_MSG_PROBE,
10408                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10409                         rc = -ENODEV;
10410                         goto test_nvram_exit;
10411                 }
10412         }
10413
10414 test_nvram_exit:
10415         return rc;
10416 }
10417
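/* Exercise the slowpath interrupt path by posting a harmless
 * (zero-length) set-MAC ramrod and waiting for its completion.
 */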
10418 static int bnx2x_test_intr(struct bnx2x *bp)
10419 {
10420         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10421         int i, rc;
10422
10423         if (!netif_running(bp->dev))
10424                 return -ENODEV;
10425
10426         config->hdr.length = 0;
10427         if (CHIP_IS_E1(bp))
10428                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10429         else
10430                 config->hdr.offset = BP_FUNC(bp);
10431         config->hdr.client_id = bp->fp->cl_id;
10432         config->hdr.reserved1 = 0;
10433
10434         bp->set_mac_pending++;
10435         smp_wmb();
10436         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10437                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10438                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10439         if (rc == 0) {
10440                 for (i = 0; i < 10; i++) {
10441                         if (!bp->set_mac_pending)
10442                                 break;
10443                         smp_rmb();
10444                         msleep_interruptible(10);
10445                 }
10446                 if (i == 10)
10447                         rc = -ENODEV;
10448         }
10449
10450         return rc;
10451 }
10452
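/* ethtool self-test: the offline tests (registers, memory, loopback)
 * reload the NIC in diagnostic mode and are skipped in multi-function
 * mode; the nvram, interrupt and link tests run on the live interface.
 */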
10453 static void bnx2x_self_test(struct net_device *dev,
10454                             struct ethtool_test *etest, u64 *buf)
10455 {
10456         struct bnx2x *bp = netdev_priv(dev);
10457
10458         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10459
10460         if (!netif_running(dev))
10461                 return;
10462
10463         /* offline tests are not supported in MF mode */
10464         if (IS_E1HMF(bp))
10465                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10466
10467         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10468                 int port = BP_PORT(bp);
10469                 u32 val;
10470                 u8 link_up;
10471
10472                 /* save current value of input enable for TX port IF */
10473                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10474                 /* disable input for TX port IF */
10475                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10476
10477                 link_up = (bnx2x_link_test(bp) == 0);
10478                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10479                 bnx2x_nic_load(bp, LOAD_DIAG);
10480                 /* wait until link state is restored */
10481                 bnx2x_wait_for_link(bp, link_up);
10482
10483                 if (bnx2x_test_registers(bp) != 0) {
10484                         buf[0] = 1;
10485                         etest->flags |= ETH_TEST_FL_FAILED;
10486                 }
10487                 if (bnx2x_test_memory(bp) != 0) {
10488                         buf[1] = 1;
10489                         etest->flags |= ETH_TEST_FL_FAILED;
10490                 }
10491                 buf[2] = bnx2x_test_loopback(bp, link_up);
10492                 if (buf[2] != 0)
10493                         etest->flags |= ETH_TEST_FL_FAILED;
10494
10495                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10496
10497                 /* restore input for TX port IF */
10498                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10499
10500                 bnx2x_nic_load(bp, LOAD_NORMAL);
10501                 /* wait until link state is restored */
10502                 bnx2x_wait_for_link(bp, link_up);
10503         }
10504         if (bnx2x_test_nvram(bp) != 0) {
10505                 buf[3] = 1;
10506                 etest->flags |= ETH_TEST_FL_FAILED;
10507         }
10508         if (bnx2x_test_intr(bp) != 0) {
10509                 buf[4] = 1;
10510                 etest->flags |= ETH_TEST_FL_FAILED;
10511         }
10512         if (bp->port.pmf)
10513                 if (bnx2x_link_test(bp) != 0) {
10514                         buf[5] = 1;
10515                         etest->flags |= ETH_TEST_FL_FAILED;
10516                 }
10517
10518 #ifdef BNX2X_EXTRA_DEBUG
10519         bnx2x_panic_dump(bp);
10520 #endif
10521 }
10522
10523 static const struct {
10524         long offset;
10525         int size;
10526         u8 string[ETH_GSTRING_LEN];
10527 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10528 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10529         { Q_STATS_OFFSET32(error_bytes_received_hi),
10530                                                 8, "[%d]: rx_error_bytes" },
10531         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10532                                                 8, "[%d]: rx_ucast_packets" },
10533         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10534                                                 8, "[%d]: rx_mcast_packets" },
10535         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10536                                                 8, "[%d]: rx_bcast_packets" },
10537         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10538         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10539                                          4, "[%d]: rx_phy_ip_err_discards"},
10540         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10541                                          4, "[%d]: rx_skb_alloc_discard" },
10542         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10543
10544 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10545         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10546                                                         8, "[%d]: tx_packets" }
10547 };
10548
10549 static const struct {
10550         long offset;
10551         int size;
10552         u32 flags;
10553 #define STATS_FLAGS_PORT                1
10554 #define STATS_FLAGS_FUNC                2
10555 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10556         u8 string[ETH_GSTRING_LEN];
10557 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10558 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10559                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10560         { STATS_OFFSET32(error_bytes_received_hi),
10561                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10562         { STATS_OFFSET32(total_unicast_packets_received_hi),
10563                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10564         { STATS_OFFSET32(total_multicast_packets_received_hi),
10565                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10566         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10567                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10568         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10569                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10570         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10571                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10572         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10573                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10574         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10575                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10576 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10577                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10578         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10579                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10580         { STATS_OFFSET32(no_buff_discard_hi),
10581                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10582         { STATS_OFFSET32(mac_filter_discard),
10583                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10584         { STATS_OFFSET32(xxoverflow_discard),
10585                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10586         { STATS_OFFSET32(brb_drop_hi),
10587                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10588         { STATS_OFFSET32(brb_truncate_hi),
10589                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10590         { STATS_OFFSET32(pause_frames_received_hi),
10591                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10592         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10593                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10594         { STATS_OFFSET32(nig_timer_max),
10595                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10596 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10597                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10598         { STATS_OFFSET32(rx_skb_alloc_failed),
10599                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10600         { STATS_OFFSET32(hw_csum_err),
10601                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10602
10603         { STATS_OFFSET32(total_bytes_transmitted_hi),
10604                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10605         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10606                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10607         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10608                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10609         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10610                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10611         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10612                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10613         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10614                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10615         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10616                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10617 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10618                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10619         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10620                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10621         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10622                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10623         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10624                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10625         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10626                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10627         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10628                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10629         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10630                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10631         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10632                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10633         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10634                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10635         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10636                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10637 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10638                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10639         { STATS_OFFSET32(pause_frames_sent_hi),
10640                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10641 };
10642
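/* In E1H multi-function mode only per-function statistics are exported,
 * unless BNX2X_MSG_STATS debugging is enabled via the message level.
 */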
10643 #define IS_PORT_STAT(i) \
10644         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10645 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10646 #define IS_E1HMF_MODE_STAT(bp) \
10647                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10648
10649 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10650 {
10651         struct bnx2x *bp = netdev_priv(dev);
10652         int i, num_stats;
10653
10654         switch (stringset) {
10655         case ETH_SS_STATS:
10656                 if (is_multi(bp)) {
10657                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10658                         if (!IS_E1HMF_MODE_STAT(bp))
10659                                 num_stats += BNX2X_NUM_STATS;
10660                 } else {
10661                         if (IS_E1HMF_MODE_STAT(bp)) {
10662                                 num_stats = 0;
10663                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
10664                                         if (IS_FUNC_STAT(i))
10665                                                 num_stats++;
10666                         } else
10667                                 num_stats = BNX2X_NUM_STATS;
10668                 }
10669                 return num_stats;
10670
10671         case ETH_SS_TEST:
10672                 return BNX2X_NUM_TESTS;
10673
10674         default:
10675                 return -EINVAL;
10676         }
10677 }
10678
10679 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10680 {
10681         struct bnx2x *bp = netdev_priv(dev);
10682         int i, j, k;
10683
10684         switch (stringset) {
10685         case ETH_SS_STATS:
10686                 if (is_multi(bp)) {
10687                         k = 0;
10688                         for_each_queue(bp, i) {
10689                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10690                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10691                                                 bnx2x_q_stats_arr[j].string, i);
10692                                 k += BNX2X_NUM_Q_STATS;
10693                         }
10694                         if (IS_E1HMF_MODE_STAT(bp))
10695                                 break;
10696                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10697                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10698                                        bnx2x_stats_arr[j].string);
10699                 } else {
10700                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10701                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10702                                         continue;
10703                                 strcpy(buf + j*ETH_GSTRING_LEN,
10704                                        bnx2x_stats_arr[i].string);
10705                                 j++;
10706                         }
10707                 }
10708                 break;
10709
10710         case ETH_SS_TEST:
10711                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10712                 break;
10713         }
10714 }
10715
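/* Fill buf[] with the counters declared in the stats tables above:
 * zero-sized entries are emitted as zero, 4-byte counters are
 * zero-extended and 8-byte counters are folded from their hi/lo halves.
 */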
10716 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10717                                     struct ethtool_stats *stats, u64 *buf)
10718 {
10719         struct bnx2x *bp = netdev_priv(dev);
10720         u32 *hw_stats, *offset;
10721         int i, j, k;
10722
10723         if (is_multi(bp)) {
10724                 k = 0;
10725                 for_each_queue(bp, i) {
10726                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10727                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10728                                 if (bnx2x_q_stats_arr[j].size == 0) {
10729                                         /* skip this counter */
10730                                         buf[k + j] = 0;
10731                                         continue;
10732                                 }
10733                                 offset = (hw_stats +
10734                                           bnx2x_q_stats_arr[j].offset);
10735                                 if (bnx2x_q_stats_arr[j].size == 4) {
10736                                         /* 4-byte counter */
10737                                         buf[k + j] = (u64) *offset;
10738                                         continue;
10739                                 }
10740                                 /* 8-byte counter */
10741                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10742                         }
10743                         k += BNX2X_NUM_Q_STATS;
10744                 }
10745                 if (IS_E1HMF_MODE_STAT(bp))
10746                         return;
10747                 hw_stats = (u32 *)&bp->eth_stats;
10748                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10749                         if (bnx2x_stats_arr[j].size == 0) {
10750                                 /* skip this counter */
10751                                 buf[k + j] = 0;
10752                                 continue;
10753                         }
10754                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10755                         if (bnx2x_stats_arr[j].size == 4) {
10756                                 /* 4-byte counter */
10757                                 buf[k + j] = (u64) *offset;
10758                                 continue;
10759                         }
10760                         /* 8-byte counter */
10761                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10762                 }
10763         } else {
10764                 hw_stats = (u32 *)&bp->eth_stats;
10765                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10766                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10767                                 continue;
10768                         if (bnx2x_stats_arr[i].size == 0) {
10769                                 /* skip this counter */
10770                                 buf[j] = 0;
10771                                 j++;
10772                                 continue;
10773                         }
10774                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10775                         if (bnx2x_stats_arr[i].size == 4) {
10776                                 /* 4-byte counter */
10777                                 buf[j] = (u64) *offset;
10778                                 j++;
10779                                 continue;
10780                         }
10781                         /* 8-byte counter */
10782                         buf[j] = HILO_U64(*offset, *(offset + 1));
10783                         j++;
10784                 }
10785         }
10786 }
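/* A minimal sketch of the 8-byte counter read above, assuming the
 * HILO_U64() helper keeps its usual bnx2x.h shape (an assumption,
 * shown here only for illustration):
 *
 *	#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
 *
 * The device exports each 64-bit statistic as two consecutive u32
 * words with the high word first, so for a stat at "offset":
 *
 *	buf[k + j] = HILO_U64(*offset, *(offset + 1));
 *
 * while a 4-byte counter is simply widened with a (u64) cast.
 */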
10787
10788 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10789 {
10790         struct bnx2x *bp = netdev_priv(dev);
10791         int i;
10792
10793         if (!netif_running(dev))
10794                 return 0;
10795
10796         if (!bp->port.pmf)
10797                 return 0;
10798
10799         if (data == 0)
10800                 data = 2;
10801
10802         for (i = 0; i < (data * 2); i++) {
10803                 if ((i % 2) == 0)
10804                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10805                                       SPEED_1000);
10806                 else
10807                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10808
10809                 msleep_interruptible(500);
10810                 if (signal_pending(current))
10811                         break;
10812         }
10813
10814         if (bp->link_vars.link_up)
10815                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10816                               bp->link_vars.line_speed);
10817
10818         return 0;
10819 }
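/* bnx2x_phys_id() backs "ethtool -p": it blinks the port LED for
 * "data" seconds (data * 2 half-second steps, defaulting to 2 when
 * the caller passes 0) and then restores the LED to match the current
 * link state.  A hypothetical invocation, assuming eth0 is a bnx2x
 * port:
 *
 *	ethtool -p eth0 5	(blink for ~5 seconds)
 */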
10820
10821 static const struct ethtool_ops bnx2x_ethtool_ops = {
10822         .get_settings           = bnx2x_get_settings,
10823         .set_settings           = bnx2x_set_settings,
10824         .get_drvinfo            = bnx2x_get_drvinfo,
10825         .get_regs_len           = bnx2x_get_regs_len,
10826         .get_regs               = bnx2x_get_regs,
10827         .get_wol                = bnx2x_get_wol,
10828         .set_wol                = bnx2x_set_wol,
10829         .get_msglevel           = bnx2x_get_msglevel,
10830         .set_msglevel           = bnx2x_set_msglevel,
10831         .nway_reset             = bnx2x_nway_reset,
10832         .get_link               = bnx2x_get_link,
10833         .get_eeprom_len         = bnx2x_get_eeprom_len,
10834         .get_eeprom             = bnx2x_get_eeprom,
10835         .set_eeprom             = bnx2x_set_eeprom,
10836         .get_coalesce           = bnx2x_get_coalesce,
10837         .set_coalesce           = bnx2x_set_coalesce,
10838         .get_ringparam          = bnx2x_get_ringparam,
10839         .set_ringparam          = bnx2x_set_ringparam,
10840         .get_pauseparam         = bnx2x_get_pauseparam,
10841         .set_pauseparam         = bnx2x_set_pauseparam,
10842         .get_rx_csum            = bnx2x_get_rx_csum,
10843         .set_rx_csum            = bnx2x_set_rx_csum,
10844         .get_tx_csum            = ethtool_op_get_tx_csum,
10845         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10846         .set_flags              = bnx2x_set_flags,
10847         .get_flags              = ethtool_op_get_flags,
10848         .get_sg                 = ethtool_op_get_sg,
10849         .set_sg                 = ethtool_op_set_sg,
10850         .get_tso                = ethtool_op_get_tso,
10851         .set_tso                = bnx2x_set_tso,
10852         .self_test              = bnx2x_self_test,
10853         .get_sset_count         = bnx2x_get_sset_count,
10854         .get_strings            = bnx2x_get_strings,
10855         .phys_id                = bnx2x_phys_id,
10856         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10857 };
10858
10859 /* end of ethtool_ops */
10860
10861 /****************************************************************************
10862 * General service functions
10863 ****************************************************************************/
10864
10865 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10866 {
10867         u16 pmcsr;
10868
10869         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10870
10871         switch (state) {
10872         case PCI_D0:
10873                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10874                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10875                                        PCI_PM_CTRL_PME_STATUS));
10876
10877                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10878                         /* delay required during transition out of D3hot */
10879                         msleep(20);
10880                 break;
10881
10882         case PCI_D3hot:
10883                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10884                 pmcsr |= 3;
10885
10886                 if (bp->wol)
10887                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10888
10889                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10890                                       pmcsr);
10891
10892                 /* No more memory access after this point until
10893                  * device is brought back to D0.
10894                  */
10895                 break;
10896
10897         default:
10898                 return -EINVAL;
10899         }
10900         return 0;
10901 }
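/* A short sketch of the PMCSR manipulation above.  The D-state lives
 * in the two low bits selected by PCI_PM_CTRL_STATE_MASK (00 = D0,
 * 11 = D3hot), which is why entering D3hot is spelled "pmcsr |= 3".
 * Illustrative steps only, assuming the standard PCI PM register
 * layout from <linux/pci_regs.h>:
 *
 *	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;	// clear D-state field
 *	pmcsr |= 3;				// select D3hot
 *	pmcsr |= PCI_PM_CTRL_PME_ENABLE;	// arm PME# if WoL is set
 */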
10902
10903 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10904 {
10905         u16 rx_cons_sb;
10906
10907         /* Tell compiler that status block fields can change */
10908         barrier();
10909         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10910         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10911                 rx_cons_sb++;
10912         return (fp->rx_comp_cons != rx_cons_sb);
10913 }
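/* The increment above skips the last slot of an RCQ page: each page's
 * final entry is used as a next-page pointer rather than a real
 * completion, so when the status block consumer lands on that slot
 * ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) the index is
 * bumped past it before being compared with fp->rx_comp_cons.
 */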
10914
10915 /*
10916  * net_device service functions
10917  */
10918
10919 static int bnx2x_poll(struct napi_struct *napi, int budget)
10920 {
10921         int work_done = 0;
10922         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10923                                                  napi);
10924         struct bnx2x *bp = fp->bp;
10925
10926         while (1) {
10927 #ifdef BNX2X_STOP_ON_ERROR
10928                 if (unlikely(bp->panic)) {
10929                         napi_complete(napi);
10930                         return 0;
10931                 }
10932 #endif
10933
10934                 if (bnx2x_has_tx_work(fp))
10935                         bnx2x_tx_int(fp);
10936
10937                 if (bnx2x_has_rx_work(fp)) {
10938                         work_done += bnx2x_rx_int(fp, budget - work_done);
10939
10940                         /* must not complete if we consumed full budget */
10941                         if (work_done >= budget)
10942                                 break;
10943                 }
10944
10945                 /* Fall out from the NAPI loop if needed */
10946                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10947                         bnx2x_update_fpsb_idx(fp);
10948                         /* bnx2x_has_rx_work() reads the status block, so
10949                          * we must make sure the status block indices have
10950                          * really been read (bnx2x_update_fpsb_idx) before
10951                          * this check (bnx2x_has_rx_work); otherwise we
10952                          * might acknowledge a "newer" status block value
10953                          * to the IGU.  If a DMA arrived right after
10954                          * bnx2x_has_rx_work and there were no rmb, the
10955                          * read in bnx2x_update_fpsb_idx could be postponed
10956                          * to just before bnx2x_ack_sb, and no further
10957                          * interrupt would arrive while work is pending.
10958                          */
10959                         rmb();
10960
10961                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10962                                 napi_complete(napi);
10963                                 /* Re-enable interrupts */
10964                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10965                                              le16_to_cpu(fp->fp_c_idx),
10966                                              IGU_INT_NOP, 1);
10967                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10968                                              le16_to_cpu(fp->fp_u_idx),
10969                                              IGU_INT_ENABLE, 1);
10970                                 break;
10971                         }
10972                 }
10973         }
10974
10975         return work_done;
10976 }
10977
10978
10979 /* We split the first BD into a headers BD and a data BD
10980  * to ease the pain of our fellow microcode engineers;
10981  * we use one DMA mapping for both BDs.
10982  * So far this has only been observed to happen
10983  * in Other Operating Systems(TM).
10984  */
10985 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10986                                    struct bnx2x_fastpath *fp,
10987                                    struct sw_tx_bd *tx_buf,
10988                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
10989                                    u16 bd_prod, int nbd)
10990 {
10991         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10992         struct eth_tx_bd *d_tx_bd;
10993         dma_addr_t mapping;
10994         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10995
10996         /* first fix first BD */
10997         h_tx_bd->nbd = cpu_to_le16(nbd);
10998         h_tx_bd->nbytes = cpu_to_le16(hlen);
10999
11000         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11001            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11002            h_tx_bd->addr_lo, h_tx_bd->nbd);
11003
11004         /* now get a new data BD
11005          * (after the pbd) and fill it */
11006         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11007         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11008
11009         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11010                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11011
11012         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11013         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11014         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11015
11016         /* this marks the BD as one that has no individual mapping */
11017         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11018
11019         DP(NETIF_MSG_TX_QUEUED,
11020            "TSO split data size is %d (%x:%x)\n",
11021            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11022
11023         /* update tx_bd */
11024         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11025
11026         return bd_prod;
11027 }
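/* A worked example of the split above, with made-up numbers: if the
 * first BD mapped 1500 bytes at DMA address A and the headers occupy
 * hlen = 54 bytes, the split leaves
 *
 *	header BD:  addr = A,		nbytes = 54
 *	data BD:    addr = A + 54,	nbytes = 1500 - 54 = 1446
 *
 * Both BDs share the one DMA mapping; BNX2X_TSO_SPLIT_BD tells the
 * completion path to unmap only once.
 */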
11028
11029 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11030 {
11031         if (fix > 0)
11032                 csum = (u16) ~csum_fold(csum_sub(csum,
11033                                 csum_partial(t_header - fix, fix, 0)));
11034
11035         else if (fix < 0)
11036                 csum = (u16) ~csum_fold(csum_add(csum,
11037                                 csum_partial(t_header, -fix, 0)));
11038
11039         return swab16(csum);
11040 }
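/* bnx2x_csum_fix() compensates for a checksum the stack computed from
 * a start offset that differs from the transport header by "fix"
 * bytes.  With fix > 0 the sum covered fix extra bytes before the
 * header, so their partial sum is subtracted; with fix < 0 the sum
 * started inside the header, so the missing bytes are added back.
 * The final swab16() puts the folded result in the byte order the
 * parsing BD expects.
 */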
11041
11042 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11043 {
11044         u32 rc;
11045
11046         if (skb->ip_summed != CHECKSUM_PARTIAL)
11047                 rc = XMIT_PLAIN;
11048
11049         else {
11050                 if (skb->protocol == htons(ETH_P_IPV6)) {
11051                         rc = XMIT_CSUM_V6;
11052                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11053                                 rc |= XMIT_CSUM_TCP;
11054
11055                 } else {
11056                         rc = XMIT_CSUM_V4;
11057                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11058                                 rc |= XMIT_CSUM_TCP;
11059                 }
11060         }
11061
11062         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11063                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
11064
11065         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11066                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
11067
11068         return rc;
11069 }
11070
11071 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11072 /* Check if a packet requires linearization (i.e. it is too fragmented).
11073  * There is no need to check fragmentation if the page size > 8K, since
11074  * then the FW restrictions cannot be violated. */
11075 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11076                              u32 xmit_type)
11077 {
11078         int to_copy = 0;
11079         int hlen = 0;
11080         int first_bd_sz = 0;
11081
11082         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11083         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11084
11085                 if (xmit_type & XMIT_GSO) {
11086                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11087                         /* Check if LSO packet needs to be copied:
11088                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11089                         int wnd_size = MAX_FETCH_BD - 3;
11090                         /* Number of windows to check */
11091                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11092                         int wnd_idx = 0;
11093                         int frag_idx = 0;
11094                         u32 wnd_sum = 0;
11095
11096                         /* Headers length */
11097                         hlen = (int)(skb_transport_header(skb) - skb->data) +
11098                                 tcp_hdrlen(skb);
11099
11100                         /* Amount of data (w/o headers) in the linear part of the SKB */
11101                         first_bd_sz = skb_headlen(skb) - hlen;
11102
11103                         wnd_sum  = first_bd_sz;
11104
11105                         /* Calculate the first sum - it's special */
11106                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11107                                 wnd_sum +=
11108                                         skb_shinfo(skb)->frags[frag_idx].size;
11109
11110                         /* If there was data in the linear part - check it */
11111                         if (first_bd_sz > 0) {
11112                                 if (unlikely(wnd_sum < lso_mss)) {
11113                                         to_copy = 1;
11114                                         goto exit_lbl;
11115                                 }
11116
11117                                 wnd_sum -= first_bd_sz;
11118                         }
11119
11120                         /* Others are easier: run through the frag list and
11121                            check all windows */
11122                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11123                                 wnd_sum +=
11124                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11125
11126                                 if (unlikely(wnd_sum < lso_mss)) {
11127                                         to_copy = 1;
11128                                         break;
11129                                 }
11130                                 wnd_sum -=
11131                                         skb_shinfo(skb)->frags[wnd_idx].size;
11132                         }
11133                 } else {
11134                         /* a non-LSO packet that is too fragmented must
11135                            always be linearized */
11136                         to_copy = 1;
11137                 }
11138         }
11139
11140 exit_lbl:
11141         if (unlikely(to_copy))
11142                 DP(NETIF_MSG_TX_QUEUED,
11143                    "Linearization IS REQUIRED for %s packet. "
11144                    "num_frags %d  hlen %d  first_bd_sz %d\n",
11145                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11146                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11147
11148         return to_copy;
11149 }
11150 #endif
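/* A worked example of the window check above, with hypothetical
 * numbers: suppose wnd_size = 3, lso_mss = 1460, first_bd_sz = 100
 * and frag sizes {800, 500, 700, 900, ...}.  The first window sums
 * 100 + 800 + 500 = 1400 < 1460, so the packet must be linearized:
 * the FW could otherwise be asked to build one MSS-sized segment from
 * more BDs than it can fetch at once (MAX_FETCH_BD).  Each later
 * window slides by one frag, adding the new tail and subtracting the
 * old head, so the whole scan is O(nr_frags).
 */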
11151
11152 /* called with netif_tx_lock
11153  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11154  * netif_wake_queue()
11155  */
11156 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11157 {
11158         struct bnx2x *bp = netdev_priv(dev);
11159         struct bnx2x_fastpath *fp;
11160         struct netdev_queue *txq;
11161         struct sw_tx_bd *tx_buf;
11162         struct eth_tx_start_bd *tx_start_bd;
11163         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11164         struct eth_tx_parse_bd *pbd = NULL;
11165         u16 pkt_prod, bd_prod;
11166         int nbd, fp_index;
11167         dma_addr_t mapping;
11168         u32 xmit_type = bnx2x_xmit_type(bp, skb);
11169         int i;
11170         u8 hlen = 0;
11171         __le16 pkt_size = 0;
11172
11173 #ifdef BNX2X_STOP_ON_ERROR
11174         if (unlikely(bp->panic))
11175                 return NETDEV_TX_BUSY;
11176 #endif
11177
11178         fp_index = skb_get_queue_mapping(skb);
11179         txq = netdev_get_tx_queue(dev, fp_index);
11180
11181         fp = &bp->fp[fp_index];
11182
11183         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11184                 fp->eth_q_stats.driver_xoff++;
11185                 netif_tx_stop_queue(txq);
11186                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11187                 return NETDEV_TX_BUSY;
11188         }
11189
11190         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
11191            "  gso type %x  xmit_type %x\n",
11192            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11193            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11194
11195 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11196         /* First, check if we need to linearize the skb (due to FW
11197            restrictions). No need to check fragmentation if page size > 8K
11198            (there will be no violation to FW restrictions) */
11199         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11200                 /* Statistics of linearization */
11201                 bp->lin_cnt++;
11202                 if (skb_linearize(skb) != 0) {
11203                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11204                            "silently dropping this SKB\n");
11205                         dev_kfree_skb_any(skb);
11206                         return NETDEV_TX_OK;
11207                 }
11208         }
11209 #endif
11210
11211         /*
11212          * Please read carefully.  First we use one BD which we mark as
11213          * the start BD, then we have a parsing info BD (used for TSO or
11214          * checksum offload), and only then the rest of the TSO BDs.
11215          * (Don't forget to mark the last one as last, and to unmap only
11216          * AFTER you write to the BD ...)
11217          * And above all: all PBD sizes are in 16-bit words - NOT DWORDS!
11218          */
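/* The resulting BD chain for a packet, sketched (illustrative layout
 * only):
 *
 *	[start BD] -> [parse BD] -> [data BD] ... [data BD, marked last]
 *
 * nbd counts the start BD, the parse BD and one BD per fragment; the
 * header/data split in bnx2x_tx_split() can add one more, and a
 * next-page BD crossed by the packet counts as one more at doorbell
 * time.
 */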
11219
11220         pkt_prod = fp->tx_pkt_prod++;
11221         bd_prod = TX_BD(fp->tx_bd_prod);
11222
11223         /* get a tx_buf and first BD */
11224         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11225         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11226
11227         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11228         tx_start_bd->general_data = (UNICAST_ADDRESS <<
11229                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11230         /* header nbd */
11231         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11232
11233         /* remember the first BD of the packet */
11234         tx_buf->first_bd = fp->tx_bd_prod;
11235         tx_buf->skb = skb;
11236         tx_buf->flags = 0;
11237
11238         DP(NETIF_MSG_TX_QUEUED,
11239            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
11240            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11241
11242 #ifdef BCM_VLAN
11243         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11244             (bp->flags & HW_VLAN_TX_FLAG)) {
11245                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11246                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11247         } else
11248 #endif
11249                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11250
11251         /* turn on parsing and get a BD */
11252         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11253         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11254
11255         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11256
11257         if (xmit_type & XMIT_CSUM) {
11258                 hlen = (skb_network_header(skb) - skb->data) / 2;
11259
11260                 /* for now NS flag is not used in Linux */
11261                 pbd->global_data =
11262                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11263                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11264
11265                 pbd->ip_hlen = (skb_transport_header(skb) -
11266                                 skb_network_header(skb)) / 2;
11267
11268                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11269
11270                 pbd->total_hlen = cpu_to_le16(hlen);
11271                 hlen = hlen*2;
11272
11273                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11274
11275                 if (xmit_type & XMIT_CSUM_V4)
11276                         tx_start_bd->bd_flags.as_bitfield |=
11277                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11278                 else
11279                         tx_start_bd->bd_flags.as_bitfield |=
11280                                                 ETH_TX_BD_FLAGS_IPV6;
11281
11282                 if (xmit_type & XMIT_CSUM_TCP) {
11283                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11284
11285                 } else {
11286                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11287
11288                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11289
11290                         DP(NETIF_MSG_TX_QUEUED,
11291                            "hlen %d  fix %d  csum before fix %x\n",
11292                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11293
11294                         /* HW bug: fixup the CSUM */
11295                         pbd->tcp_pseudo_csum =
11296                                 bnx2x_csum_fix(skb_transport_header(skb),
11297                                                SKB_CS(skb), fix);
11298
11299                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11300                            pbd->tcp_pseudo_csum);
11301                 }
11302         }
11303
11304         mapping = pci_map_single(bp->pdev, skb->data,
11305                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11306
11307         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11308         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11309         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11310         tx_start_bd->nbd = cpu_to_le16(nbd);
11311         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11312         pkt_size = tx_start_bd->nbytes;
11313
11314         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11315            "  nbytes %d  flags %x  vlan %x\n",
11316            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11317            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11318            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11319
11320         if (xmit_type & XMIT_GSO) {
11321
11322                 DP(NETIF_MSG_TX_QUEUED,
11323                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11324                    skb->len, hlen, skb_headlen(skb),
11325                    skb_shinfo(skb)->gso_size);
11326
11327                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11328
11329                 if (unlikely(skb_headlen(skb) > hlen))
11330                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11331                                                  hlen, bd_prod, ++nbd);
11332
11333                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11334                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11335                 pbd->tcp_flags = pbd_tcp_flags(skb);
11336
11337                 if (xmit_type & XMIT_GSO_V4) {
11338                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11339                         pbd->tcp_pseudo_csum =
11340                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11341                                                           ip_hdr(skb)->daddr,
11342                                                           0, IPPROTO_TCP, 0));
11343
11344                 } else
11345                         pbd->tcp_pseudo_csum =
11346                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11347                                                         &ipv6_hdr(skb)->daddr,
11348                                                         0, IPPROTO_TCP, 0));
11349
11350                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11351         }
11352         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11353
11354         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11355                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11356
11357                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11358                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11359                 if (total_pkt_bd == NULL)
11360                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11361
11362                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11363                                        frag->size, PCI_DMA_TODEVICE);
11364
11365                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11366                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11367                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11368                 le16_add_cpu(&pkt_size, frag->size);
11369
11370                 DP(NETIF_MSG_TX_QUEUED,
11371                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11372                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11373                    le16_to_cpu(tx_data_bd->nbytes));
11374         }
11375
11376         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11377
11378         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11379
11380         /* now send a tx doorbell, counting the next BD
11381          * if the packet contains or ends with it
11382          */
11383         if (TX_BD_POFF(bd_prod) < nbd)
11384                 nbd++;
11385
11386         if (total_pkt_bd != NULL)
11387                 total_pkt_bd->total_pkt_bytes = pkt_size;
11388
11389         if (pbd)
11390                 DP(NETIF_MSG_TX_QUEUED,
11391                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11392                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11393                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11394                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11395                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11396
11397         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11398
11399         /*
11400          * Make sure that the BD data is updated before updating the producer
11401          * since FW might read the BD right after the producer is updated.
11402          * This is only applicable for weak-ordered memory model archs such
11403          * as IA-64.  The following barrier is also mandatory since the FW
11404          * assumes packets must have BDs.
11405          */
11406         wmb();
11407
11408         fp->tx_db.data.prod += nbd;
11409         barrier();
11410         DOORBELL(bp, fp->index, fp->tx_db.raw);
11411
11412         mmiowb();
11413
11414         fp->tx_bd_prod += nbd;
11415
11416         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11417                 netif_tx_stop_queue(txq);
11418                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11419                    if we put Tx into XOFF state. */
11420                 smp_mb();
11421                 fp->eth_q_stats.driver_xoff++;
11422                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11423                         netif_tx_wake_queue(txq);
11424         }
11425         fp->tx_pkt++;
11426
11427         return NETDEV_TX_OK;
11428 }
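/* The stop/wake dance at the end of bnx2x_start_xmit() closes a race
 * with the completion path, roughly:
 *
 *	xmit:				tx_int (completion):
 *	  sees ring almost full
 *	  netif_tx_stop_queue()
 *	  smp_mb()			  frees BDs
 *					  sees queue stopped, wakes it
 *	  re-checks bnx2x_tx_avail()
 *	  wakes the queue itself if
 *	  room appeared in between
 *
 * Without the barrier and the re-check, a completion landing between
 * the availability test and the stop could leave the queue stopped
 * forever.
 */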
11429
11430 /* called with rtnl_lock */
11431 static int bnx2x_open(struct net_device *dev)
11432 {
11433         struct bnx2x *bp = netdev_priv(dev);
11434
11435         netif_carrier_off(dev);
11436
11437         bnx2x_set_power_state(bp, PCI_D0);
11438
11439         return bnx2x_nic_load(bp, LOAD_OPEN);
11440 }
11441
11442 /* called with rtnl_lock */
11443 static int bnx2x_close(struct net_device *dev)
11444 {
11445         struct bnx2x *bp = netdev_priv(dev);
11446
11447         /* Unload the driver, release IRQs */
11448         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11449         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11450                 if (!CHIP_REV_IS_SLOW(bp))
11451                         bnx2x_set_power_state(bp, PCI_D3hot);
11452
11453         return 0;
11454 }
11455
11456 /* called with netif_tx_lock from dev_mcast.c */
11457 static void bnx2x_set_rx_mode(struct net_device *dev)
11458 {
11459         struct bnx2x *bp = netdev_priv(dev);
11460         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11461         int port = BP_PORT(bp);
11462
11463         if (bp->state != BNX2X_STATE_OPEN) {
11464                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11465                 return;
11466         }
11467
11468         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11469
11470         if (dev->flags & IFF_PROMISC)
11471                 rx_mode = BNX2X_RX_MODE_PROMISC;
11472
11473         else if ((dev->flags & IFF_ALLMULTI) ||
11474                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11475                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11476
11477         else { /* some multicasts */
11478                 if (CHIP_IS_E1(bp)) {
11479                         int i, old, offset;
11480                         struct dev_mc_list *mclist;
11481                         struct mac_configuration_cmd *config =
11482                                                 bnx2x_sp(bp, mcast_config);
11483
11484                         for (i = 0, mclist = dev->mc_list;
11485                              mclist && (i < dev->mc_count);
11486                              i++, mclist = mclist->next) {
11487
11488                                 config->config_table[i].
11489                                         cam_entry.msb_mac_addr =
11490                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11491                                 config->config_table[i].
11492                                         cam_entry.middle_mac_addr =
11493                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11494                                 config->config_table[i].
11495                                         cam_entry.lsb_mac_addr =
11496                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11497                                 config->config_table[i].cam_entry.flags =
11498                                                         cpu_to_le16(port);
11499                                 config->config_table[i].
11500                                         target_table_entry.flags = 0;
11501                                 config->config_table[i].target_table_entry.
11502                                         clients_bit_vector =
11503                                                 cpu_to_le32(1 << BP_L_ID(bp));
11504                                 config->config_table[i].
11505                                         target_table_entry.vlan_id = 0;
11506
11507                                 DP(NETIF_MSG_IFUP,
11508                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11509                                    config->config_table[i].
11510                                                 cam_entry.msb_mac_addr,
11511                                    config->config_table[i].
11512                                                 cam_entry.middle_mac_addr,
11513                                    config->config_table[i].
11514                                                 cam_entry.lsb_mac_addr);
11515                         }
11516                         old = config->hdr.length;
11517                         if (old > i) {
11518                                 for (; i < old; i++) {
11519                                         if (CAM_IS_INVALID(config->
11520                                                            config_table[i])) {
11521                                                 /* already invalidated */
11522                                                 break;
11523                                         }
11524                                         /* invalidate */
11525                                         CAM_INVALIDATE(config->
11526                                                        config_table[i]);
11527                                 }
11528                         }
11529
11530                         if (CHIP_REV_IS_SLOW(bp))
11531                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11532                         else
11533                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11534
11535                         config->hdr.length = i;
11536                         config->hdr.offset = offset;
11537                         config->hdr.client_id = bp->fp->cl_id;
11538                         config->hdr.reserved1 = 0;
11539
11540                         bp->set_mac_pending++;
11541                         smp_wmb();
11542
11543                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11544                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11545                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11546                                       0);
11547                 } else { /* E1H */
11548                         /* Accept one or more multicasts */
11549                         struct dev_mc_list *mclist;
11550                         u32 mc_filter[MC_HASH_SIZE];
11551                         u32 crc, bit, regidx;
11552                         int i;
11553
11554                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11555
11556                         for (i = 0, mclist = dev->mc_list;
11557                              mclist && (i < dev->mc_count);
11558                              i++, mclist = mclist->next) {
11559
11560                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11561                                    mclist->dmi_addr);
11562
11563                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11564                                 bit = (crc >> 24) & 0xff;
11565                                 regidx = bit >> 5;
11566                                 bit &= 0x1f;
11567                                 mc_filter[regidx] |= (1 << bit);
11568                         }
11569
11570                         for (i = 0; i < MC_HASH_SIZE; i++)
11571                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11572                                        mc_filter[i]);
11573                 }
11574         }
11575
11576         bp->rx_mode = rx_mode;
11577         bnx2x_set_storm_rx_mode(bp);
11578 }
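/* A worked example of the E1H multicast hash above, with a made-up
 * CRC value: crc32c_le() of the MAC yields, say, 0xa1b2c3d4.  Then
 *
 *	bit    = (0xa1b2c3d4 >> 24) & 0xff = 0xa1 = 161
 *	regidx = 161 >> 5 = 5
 *	bit   &= 0x1f -> 1
 *
 * so the filter sets bit 1 of mc_filter[5]: the top CRC byte selects
 * one bit in a 256-bit table spread over MC_HASH_SIZE (8) registers.
 */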
11579
11580 /* called with rtnl_lock */
11581 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11582 {
11583         struct sockaddr *addr = p;
11584         struct bnx2x *bp = netdev_priv(dev);
11585
11586         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11587                 return -EINVAL;
11588
11589         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11590         if (netif_running(dev)) {
11591                 if (CHIP_IS_E1(bp))
11592                         bnx2x_set_eth_mac_addr_e1(bp, 1);
11593                 else
11594                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
11595         }
11596
11597         return 0;
11598 }
11599
11600 /* called with rtnl_lock */
11601 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11602                            int devad, u16 addr)
11603 {
11604         struct bnx2x *bp = netdev_priv(netdev);
11605         u16 value;
11606         int rc;
11607         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11608
11609         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11610            prtad, devad, addr);
11611
11612         if (prtad != bp->mdio.prtad) {
11613                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11614                    prtad, bp->mdio.prtad);
11615                 return -EINVAL;
11616         }
11617
11618         /* The HW expects different devad if CL22 is used */
11619         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11620
11621         bnx2x_acquire_phy_lock(bp);
11622         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11623                              devad, addr, &value);
11624         bnx2x_release_phy_lock(bp);
11625         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11626
11627         if (!rc)
11628                 rc = value;
11629         return rc;
11630 }
11631
11632 /* called with rtnl_lock */
11633 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11634                             u16 addr, u16 value)
11635 {
11636         struct bnx2x *bp = netdev_priv(netdev);
11637         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11638         int rc;
11639
11640         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11641                            " value 0x%x\n", prtad, devad, addr, value);
11642
11643         if (prtad != bp->mdio.prtad) {
11644                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11645                    prtad, bp->mdio.prtad);
11646                 return -EINVAL;
11647         }
11648
11649         /* The HW expects different devad if CL22 is used */
11650         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11651
11652         bnx2x_acquire_phy_lock(bp);
11653         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11654                               devad, addr, value);
11655         bnx2x_release_phy_lock(bp);
11656         return rc;
11657 }
11658
11659 /* called with rtnl_lock */
11660 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11661 {
11662         struct bnx2x *bp = netdev_priv(dev);
11663         struct mii_ioctl_data *mdio = if_mii(ifr);
11664
11665         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11666            mdio->phy_id, mdio->reg_num, mdio->val_in);
11667
11668         if (!netif_running(dev))
11669                 return -EAGAIN;
11670
11671         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11672 }
11673
11674 /* called with rtnl_lock */
11675 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11676 {
11677         struct bnx2x *bp = netdev_priv(dev);
11678         int rc = 0;
11679
11680         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11681             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11682                 return -EINVAL;
11683
11684         /* This does not race with packet allocation
11685          * because the actual alloc size is
11686          * only updated as part of load
11687          */
11688         dev->mtu = new_mtu;
11689
11690         if (netif_running(dev)) {
11691                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11692                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11693         }
11694
11695         return rc;
11696 }
11697
11698 static void bnx2x_tx_timeout(struct net_device *dev)
11699 {
11700         struct bnx2x *bp = netdev_priv(dev);
11701
11702 #ifdef BNX2X_STOP_ON_ERROR
11703         if (!bp->panic)
11704                 bnx2x_panic();
11705 #endif
11706         /* This allows the netif to be shut down gracefully before resetting */
11707         schedule_work(&bp->reset_task);
11708 }
11709
11710 #ifdef BCM_VLAN
11711 /* called with rtnl_lock */
11712 static void bnx2x_vlan_rx_register(struct net_device *dev,
11713                                    struct vlan_group *vlgrp)
11714 {
11715         struct bnx2x *bp = netdev_priv(dev);
11716
11717         bp->vlgrp = vlgrp;
11718
11719         /* Set flags according to the required capabilities */
11720         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11721
11722         if (dev->features & NETIF_F_HW_VLAN_TX)
11723                 bp->flags |= HW_VLAN_TX_FLAG;
11724
11725         if (dev->features & NETIF_F_HW_VLAN_RX)
11726                 bp->flags |= HW_VLAN_RX_FLAG;
11727
11728         if (netif_running(dev))
11729                 bnx2x_set_client_config(bp);
11730 }
11731
11732 #endif
11733
11734 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11735 static void poll_bnx2x(struct net_device *dev)
11736 {
11737         struct bnx2x *bp = netdev_priv(dev);
11738
11739         disable_irq(bp->pdev->irq);
11740         bnx2x_interrupt(bp->pdev->irq, dev);
11741         enable_irq(bp->pdev->irq);
11742 }
11743 #endif
11744
11745 static const struct net_device_ops bnx2x_netdev_ops = {
11746         .ndo_open               = bnx2x_open,
11747         .ndo_stop               = bnx2x_close,
11748         .ndo_start_xmit         = bnx2x_start_xmit,
11749         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11750         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11751         .ndo_validate_addr      = eth_validate_addr,
11752         .ndo_do_ioctl           = bnx2x_ioctl,
11753         .ndo_change_mtu         = bnx2x_change_mtu,
11754         .ndo_tx_timeout         = bnx2x_tx_timeout,
11755 #ifdef BCM_VLAN
11756         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11757 #endif
11758 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11759         .ndo_poll_controller    = poll_bnx2x,
11760 #endif
11761 };
11762
11763 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11764                                     struct net_device *dev)
11765 {
11766         struct bnx2x *bp;
11767         int rc;
11768
11769         SET_NETDEV_DEV(dev, &pdev->dev);
11770         bp = netdev_priv(dev);
11771
11772         bp->dev = dev;
11773         bp->pdev = pdev;
11774         bp->flags = 0;
11775         bp->func = PCI_FUNC(pdev->devfn);
11776
11777         rc = pci_enable_device(pdev);
11778         if (rc) {
11779                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11780                 goto err_out;
11781         }
11782
11783         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11784                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11785                        " aborting\n");
11786                 rc = -ENODEV;
11787                 goto err_out_disable;
11788         }
11789
11790         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11791                 printk(KERN_ERR PFX "Cannot find second PCI device"
11792                        " base address, aborting\n");
11793                 rc = -ENODEV;
11794                 goto err_out_disable;
11795         }
11796
11797         if (atomic_read(&pdev->enable_cnt) == 1) {
11798                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11799                 if (rc) {
11800                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11801                                " aborting\n");
11802                         goto err_out_disable;
11803                 }
11804
11805                 pci_set_master(pdev);
11806                 pci_save_state(pdev);
11807         }
11808
11809         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11810         if (bp->pm_cap == 0) {
11811                 printk(KERN_ERR PFX "Cannot find power management"
11812                        " capability, aborting\n");
11813                 rc = -EIO;
11814                 goto err_out_release;
11815         }
11816
11817         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11818         if (bp->pcie_cap == 0) {
11819                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11820                        " aborting\n");
11821                 rc = -EIO;
11822                 goto err_out_release;
11823         }
11824
11825         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11826                 bp->flags |= USING_DAC_FLAG;
11827                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11828                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11829                                " failed, aborting\n");
11830                         rc = -EIO;
11831                         goto err_out_release;
11832                 }
11833
11834         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11835                 printk(KERN_ERR PFX "System does not support DMA,"
11836                        " aborting\n");
11837                 rc = -EIO;
11838                 goto err_out_release;
11839         }
11840
11841         dev->mem_start = pci_resource_start(pdev, 0);
11842         dev->base_addr = dev->mem_start;
11843         dev->mem_end = pci_resource_end(pdev, 0);
11844
11845         dev->irq = pdev->irq;
11846
11847         bp->regview = pci_ioremap_bar(pdev, 0);
11848         if (!bp->regview) {
11849                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11850                 rc = -ENOMEM;
11851                 goto err_out_release;
11852         }
11853
11854         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11855                                         min_t(u64, BNX2X_DB_SIZE,
11856                                               pci_resource_len(pdev, 2)));
11857         if (!bp->doorbells) {
11858                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11859                 rc = -ENOMEM;
11860                 goto err_out_unmap;
11861         }
11862
11863         bnx2x_set_power_state(bp, PCI_D0);
11864
11865         /* clean indirect addresses */
11866         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11867                                PCICFG_VENDOR_ID_OFFSET);
11868         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11869         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11870         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11871         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11872
11873         dev->watchdog_timeo = TX_TIMEOUT;
11874
11875         dev->netdev_ops = &bnx2x_netdev_ops;
11876         dev->ethtool_ops = &bnx2x_ethtool_ops;
11877         dev->features |= NETIF_F_SG;
11878         dev->features |= NETIF_F_HW_CSUM;
11879         if (bp->flags & USING_DAC_FLAG)
11880                 dev->features |= NETIF_F_HIGHDMA;
11881         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11882         dev->features |= NETIF_F_TSO6;
11883 #ifdef BCM_VLAN
11884         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11885         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11886
11887         dev->vlan_features |= NETIF_F_SG;
11888         dev->vlan_features |= NETIF_F_HW_CSUM;
11889         if (bp->flags & USING_DAC_FLAG)
11890                 dev->vlan_features |= NETIF_F_HIGHDMA;
11891         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11892         dev->vlan_features |= NETIF_F_TSO6;
11893 #endif
11894
11895         /* get_port_hwinfo() will set prtad and mmds properly */
11896         bp->mdio.prtad = MDIO_PRTAD_NONE;
11897         bp->mdio.mmds = 0;
11898         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11899         bp->mdio.dev = dev;
11900         bp->mdio.mdio_read = bnx2x_mdio_read;
11901         bp->mdio.mdio_write = bnx2x_mdio_write;
11902
11903         return 0;
11904
11905 err_out_unmap:
11906         if (bp->regview) {
11907                 iounmap(bp->regview);
11908                 bp->regview = NULL;
11909         }
11910         if (bp->doorbells) {
11911                 iounmap(bp->doorbells);
11912                 bp->doorbells = NULL;
11913         }
11914
11915 err_out_release:
11916         if (atomic_read(&pdev->enable_cnt) == 1)
11917                 pci_release_regions(pdev);
11918
11919 err_out_disable:
11920         pci_disable_device(pdev);
11921         pci_set_drvdata(pdev, NULL);
11922
11923 err_out:
11924         return rc;
11925 }
11926
11927 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11928                                                  int *width, int *speed)
11929 {
11930         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11931
11932         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11933
11934         /* returned value: 1 = 2.5GT/s, 2 = 5GT/s */
11935         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11936 }
11937
11938 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11939 {
11940         const struct firmware *firmware = bp->firmware;
11941         struct bnx2x_fw_file_hdr *fw_hdr;
11942         struct bnx2x_fw_file_section *sections;
11943         u32 offset, len, num_ops;
11944         u16 *ops_offsets;
11945         int i;
11946         const u8 *fw_ver;
11947
11948         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11949                 return -EINVAL;
11950
11951         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11952         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11953
11954         /* Make sure none of the offsets and sizes make us read beyond
11955          * the end of the firmware data */
11956         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11957                 offset = be32_to_cpu(sections[i].offset);
11958                 len = be32_to_cpu(sections[i].len);
11959                 if (offset + len > firmware->size) {
11960                         printk(KERN_ERR PFX "Section %d length is out of "
11961                                             "bounds\n", i);
11962                         return -EINVAL;
11963                 }
11964         }
11965
11966         /* Likewise for the init_ops offsets */
11967         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11968         ops_offsets = (u16 *)(firmware->data + offset);
11969         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11970
11971         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11972                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11973                         printk(KERN_ERR PFX "Section offset %d is out of "
11974                                             "bounds\n", i);
11975                         return -EINVAL;
11976                 }
11977         }
11978
11979         /* Check FW version */
11980         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11981         fw_ver = firmware->data + offset;
11982         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11983             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11984             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11985             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11986                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11987                                     " Should be %d.%d.%d.%d\n",
11988                        fw_ver[0], fw_ver[1], fw_ver[2],
11989                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11990                        BCM_5710_FW_MINOR_VERSION,
11991                        BCM_5710_FW_REVISION_VERSION,
11992                        BCM_5710_FW_ENGINEERING_VERSION);
11993                 return -EINVAL;
11994         }
11995
11996         return 0;
11997 }
11998
11999 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12000 {
12001         const __be32 *source = (const __be32 *)_source;
12002         u32 *target = (u32 *)_target;
12003         u32 i;
12004
12005         for (i = 0; i < n/4; i++)
12006                 target[i] = be32_to_cpu(source[i]);
12007 }
12008
12009 /*
12010    Ops array is stored in the following format:
12011    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12012  */
12013 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12014 {
12015         const __be32 *source = (const __be32 *)_source;
12016         struct raw_op *target = (struct raw_op *)_target;
12017         u32 i, j, tmp;
12018
12019         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12020                 tmp = be32_to_cpu(source[j]);
12021                 target[i].op = (tmp >> 24) & 0xff;
12022                 target[i].offset =  tmp & 0xffffff;
12023                 target[i].raw_data = be32_to_cpu(source[j+1]);
12024         }
12025 }
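/* Decoding one raw op pair, with illustrative numbers: if the two
 * big-endian words in the file are 0x02001234 and 0x0000abcd, then
 *
 *	op       = 0x02001234 >> 24	 = 0x02
 *	offset   = 0x02001234 & 0xffffff = 0x001234
 *	raw_data =			   0x0000abcd
 */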
12026
12027 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12028 {
12029         const __be16 *source = (const __be16 *)_source;
12030         u16 *target = (u16 *)_target;
12031         u32 i;
12032
12033         for (i = 0; i < n/2; i++)
12034                 target[i] = be16_to_cpu(source[i]);
12035 }
12036
12037 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12038         do { \
12039                 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12040                 bp->arr = kmalloc(len, GFP_KERNEL); \
12041                 if (!bp->arr) { \
12042                         printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12043                                             "for "#arr"\n", len); \
12044                         goto lbl; \
12045                 } \
12046                 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12047                      (u8 *)bp->arr, len); \
12048         } while (0)
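/* For illustration, BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) roughly expands to: allocate
 * be32_to_cpu(fw_hdr->init_data.len) bytes into bp->init_data, jump
 * to request_firmware_exit on allocation failure, and otherwise
 * byte-swap that section out of bp->firmware->data into the new
 * buffer.
 */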

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
        const char *fw_file_name;
        struct bnx2x_fw_file_hdr *fw_hdr;
        int rc;

        if (CHIP_IS_E1(bp))
                fw_file_name = FW_FILE_NAME_E1;
        else
                fw_file_name = FW_FILE_NAME_E1H;

        printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

        rc = request_firmware(&bp->firmware, fw_file_name, dev);
        if (rc) {
                printk(KERN_ERR PFX "Can't load firmware file %s\n",
                       fw_file_name);
                goto request_firmware_exit;
        }

        rc = bnx2x_check_firmware(bp);
        if (rc) {
                printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
                goto request_firmware_exit;
        }

        fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

        /* Initialize the pointers to the init arrays */
        /* Blob */
        BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

        /* Opcodes */
        BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

        /* Offsets */
        BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
                            be16_to_cpu_n);

        /* STORMs firmware */
        INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
        INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_pram_data.offset);
        INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_int_table_data.offset);
        INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_pram_data.offset);
        INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
        INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_pram_data.offset);
        INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_int_table_data.offset);
        INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_pram_data.offset);

        return 0;

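/* Error unwind: each label frees what was successfully allocated before
 * the failing step, in reverse order of allocation, then drops the
 * firmware reference.
 */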
init_offsets_alloc_err:
        kfree(bp->init_ops);
init_ops_alloc_err:
        kfree(bp->init_data);
request_firmware_exit:
        release_firmware(bp->firmware);

        return rc;
}


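/* PCI probe: allocate the net_device, map the device (bnx2x_init_dev),
 * set up driver state (bnx2x_init_bp), load and unpack the firmware and
 * only then register the netdev.  Failures past bnx2x_init_dev() unwind
 * through init_one_exit below.
 */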
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct net_device *dev = NULL;
        struct bnx2x *bp;
        int pcie_width, pcie_speed;
        int rc;

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
        if (!dev) {
                printk(KERN_ERR PFX "Cannot allocate net device\n");
                return -ENOMEM;
        }

        bp = netdev_priv(dev);
        bp->msglevel = debug;

        pci_set_drvdata(pdev, dev);

        rc = bnx2x_init_dev(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        rc = bnx2x_init_bp(bp);
        if (rc)
                goto init_one_exit;

        /* Set init arrays */
        rc = bnx2x_init_firmware(bp, &pdev->dev);
        if (rc) {
                printk(KERN_ERR PFX "Error loading firmware\n");
                goto init_one_exit;
        }

        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto init_one_exit;
        }

        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
        printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
               " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
               (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
               pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
               dev->base_addr, bp->pdev->irq);
        printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

        return 0;

init_one_exit:
        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return;
        }
        bp = netdev_priv(dev);

        unregister_netdev(dev);

        kfree(bp->init_ops_offsets);
        kfree(bp->init_ops);
        kfree(bp->init_data);
        release_firmware(bp->firmware);

        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

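/* Suspend: PCI config space is saved before the netif_running() check so
 * that a later resume can always restore it, even if the interface was
 * down when the system went to sleep.
 */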
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_save_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        netif_device_detach(dev);

        bnx2x_nic_unload(bp, UNLOAD_CLOSE);

        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

        rtnl_unlock();

        return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
        int rc;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_restore_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        bnx2x_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);

        rc = bnx2x_nic_load(bp, LOAD_OPEN);

        rtnl_unlock();

        return rc;
}

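/* Lightweight unload used after a PCI channel failure: unlike the normal
 * bnx2x_nic_unload(), this path assumes the chip may no longer be
 * reachable, so it only stops the datapath, invalidates the host-side
 * CAM shadow on E1 and frees driver resources.
 */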
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        netif_carrier_off(bp->dev);

        return 0;
}

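/* After a slot reset the shared-memory window must be rediscovered:
 * re-read the shmem base from the chip, sanity-check it, and resync the
 * firmware mailbox sequence number with the MCP (or flag the MCP as
 * absent).
 */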
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

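/* The PCI error-recovery core drives these callbacks in order on a bus
 * error: error_detected() quiesces the device; if recovery is possible,
 * slot_reset() is called after the link has been reset; resume() runs
 * once traffic may flow again.
 */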
static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
        int ret;

        printk(KERN_INFO "%s", version);

        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                printk(KERN_ERR PFX "Cannot create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                printk(KERN_ERR PFX "Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
        }
        return ret;
}

static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
        struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        spin_lock_bh(&bp->spq_lock);
        bp->cnic_spq_pending -= count;

        for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
             bp->cnic_spq_pending++) {

                if (!bp->cnic_kwq_pending)
                        break;

                spe = bnx2x_sp_get_next(bp);
                *spe = *bp->cnic_kwq_cons;

                bp->cnic_kwq_pending--;

                DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
                   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

                if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
                        bp->cnic_kwq_cons = bp->cnic_kwq;
                else
                        bp->cnic_kwq_cons++;
        }
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
}
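
/*
 * Editor's note: cnic_kwq is a flat array treated as a ring, with
 * cnic_kwq_last pointing at its final element, so both producer and
 * consumer advance with the same wrap pattern used above:
 *
 *      ptr = (ptr == bp->cnic_kwq_last) ? bp->cnic_kwq : ptr + 1;
 */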

static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.mac_config_addr.hi,
                   spe->data.mac_config_addr.lo,
                   bp->cnic_kwq_pending);

                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        mutex_lock(&bp->cnic_mutex);
        c_ops = bp->cnic_ops;
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);

        return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        rcu_read_unlock();

        return rc;
}

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
        struct cnic_ctl_info ctl = {0};

        ctl.cmd = cmd;

        return bnx2x_cnic_ctl_send(bp, &ctl);
}
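
/* Example use: bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD) or
 * bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD) tell the CNIC module that the
 * device is coming up or going down; only ctl.cmd carries information.
 */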

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
        struct cnic_ctl_info ctl;

        /* first we tell CNIC and only then we count this as a completion */
        ctl.cmd = CNIC_CTL_COMPLETION_CMD;
        ctl.data.comp.cid = cid;

        bnx2x_cnic_ctl_send_bh(bp, &ctl);
        bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_COMPLETION_CMD: {
                int count = ctl->data.comp.comp_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held. */
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask |= (1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        /* rtnl_lock is held. */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask &= ~(1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        cp->irq_arr[0].status_blk = bp->cnic_sb;
        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;

        cp->num_irq = 2;
}
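
/* Under MSI-X, CNIC is handed bp->msix_table[1] - the vector the driver
 * reserves for it - together with the CNIC status block; irq_arr[1]
 * always describes the default status block so CNIC can also observe
 * slowpath events.
 */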

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

        bnx2x_setup_cnic_irq_info(bp);
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = BCM_CNIC_CID_START;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
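
/*
 * Editor's sketch of the consumer side (hedged - the callback table and
 * context names below are stand-ins, see cnic.c for the real sequence):
 * the CNIC module resolves this export at run time, roughly
 *
 *      struct cnic_eth_dev *(*probe)(struct net_device *);
 *      struct cnic_eth_dev *cp;
 *
 *      probe = symbol_get(bnx2x_cnic_probe);
 *      if (probe) {
 *              cp = probe(netdev);
 *              cp->drv_register_cnic(netdev, my_cnic_ops, my_cnic_data);
 *              symbol_put(bnx2x_cnic_probe);
 *      }
 */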

#endif /* BCM_CNIC */
