bnx2x: Remove two prefetch()
drivers/net/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52 #include <linux/stringify.h>
53
54
55 #include "bnx2x.h"
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
59
60 #define DRV_MODULE_VERSION      "1.52.53-1"
61 #define DRV_MODULE_RELDATE      "2010/04/18"
62 #define BNX2X_BC_VER            0x040200
63
64 #include <linux/firmware.h>
65 #include "bnx2x_fw_file_hdr.h"
66 /* FW files */
67 #define FW_FILE_VERSION                                 \
68         __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
69         __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
70         __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
71         __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72 #define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
73 #define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"
74
75 /* Time in jiffies before concluding the transmitter is hung */
76 #define TX_TIMEOUT              (5*HZ)
77
78 static char version[] __devinitdata =
79         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
80         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
81
82 MODULE_AUTHOR("Eliezer Tamir");
83 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_MODULE_VERSION);
86 MODULE_FIRMWARE(FW_FILE_NAME_E1);
87 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
88
89 static int multi_mode = 1;
90 module_param(multi_mode, int, 0);
91 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92                              "(0 Disable; 1 Enable (default))");
93
94 static int num_queues;
95 module_param(num_queues, int, 0);
96 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97                                 " (default is the number of CPUs)");
98
99 static int disable_tpa;
100 module_param(disable_tpa, int, 0);
101 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
102
103 static int int_mode;
104 module_param(int_mode, int, 0);
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
106                                 "(1 INT#x; 2 MSI)");
107
108 static int dropless_fc;
109 module_param(dropless_fc, int, 0);
110 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
111
112 static int poll;
113 module_param(poll, int, 0);
114 MODULE_PARM_DESC(poll, " Use polling (for debug)");
115
116 static int mrrs = -1;
117 module_param(mrrs, int, 0);
118 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
119
120 static int debug;
121 module_param(debug, int, 0);
122 MODULE_PARM_DESC(debug, " Default debug msglevel");
123
124 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
125
126 static struct workqueue_struct *bnx2x_wq;
127
128 enum bnx2x_board_type {
129         BCM57710 = 0,
130         BCM57711 = 1,
131         BCM57711E = 2,
132 };
133
134 /* indexed by board_type, above */
135 static struct {
136         char *name;
137 } board_info[] __devinitdata = {
138         { "Broadcom NetXtreme II BCM57710 XGb" },
139         { "Broadcom NetXtreme II BCM57711 XGb" },
140         { "Broadcom NetXtreme II BCM57711E XGb" }
141 };
142
143
144 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
145         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
146         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
147         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
148         { 0 }
149 };
150
151 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
152
153 /****************************************************************************
154 * General service functions
155 ****************************************************************************/
156
157 /* used only at init
158  * locking is done by mcp
159  */
160 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
161 {
162         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
163         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
164         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
165                                PCICFG_VENDOR_ID_OFFSET);
166 }
167
168 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
169 {
170         u32 val;
171
172         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
173         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
174         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
175                                PCICFG_VENDOR_ID_OFFSET);
176
177         return val;
178 }
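/* Example: the two helpers above tunnel GRC accesses through PCI config
 * space, which works even before the device BARs are usable.  A minimal
 * read-modify-write sketch (illustrative only; SOME_GRC_REG is a
 * placeholder, not a real register name):
 *
 *      u32 val = bnx2x_reg_rd_ind(bp, SOME_GRC_REG);
 *      bnx2x_reg_wr_ind(bp, SOME_GRC_REG, val | 0x1);
 *
 * Both helpers park PCICFG_GRC_ADDRESS back at PCICFG_VENDOR_ID_OFFSET
 * so that an unrelated config-space read cannot hit the GRC window.
 */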
179
180 static const u32 dmae_reg_go_c[] = {
181         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
182         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
183         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
184         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
185 };
186
187 /* copy command into DMAE command memory and set DMAE command go */
188 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
189                             int idx)
190 {
191         u32 cmd_offset;
192         int i;
193
194         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
195         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
196                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
197
198                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
199                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
200         }
201         REG_WR(bp, dmae_reg_go_c[idx], 1);
202 }
203
204 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
205                       u32 len32)
206 {
207         struct dmae_command dmae;
208         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
209         int cnt = 200;
210
211         if (!bp->dmae_ready) {
212                 u32 *data = bnx2x_sp(bp, wb_data[0]);
213
214                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
215                    "  using indirect\n", dst_addr, len32);
216                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
217                 return;
218         }
219
220         memset(&dmae, 0, sizeof(struct dmae_command));
221
222         dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
223                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
224                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
225 #ifdef __BIG_ENDIAN
226                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
227 #else
228                        DMAE_CMD_ENDIANITY_DW_SWAP |
229 #endif
230                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
231                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
232         dmae.src_addr_lo = U64_LO(dma_addr);
233         dmae.src_addr_hi = U64_HI(dma_addr);
234         dmae.dst_addr_lo = dst_addr >> 2;
235         dmae.dst_addr_hi = 0;
236         dmae.len = len32;
237         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
238         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
239         dmae.comp_val = DMAE_COMP_VAL;
240
241         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
242            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
243                     "dst_addr [%x:%08x (%08x)]\n"
244            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
245            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
246            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
247            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
248         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
249            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
250            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
251
252         mutex_lock(&bp->dmae_mutex);
253
254         *wb_comp = 0;
255
256         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
257
258         udelay(5);
259
260         while (*wb_comp != DMAE_COMP_VAL) {
261                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
262
263                 if (!cnt) {
264                         BNX2X_ERR("DMAE timeout!\n");
265                         break;
266                 }
267                 cnt--;
268                 /* adjust delay for emulation/FPGA */
269                 if (CHIP_REV_IS_SLOW(bp))
270                         msleep(100);
271                 else
272                         udelay(5);
273         }
274
275         mutex_unlock(&bp->dmae_mutex);
276 }
277
278 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
279 {
280         struct dmae_command dmae;
281         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
282         int cnt = 200;
283
284         if (!bp->dmae_ready) {
285                 u32 *data = bnx2x_sp(bp, wb_data[0]);
286                 int i;
287
288                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
289                    "  using indirect\n", src_addr, len32);
290                 for (i = 0; i < len32; i++)
291                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
292                 return;
293         }
294
295         memset(&dmae, 0, sizeof(struct dmae_command));
296
297         dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
298                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
299                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
300 #ifdef __BIG_ENDIAN
301                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
302 #else
303                        DMAE_CMD_ENDIANITY_DW_SWAP |
304 #endif
305                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
306                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
307         dmae.src_addr_lo = src_addr >> 2;
308         dmae.src_addr_hi = 0;
309         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
310         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
311         dmae.len = len32;
312         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
313         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
314         dmae.comp_val = DMAE_COMP_VAL;
315
316         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
317            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
318                     "dst_addr [%x:%08x (%08x)]\n"
319            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
320            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
321            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
322            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
323
324         mutex_lock(&bp->dmae_mutex);
325
326         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
327         *wb_comp = 0;
328
329         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
330
331         udelay(5);
332
333         while (*wb_comp != DMAE_COMP_VAL) {
334
335                 if (!cnt) {
336                         BNX2X_ERR("DMAE timeout!\n");
337                         break;
338                 }
339                 cnt--;
340                 /* adjust delay for emulation/FPGA */
341                 if (CHIP_REV_IS_SLOW(bp))
342                         msleep(100);
343                 else
344                         udelay(5);
345         }
346         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
347            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
348            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
349
350         mutex_unlock(&bp->dmae_mutex);
351 }
352
353 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
354                                u32 addr, u32 len)
355 {
356         int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
357         int offset = 0;
358
359         while (len > dmae_wr_max) {
360                 bnx2x_write_dmae(bp, phys_addr + offset,
361                                  addr + offset, dmae_wr_max);
362                 offset += dmae_wr_max * 4;
363                 len -= dmae_wr_max;
364         }
365
366         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
367 }
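/* Worked example of the chunking above (the dmae_wr_max value is
 * illustrative): with dmae_wr_max = 0x400 words and len = 0x500 words,
 * the loop posts one DMAE of 0x400 words and advances offset by
 * 0x400 * 4 = 0x1000 (offsets are in bytes while lengths are in 32-bit
 * words), and the final bnx2x_write_dmae() call covers the remaining
 * 0x100 words.
 */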
368
369 /* used only for slowpath so not inlined */
370 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
371 {
372         u32 wb_write[2];
373
374         wb_write[0] = val_hi;
375         wb_write[1] = val_lo;
376         REG_WR_DMAE(bp, reg, wb_write, 2);
377 }
378
379 #ifdef USE_WB_RD
380 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
381 {
382         u32 wb_data[2];
383
384         REG_RD_DMAE(bp, reg, wb_data, 2);
385
386         return HILO_U64(wb_data[0], wb_data[1]);
387 }
388 #endif
389
390 static int bnx2x_mc_assert(struct bnx2x *bp)
391 {
392         char last_idx;
393         int i, rc = 0;
394         u32 row0, row1, row2, row3;
395
396         /* XSTORM */
397         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
398                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
399         if (last_idx)
400                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
401
402         /* print the asserts */
403         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
404
405                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406                               XSTORM_ASSERT_LIST_OFFSET(i));
407                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
409                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
411                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
412                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
413
414                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
415                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
416                                   " 0x%08x 0x%08x 0x%08x\n",
417                                   i, row3, row2, row1, row0);
418                         rc++;
419                 } else {
420                         break;
421                 }
422         }
423
424         /* TSTORM */
425         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
426                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
427         if (last_idx)
428                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
429
430         /* print the asserts */
431         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
432
433                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434                               TSTORM_ASSERT_LIST_OFFSET(i));
435                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
437                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
439                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
440                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
441
442                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
443                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
444                                   " 0x%08x 0x%08x 0x%08x\n",
445                                   i, row3, row2, row1, row0);
446                         rc++;
447                 } else {
448                         break;
449                 }
450         }
451
452         /* CSTORM */
453         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
454                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
455         if (last_idx)
456                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
457
458         /* print the asserts */
459         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
460
461                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462                               CSTORM_ASSERT_LIST_OFFSET(i));
463                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
465                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
467                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
468                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
469
470                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
471                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
472                                   " 0x%08x 0x%08x 0x%08x\n",
473                                   i, row3, row2, row1, row0);
474                         rc++;
475                 } else {
476                         break;
477                 }
478         }
479
480         /* USTORM */
481         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
482                            USTORM_ASSERT_LIST_INDEX_OFFSET);
483         if (last_idx)
484                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
485
486         /* print the asserts */
487         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
488
489                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
490                               USTORM_ASSERT_LIST_OFFSET(i));
491                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
492                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
493                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
494                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
495                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
496                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
497
498                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
499                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
500                                   " 0x%08x 0x%08x 0x%08x\n",
501                                   i, row3, row2, row1, row0);
502                         rc++;
503                 } else {
504                         break;
505                 }
506         }
507
508         return rc;
509 }
510
511 static void bnx2x_fw_dump(struct bnx2x *bp)
512 {
513         u32 addr;
514         u32 mark, offset;
515         __be32 data[9];
516         int word;
517
518         if (BP_NOMCP(bp)) {
519                 BNX2X_ERR("NO MCP - can not dump\n");
520                 return;
521         }
522
523         addr = bp->common.shmem_base - 0x0800 + 4;
524         mark = REG_RD(bp, addr);
525         mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
526         pr_err("begin fw dump (mark 0x%x)\n", mark);
527
528         pr_err("");
529         for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
530                 for (word = 0; word < 8; word++)
531                         data[word] = htonl(REG_RD(bp, offset + 4*word));
532                 data[8] = 0x0;
533                 pr_cont("%s", (char *)data);
534         }
535         for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
536                 for (word = 0; word < 8; word++)
537                         data[word] = htonl(REG_RD(bp, offset + 4*word));
538                 data[8] = 0x0;
539                 pr_cont("%s", (char *)data);
540         }
541         pr_err("end of fw dump\n");
542 }
543
544 static void bnx2x_panic_dump(struct bnx2x *bp)
545 {
546         int i;
547         u16 j, start, end;
548
549         bp->stats_state = STATS_STATE_DISABLED;
550         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
551
552         BNX2X_ERR("begin crash dump -----------------\n");
553
554         /* Indices */
555         /* Common */
556         BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
557                   "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
558                   "  spq_prod_idx(0x%x)\n",
559                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
560                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
561
562         /* Rx */
563         for_each_queue(bp, i) {
564                 struct bnx2x_fastpath *fp = &bp->fp[i];
565
566                 BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
567                           "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
568                           "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
569                           i, fp->rx_bd_prod, fp->rx_bd_cons,
570                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
571                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
572                 BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
573                           "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
574                           fp->rx_sge_prod, fp->last_max_sge,
575                           le16_to_cpu(fp->fp_u_idx),
576                           fp->status_blk->u_status_block.status_block_index);
577         }
578
579         /* Tx */
580         for_each_queue(bp, i) {
581                 struct bnx2x_fastpath *fp = &bp->fp[i];
582
583                 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
584                           "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
585                           "  *tx_cons_sb(0x%x)\n",
586                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
587                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
588                 BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
589                           "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
590                           fp->status_blk->c_status_block.status_block_index,
591                           fp->tx_db.data.prod);
592         }
593
594         /* Rings */
595         /* Rx */
596         for_each_queue(bp, i) {
597                 struct bnx2x_fastpath *fp = &bp->fp[i];
598
599                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
600                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
601                 for (j = start; j != end; j = RX_BD(j + 1)) {
602                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
603                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
604
605                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
606                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
607                 }
608
609                 start = RX_SGE(fp->rx_sge_prod);
610                 end = RX_SGE(fp->last_max_sge);
611                 for (j = start; j != end; j = RX_SGE(j + 1)) {
612                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
613                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
614
615                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
616                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
617                 }
618
619                 start = RCQ_BD(fp->rx_comp_cons - 10);
620                 end = RCQ_BD(fp->rx_comp_cons + 503);
621                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
622                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
623
624                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
625                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
626                 }
627         }
628
629         /* Tx */
630         for_each_queue(bp, i) {
631                 struct bnx2x_fastpath *fp = &bp->fp[i];
632
633                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
634                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
635                 for (j = start; j != end; j = TX_BD(j + 1)) {
636                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
637
638                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
639                                   i, j, sw_bd->skb, sw_bd->first_bd);
640                 }
641
642                 start = TX_BD(fp->tx_bd_cons - 10);
643                 end = TX_BD(fp->tx_bd_cons + 254);
644                 for (j = start; j != end; j = TX_BD(j + 1)) {
645                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
646
647                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
648                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
649                 }
650         }
651
652         bnx2x_fw_dump(bp);
653         bnx2x_mc_assert(bp);
654         BNX2X_ERR("end crash dump -----------------\n");
655 }
656
657 static void bnx2x_int_enable(struct bnx2x *bp)
658 {
659         int port = BP_PORT(bp);
660         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
661         u32 val = REG_RD(bp, addr);
662         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
663         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
664
665         if (msix) {
666                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667                          HC_CONFIG_0_REG_INT_LINE_EN_0);
668                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
669                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
670         } else if (msi) {
671                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
672                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
673                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
674                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
675         } else {
676                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
677                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
678                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
679                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
680
681                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
682                    val, port, addr);
683
684                 REG_WR(bp, addr, val);
685
686                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
687         }
688
689         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
690            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
691
692         REG_WR(bp, addr, val);
693         /*
694          * Ensure that HC_CONFIG is written before leading/trailing edge config
695          */
696         mmiowb();
697         barrier();
698
699         if (CHIP_IS_E1H(bp)) {
700                 /* init leading/trailing edge */
701                 if (IS_E1HMF(bp)) {
702                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
703                         if (bp->port.pmf)
704                                 /* enable nig and gpio3 attention */
705                                 val |= 0x1100;
706                 } else
707                         val = 0xffff;
708
709                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
710                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
711         }
712
713         /* Make sure that interrupts are indeed enabled from here on */
714         mmiowb();
715 }
716
717 static void bnx2x_int_disable(struct bnx2x *bp)
718 {
719         int port = BP_PORT(bp);
720         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
721         u32 val = REG_RD(bp, addr);
722
723         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
724                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
725                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
726                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
727
728         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
729            val, port, addr);
730
731         /* flush all outstanding writes */
732         mmiowb();
733
734         REG_WR(bp, addr, val);
735         if (REG_RD(bp, addr) != val)
736                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
737 }
738
739 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
740 {
741         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
742         int i, offset;
743
744         /* disable interrupt handling */
745         atomic_inc(&bp->intr_sem);
746         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
747
748         if (disable_hw)
749                 /* prevent the HW from sending interrupts */
750                 bnx2x_int_disable(bp);
751
752         /* make sure all ISRs are done */
753         if (msix) {
754                 synchronize_irq(bp->msix_table[0].vector);
755                 offset = 1;
756 #ifdef BCM_CNIC
757                 offset++;
758 #endif
759                 for_each_queue(bp, i)
760                         synchronize_irq(bp->msix_table[i + offset].vector);
761         } else
762                 synchronize_irq(bp->pdev->irq);
763
764         /* make sure sp_task is not running */
765         cancel_delayed_work(&bp->sp_task);
766         flush_workqueue(bnx2x_wq);
767 }
768
769 /* fast path */
770
771 /*
772  * General service functions
773  */
774
775 /* Return true if succeeded to acquire the lock */
776 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
777 {
778         u32 lock_status;
779         u32 resource_bit = (1 << resource);
780         int func = BP_FUNC(bp);
781         u32 hw_lock_control_reg;
782
783         DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
784
785         /* Validating that the resource is within range */
786         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
787                 DP(NETIF_MSG_HW,
788                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
789                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
790                 return false;
791         }
792
793         if (func <= 5)
794                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
795         else
796                 hw_lock_control_reg =
797                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
798
799         /* Try to acquire the lock */
800         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
801         lock_status = REG_RD(bp, hw_lock_control_reg);
802         if (lock_status & resource_bit)
803                 return true;
804
805         DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
806         return false;
807 }
808
809 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
810                                 u8 storm, u16 index, u8 op, u8 update)
811 {
812         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
813                        COMMAND_REG_INT_ACK);
814         struct igu_ack_register igu_ack;
815
816         igu_ack.status_block_index = index;
817         igu_ack.sb_id_and_flags =
818                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
819                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
820                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
821                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
822
823         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
824            (*(u32 *)&igu_ack), hc_addr);
825         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
826
827         /* Make sure that ACK is written */
828         mmiowb();
829         barrier();
830 }
831
832 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
833 {
834         struct host_status_block *fpsb = fp->status_blk;
835
836         barrier(); /* status block is written to by the chip */
837         fp->fp_c_idx = fpsb->c_status_block.status_block_index;
838         fp->fp_u_idx = fpsb->u_status_block.status_block_index;
839 }
840
841 static u16 bnx2x_ack_int(struct bnx2x *bp)
842 {
843         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
844                        COMMAND_REG_SIMD_MASK);
845         u32 result = REG_RD(bp, hc_addr);
846
847         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
848            result, hc_addr);
849
850         return result;
851 }
852
853
854 /*
855  * fast path service functions
856  */
857
858 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
859 {
860         /* Tell compiler that consumer and producer can change */
861         barrier();
862         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
863 }
864
865 /* free skb in the packet ring at pos idx
866  * return idx of last bd freed
867  */
868 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
869                              u16 idx)
870 {
871         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
872         struct eth_tx_start_bd *tx_start_bd;
873         struct eth_tx_bd *tx_data_bd;
874         struct sk_buff *skb = tx_buf->skb;
875         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
876         int nbd;
877
878         /* prefetch skb end pointer to speedup dev_kfree_skb() */
879         prefetch(&skb->end);
880
881         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
882            idx, tx_buf, skb);
883
884         /* unmap first bd */
885         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
886         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
887         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
888                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
889
890         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
891 #ifdef BNX2X_STOP_ON_ERROR
892         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
893                 BNX2X_ERR("BAD nbd!\n");
894                 bnx2x_panic();
895         }
896 #endif
897         new_cons = nbd + tx_buf->first_bd;
898
899         /* Get the next bd */
900         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
901
902         /* Skip a parse bd... */
903         --nbd;
904         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
905
906         /* ...and the TSO split header bd since they have no mapping */
907         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
908                 --nbd;
909                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
910         }
911
912         /* now free frags */
913         while (nbd > 0) {
914
915                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
916                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
917                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
918                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
919                 if (--nbd)
920                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
921         }
922
923         /* release skb */
924         WARN_ON(!skb);
925         dev_kfree_skb(skb);
926         tx_buf->first_bd = 0;
927         tx_buf->skb = NULL;
928
929         return new_cons;
930 }
931
932 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
933 {
934         s16 used;
935         u16 prod;
936         u16 cons;
937
938         prod = fp->tx_bd_prod;
939         cons = fp->tx_bd_cons;
940
941         /* NUM_TX_RINGS = number of "next-page" entries
942            It will be used as a threshold */
943         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
944
945 #ifdef BNX2X_STOP_ON_ERROR
946         WARN_ON(used < 0);
947         WARN_ON(used > fp->bp->tx_ring_size);
948         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
949 #endif
950
951         return (s16)(fp->bp->tx_ring_size) - used;
952 }
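/* Example of the wraparound math above: with prod = 10 and cons = 65530
 * the 16-bit subtraction SUB_S16(10, 65530) evaluates to 16, i.e. 16 BDs
 * are in flight across the wrap.  Adding NUM_TX_RINGS keeps one BD per
 * ring page permanently counted as "used", so the "next-page" entries
 * can never be handed out as available descriptors.
 */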
953
954 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
955 {
956         u16 hw_cons;
957
958         /* Tell compiler that status block fields can change */
959         barrier();
960         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
961         return hw_cons != fp->tx_pkt_cons;
962 }
963
964 static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
965 {
966         struct bnx2x *bp = fp->bp;
967         struct netdev_queue *txq;
968         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
969
970 #ifdef BNX2X_STOP_ON_ERROR
971         if (unlikely(bp->panic))
972                 return -1;
973 #endif
974
975         txq = netdev_get_tx_queue(bp->dev, fp->index);
976         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
977         sw_cons = fp->tx_pkt_cons;
978
979         while (sw_cons != hw_cons) {
980                 u16 pkt_cons;
981
982                 pkt_cons = TX_BD(sw_cons);
983
984                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
985
986                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
987                    hw_cons, sw_cons, pkt_cons);
988
989 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
990                         rmb();
991                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
992                 }
993 */
994                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
995                 sw_cons++;
996         }
997
998         fp->tx_pkt_cons = sw_cons;
999         fp->tx_bd_cons = bd_cons;
1000
1001         /* Need to make the tx_bd_cons update visible to start_xmit()
1002          * before checking for netif_tx_queue_stopped().  Without the
1003          * memory barrier, there is a small possibility that
1004          * start_xmit() will miss it and cause the queue to be stopped
1005          * forever.
1006          */
1007         smp_mb();
1008
1009         /* TBD need a thresh? */
1010         if (unlikely(netif_tx_queue_stopped(txq))) {
1011                 /* Taking tx_lock() is needed to prevent reenabling the queue
1012                  * while it's empty. This could happen if rx_action() gets
1013                  * suspended in bnx2x_tx_int() after the condition before
1014                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1015                  *
1016                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
1017                  * sends some packets consuming the whole queue again->
1018                  * stops the queue
1019                  */
1020
1021                 __netif_tx_lock(txq, smp_processor_id());
1022
1023                 if ((netif_tx_queue_stopped(txq)) &&
1024                     (bp->state == BNX2X_STATE_OPEN) &&
1025                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
1026                         netif_tx_wake_queue(txq);
1027
1028                 __netif_tx_unlock(txq);
1029         }
1030         return 0;
1031 }
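/* The smp_mb() in bnx2x_tx_int() pairs with a matching barrier on the
 * producer side.  A minimal sketch of the required shape (illustrative,
 * not the literal bnx2x_start_xmit() code):
 *
 *      producer (xmit)                   consumer (tx completion)
 *      ---------------                   ------------------------
 *      netif_tx_stop_queue(txq);         fp->tx_bd_cons = bd_cons;
 *      smp_mb();                         smp_mb();
 *      if (bnx2x_tx_avail(fp) >= n)      if (netif_tx_queue_stopped(txq) &&
 *              netif_tx_wake_queue(txq);     bnx2x_tx_avail(fp) >= n)
 *                                                netif_tx_wake_queue(txq);
 *
 * Each side publishes its update before testing the other's state, so at
 * least one of them observes the stop/consume and wakes the queue.
 */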
1032
1033 #ifdef BCM_CNIC
1034 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1035 #endif
1036
1037 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1038                            union eth_rx_cqe *rr_cqe)
1039 {
1040         struct bnx2x *bp = fp->bp;
1041         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1042         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1043
1044         DP(BNX2X_MSG_SP,
1045            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1046            fp->index, cid, command, bp->state,
1047            rr_cqe->ramrod_cqe.ramrod_type);
1048
1049         bp->spq_left++;
1050
1051         if (fp->index) {
1052                 switch (command | fp->state) {
1053                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1054                                                 BNX2X_FP_STATE_OPENING):
1055                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1056                            cid);
1057                         fp->state = BNX2X_FP_STATE_OPEN;
1058                         break;
1059
1060                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1061                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1062                            cid);
1063                         fp->state = BNX2X_FP_STATE_HALTED;
1064                         break;
1065
1066                 default:
1067                         BNX2X_ERR("unexpected MC reply (%d)  "
1068                                   "fp[%d] state is %x\n",
1069                                   command, fp->index, fp->state);
1070                         break;
1071                 }
1072                 mb(); /* force bnx2x_wait_ramrod() to see the change */
1073                 return;
1074         }
1075
1076         switch (command | bp->state) {
1077         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1078                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1079                 bp->state = BNX2X_STATE_OPEN;
1080                 break;
1081
1082         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1083                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1084                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1085                 fp->state = BNX2X_FP_STATE_HALTED;
1086                 break;
1087
1088         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1089                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1090                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1091                 break;
1092
1093 #ifdef BCM_CNIC
1094         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1095                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1096                 bnx2x_cnic_cfc_comp(bp, cid);
1097                 break;
1098 #endif
1099
1100         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1101         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1102                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1103                 bp->set_mac_pending--;
1104                 smp_wmb();
1105                 break;
1106
1107         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1108                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1109                 bp->set_mac_pending--;
1110                 smp_wmb();
1111                 break;
1112
1113         default:
1114                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1115                           command, bp->state);
1116                 break;
1117         }
1118         mb(); /* force bnx2x_wait_ramrod() to see the change */
1119 }
1120
1121 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1122                                      struct bnx2x_fastpath *fp, u16 index)
1123 {
1124         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1125         struct page *page = sw_buf->page;
1126         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1127
1128         /* Skip "next page" elements */
1129         if (!page)
1130                 return;
1131
1132         dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
1133                        SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1134         __free_pages(page, PAGES_PER_SGE_SHIFT);
1135
1136         sw_buf->page = NULL;
1137         sge->addr_hi = 0;
1138         sge->addr_lo = 0;
1139 }
1140
1141 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1142                                            struct bnx2x_fastpath *fp, int last)
1143 {
1144         int i;
1145
1146         for (i = 0; i < last; i++)
1147                 bnx2x_free_rx_sge(bp, fp, i);
1148 }
1149
1150 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1151                                      struct bnx2x_fastpath *fp, u16 index)
1152 {
1153         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1154         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1155         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1156         dma_addr_t mapping;
1157
1158         if (unlikely(page == NULL))
1159                 return -ENOMEM;
1160
1161         mapping = dma_map_page(&bp->pdev->dev, page, 0,
1162                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1163         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1164                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1165                 return -ENOMEM;
1166         }
1167
1168         sw_buf->page = page;
1169         dma_unmap_addr_set(sw_buf, mapping, mapping);
1170
1171         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1172         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1173
1174         return 0;
1175 }
1176
1177 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1178                                      struct bnx2x_fastpath *fp, u16 index)
1179 {
1180         struct sk_buff *skb;
1181         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1182         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1183         dma_addr_t mapping;
1184
1185         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1186         if (unlikely(skb == NULL))
1187                 return -ENOMEM;
1188
1189         mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1190                                  DMA_FROM_DEVICE);
1191         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1192                 dev_kfree_skb(skb);
1193                 return -ENOMEM;
1194         }
1195
1196         rx_buf->skb = skb;
1197         dma_unmap_addr_set(rx_buf, mapping, mapping);
1198
1199         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1200         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1201
1202         return 0;
1203 }
1204
1205 /* note that we are not allocating a new skb,
1206  * we are just moving one from cons to prod
1207  * we are not creating a new mapping,
1208  * so there is no need to check for dma_mapping_error().
1209  */
1210 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1211                                struct sk_buff *skb, u16 cons, u16 prod)
1212 {
1213         struct bnx2x *bp = fp->bp;
1214         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1215         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1216         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1217         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1218
1219         dma_sync_single_for_device(&bp->pdev->dev,
1220                                    dma_unmap_addr(cons_rx_buf, mapping),
1221                                    RX_COPY_THRESH, DMA_FROM_DEVICE);
1222
1223         prod_rx_buf->skb = cons_rx_buf->skb;
1224         dma_unmap_addr_set(prod_rx_buf, mapping,
1225                            dma_unmap_addr(cons_rx_buf, mapping));
1226         *prod_bd = *cons_bd;
1227 }
1228
1229 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1230                                              u16 idx)
1231 {
1232         u16 last_max = fp->last_max_sge;
1233
1234         if (SUB_S16(idx, last_max) > 0)
1235                 fp->last_max_sge = idx;
1236 }
1237
1238 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1239 {
1240         int i, j;
1241
1242         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1243                 int idx = RX_SGE_CNT * i - 1;
1244
1245                 for (j = 0; j < 2; j++) {
1246                         SGE_MASK_CLEAR_BIT(fp, idx);
1247                         idx--;
1248                 }
1249         }
1250 }
1251
1252 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1253                                   struct eth_fast_path_rx_cqe *fp_cqe)
1254 {
1255         struct bnx2x *bp = fp->bp;
1256         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1257                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1258                       SGE_PAGE_SHIFT;
1259         u16 last_max, last_elem, first_elem;
1260         u16 delta = 0;
1261         u16 i;
1262
1263         if (!sge_len)
1264                 return;
1265
1266         /* First mark all used pages */
1267         for (i = 0; i < sge_len; i++)
1268                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1269
1270         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1271            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1272
1273         /* Here we assume that the last SGE index is the biggest */
1274         prefetch((void *)(fp->sge_mask));
1275         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1276
1277         last_max = RX_SGE(fp->last_max_sge);
1278         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1279         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1280
1281         /* If ring is not full */
1282         if (last_elem + 1 != first_elem)
1283                 last_elem++;
1284
1285         /* Now update the prod */
1286         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1287                 if (likely(fp->sge_mask[i]))
1288                         break;
1289
1290                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1291                 delta += RX_SGE_MASK_ELEM_SZ;
1292         }
1293
1294         if (delta > 0) {
1295                 fp->rx_sge_prod += delta;
1296                 /* clear page-end entries */
1297                 bnx2x_clear_sge_mask_next_elems(fp);
1298         }
1299
1300         DP(NETIF_MSG_RX_STATUS,
1301            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1302            fp->last_max_sge, fp->rx_sge_prod);
1303 }
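/* Example of the mask walk above (assuming the usual 64-bit mask
 * elements, i.e. RX_SGE_MASK_ELEM_SZ == 64): if first_elem is 3 and the
 * completed aggregations have cleared every bit of sge_mask[3], the loop
 * re-arms that element to all 1-s and adds 64 to delta, so rx_sge_prod
 * advances a whole mask element at a time; it stops at the first element
 * that still has a set bit, i.e. an SGE the FW has not consumed yet.
 */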
1304
1305 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1306 {
1307         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1308         memset(fp->sge_mask, 0xff,
1309                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1310
1311         /* Clear the two last indices in the page to 0:
1312            these are the indices that correspond to the "next" element,
1313            hence will never be indicated and should be removed from
1314            the calculations. */
1315         bnx2x_clear_sge_mask_next_elems(fp);
1316 }
1317
1318 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1319                             struct sk_buff *skb, u16 cons, u16 prod)
1320 {
1321         struct bnx2x *bp = fp->bp;
1322         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1323         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1324         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1325         dma_addr_t mapping;
1326
1327         /* move empty skb from pool to prod and map it */
1328         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1329         mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1330                                  bp->rx_buf_size, DMA_FROM_DEVICE);
1331         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
1332
1333         /* move partial skb from cons to pool (don't unmap yet) */
1334         fp->tpa_pool[queue] = *cons_rx_buf;
1335
1336         /* mark bin state as start - print error if current state != stop */
1337         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1338                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1339
1340         fp->tpa_state[queue] = BNX2X_TPA_START;
1341
1342         /* point prod_bd to new skb */
1343         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1344         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1345
1346 #ifdef BNX2X_STOP_ON_ERROR
1347         fp->tpa_queue_used |= (1 << queue);
1348 #ifdef _ASM_GENERIC_INT_L64_H
1349         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1350 #else
1351         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1352 #endif
1353            fp->tpa_queue_used);
1354 #endif
1355 }
1356
1357 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1358                                struct sk_buff *skb,
1359                                struct eth_fast_path_rx_cqe *fp_cqe,
1360                                u16 cqe_idx)
1361 {
1362         struct sw_rx_page *rx_pg, old_rx_pg;
1363         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1364         u32 i, frag_len, frag_size, pages;
1365         int err;
1366         int j;
1367
1368         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1369         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1370
1371         /* This is needed in order to enable forwarding support */
1372         if (frag_size)
1373                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1374                                                max(frag_size, (u32)len_on_bd));
1375
1376 #ifdef BNX2X_STOP_ON_ERROR
1377         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
1378                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1379                           pages, cqe_idx);
1380                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1381                           fp_cqe->pkt_len, len_on_bd);
1382                 bnx2x_panic();
1383                 return -EINVAL;
1384         }
1385 #endif
1386
1387         /* Run through the SGL and compose the fragmented skb */
1388         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1389                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1390
1391                 /* FW gives the indices of the SGE as if the ring is an array
1392                    (meaning that "next" element will consume 2 indices) */
1393                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1394                 rx_pg = &fp->rx_page_ring[sge_idx];
1395                 old_rx_pg = *rx_pg;
1396
1397                 /* If we fail to allocate a substitute page, we simply stop
1398                    where we are and drop the whole packet */
1399                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1400                 if (unlikely(err)) {
1401                         fp->eth_q_stats.rx_skb_alloc_failed++;
1402                         return err;
1403                 }
1404
1405                 /* Unmap the page as we are going to pass it to the stack */
1406                 dma_unmap_page(&bp->pdev->dev,
1407                                dma_unmap_addr(&old_rx_pg, mapping),
1408                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1409
1410                 /* Add one frag and update the appropriate fields in the skb */
1411                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1412
1413                 skb->data_len += frag_len;
1414                 skb->truesize += frag_len;
1415                 skb->len += frag_len;
1416
1417                 frag_size -= frag_len;
1418         }
1419
1420         return 0;
1421 }
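/* Example of the SGL math above (illustrative 4K pages with
 * PAGES_PER_SGE == 2, i.e. 8K per SGE): a 21K fragmented payload gives
 * pages = SGE_PAGE_ALIGN(21K) >> SGE_PAGE_SHIFT = 6, so the loop runs
 * for j = 0, 1, 2, attaching 8K, 8K and finally
 * frag_len = min(frag_size, 8K) = 5K from the last SGE.
 */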
1422
1423 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1425                            u16 cqe_idx)
1426 {
1427         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428         struct sk_buff *skb = rx_buf->skb;
1429         /* alloc new skb */
1430         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1431
1432         /* Unmap skb in the pool anyway, as we are going to change
1433            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1434            fails. */
1435         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436                          bp->rx_buf_size, DMA_FROM_DEVICE);
1437
1438         if (likely(new_skb)) {
1439                 /* fix ip xsum and give it to the stack */
1440                 /* (no need to map the new skb) */
1441 #ifdef BCM_VLAN
1442                 int is_vlan_cqe =
1443                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444                          PARSING_FLAGS_VLAN);
1445                 int is_not_hwaccel_vlan_cqe =
1446                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1447 #endif
1448
1449                 prefetch(skb);
1450                 prefetch(((char *)(skb)) + 128);
1451
1452 #ifdef BNX2X_STOP_ON_ERROR
1453                 if (pad + len > bp->rx_buf_size) {
1454                         BNX2X_ERR("skb_put is about to fail...  "
1455                                   "pad %d  len %d  rx_buf_size %d\n",
1456                                   pad, len, bp->rx_buf_size);
1457                         bnx2x_panic();
1458                         return;
1459                 }
1460 #endif
1461
1462                 skb_reserve(skb, pad);
1463                 skb_put(skb, len);
1464
1465                 skb->protocol = eth_type_trans(skb, bp->dev);
1466                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1467
1468                 {
1469                         struct iphdr *iph;
1470
1471                         iph = (struct iphdr *)skb->data;
1472 #ifdef BCM_VLAN
1473                         /* If there is no Rx VLAN offloading -
1474                            take the VLAN tag into account */
1475                         if (unlikely(is_not_hwaccel_vlan_cqe))
1476                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1477 #endif
1478                         iph->check = 0;
1479                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1480                 }
1481
1482                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483                                          &cqe->fast_path_cqe, cqe_idx)) {
1484 #ifdef BCM_VLAN
1485                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486                             (!is_not_hwaccel_vlan_cqe))
1487                                 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488                                                  le16_to_cpu(cqe->fast_path_cqe.
1489                                                              vlan_tag), skb);
1490                         else
1491 #endif
1492                                 napi_gro_receive(&fp->napi, skb);
1493                 } else {
1494                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495                            " - dropping packet!\n");
1496                         dev_kfree_skb(skb);
1497                 }
1498
1499
1500                 /* put new skb in bin */
1501                 fp->tpa_pool[queue].skb = new_skb;
1502
1503         } else {
1504                 /* else drop the packet and keep the buffer in the bin */
1505                 DP(NETIF_MSG_RX_STATUS,
1506                    "Failed to allocate new skb - dropping packet!\n");
1507                 fp->eth_q_stats.rx_skb_alloc_failed++;
1508         }
1509
1510         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1511 }
1512
1513 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514                                         struct bnx2x_fastpath *fp,
1515                                         u16 bd_prod, u16 rx_comp_prod,
1516                                         u16 rx_sge_prod)
1517 {
1518         struct ustorm_eth_rx_producers rx_prods = {0};
1519         int i;
1520
1521         /* Update producers */
1522         rx_prods.bd_prod = bd_prod;
1523         rx_prods.cqe_prod = rx_comp_prod;
1524         rx_prods.sge_prod = rx_sge_prod;
1525
1526         /*
1527          * Make sure that the BD and SGE data is updated before updating the
1528          * producers since FW might read the BD/SGE right after the producer
1529          * is updated.
1530          * This is only applicable for weak-ordered memory model archs such
1531          * as IA-64. The following barrier is also mandatory since the FW
1532          * assumes BDs must have buffers.
1533          */
1534         wmb();
1535
1536         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537                 REG_WR(bp, BAR_USTRORM_INTMEM +
1538                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539                        ((u32 *)&rx_prods)[i]);
1540
1541         mmiowb(); /* keep prod updates ordered */
1542
1543         DP(NETIF_MSG_RX_STATUS,
1544            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1545            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1546 }
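
/*
 * A minimal stand-alone sketch (compiled out; all names here are
 * hypothetical) of the ordering contract described above: descriptor
 * contents must be globally visible before the producer write that
 * allows the FW to DMA-read them.
 */
#if 0
static void example_post_buffer(struct example_ring *r, dma_addr_t dma)
{
	r->desc[r->prod].addr = cpu_to_le64(dma); /* fill the descriptor */
	wmb();			/* descriptor visible before the producer */
	writel(r->prod, r->doorbell);	/* FW may read the BD right away */
}
#endif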
1547
1548 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1549 {
1550         struct bnx2x *bp = fp->bp;
1551         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1552         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1553         int rx_pkt = 0;
1554
1555 #ifdef BNX2X_STOP_ON_ERROR
1556         if (unlikely(bp->panic))
1557                 return 0;
1558 #endif
1559
1560         /* The CQ "next element" is the same size as a regular element,
1561            so it is safe to treat it like one here */
1562         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1564                 hw_comp_cons++;
1565
1566         bd_cons = fp->rx_bd_cons;
1567         bd_prod = fp->rx_bd_prod;
1568         bd_prod_fw = bd_prod;
1569         sw_comp_cons = fp->rx_comp_cons;
1570         sw_comp_prod = fp->rx_comp_prod;
1571
1572         /* Memory barrier necessary as speculative reads of the rx
1573          * buffer can be ahead of the index in the status block
1574          */
1575         rmb();
1576
1577         DP(NETIF_MSG_RX_STATUS,
1578            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1579            fp->index, hw_comp_cons, sw_comp_cons);
1580
1581         while (sw_comp_cons != hw_comp_cons) {
1582                 struct sw_rx_bd *rx_buf = NULL;
1583                 struct sk_buff *skb;
1584                 union eth_rx_cqe *cqe;
1585                 u8 cqe_fp_flags, cqe_fp_status_flags;
1586                 u16 len, pad;
1587
1588                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589                 bd_prod = RX_BD(bd_prod);
1590                 bd_cons = RX_BD(bd_cons);
1591
1592                 /* Prefetch the page containing the BD descriptor
1593                    at the producer's index. It will be needed when a new
1594                    skb is allocated */
1595                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596                                              (&fp->rx_desc_ring[bd_prod])) -
1597                                   PAGE_SIZE + 1));
1598
1599                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1600                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1601                 cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
1602
1603                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1604                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1605                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1606                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1607                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1608                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1609
1610                 /* is this a slowpath msg? */
1611                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1612                         bnx2x_sp_event(fp, cqe);
1613                         goto next_cqe;
1614
1615                 /* this is an rx packet */
1616                 } else {
1617                         rx_buf = &fp->rx_buf_ring[bd_cons];
1618                         skb = rx_buf->skb;
1619                         prefetch(skb);
1620                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621                         pad = cqe->fast_path_cqe.placement_offset;
1622
1623                         /* If CQE is marked both TPA_START and TPA_END
1624                            it is a non-TPA CQE */
1625                         if ((!fp->disable_tpa) &&
1626                             (TPA_TYPE(cqe_fp_flags) !=
1627                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1628                                 u16 queue = cqe->fast_path_cqe.queue_index;
1629
1630                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631                                         DP(NETIF_MSG_RX_STATUS,
1632                                            "calling tpa_start on queue %d\n",
1633                                            queue);
1634
1635                                         bnx2x_tpa_start(fp, queue, skb,
1636                                                         bd_cons, bd_prod);
1637                                         goto next_rx;
1638                                 }
1639
1640                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641                                         DP(NETIF_MSG_RX_STATUS,
1642                                            "calling tpa_stop on queue %d\n",
1643                                            queue);
1644
1645                                         if (!BNX2X_RX_SUM_FIX(cqe))
1646                                                 BNX2X_ERR("STOP on non-TCP "
1647                                                           "data\n");
1648
1649                                         /* This is the size of the linear data
1650                                            on this skb */
1651                                         len = le16_to_cpu(cqe->fast_path_cqe.
1652                                                                 len_on_bd);
1653                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1654                                                     len, cqe, comp_ring_cons);
1655 #ifdef BNX2X_STOP_ON_ERROR
1656                                         if (bp->panic)
1657                                                 return 0;
1658 #endif
1659
1660                                         bnx2x_update_sge_prod(fp,
1661                                                         &cqe->fast_path_cqe);
1662                                         goto next_cqe;
1663                                 }
1664                         }
1665
1666                         dma_sync_single_for_device(&bp->pdev->dev,
1667                                         dma_unmap_addr(rx_buf, mapping),
1668                                                    pad + RX_COPY_THRESH,
1669                                                    DMA_FROM_DEVICE);
1670                         prefetch(((char *)(skb)) + 128);
1671
1672                         /* is this an error packet? */
1673                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1674                                 DP(NETIF_MSG_RX_ERR,
1675                                    "ERROR  flags %x  rx packet %u\n",
1676                                    cqe_fp_flags, sw_comp_cons);
1677                                 fp->eth_q_stats.rx_err_discard_pkt++;
1678                                 goto reuse_rx;
1679                         }
1680
1681                         /* Since we don't have a jumbo ring,
1682                          * copy small packets if the MTU exceeds 1500
1683                          */
1684                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1685                             (len <= RX_COPY_THRESH)) {
1686                                 struct sk_buff *new_skb;
1687
1688                                 new_skb = netdev_alloc_skb(bp->dev,
1689                                                            len + pad);
1690                                 if (new_skb == NULL) {
1691                                         DP(NETIF_MSG_RX_ERR,
1692                                            "ERROR  packet dropped "
1693                                            "because of alloc failure\n");
1694                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1695                                         goto reuse_rx;
1696                                 }
1697
1698                                 /* aligned copy */
1699                                 skb_copy_from_linear_data_offset(skb, pad,
1700                                                     new_skb->data + pad, len);
1701                                 skb_reserve(new_skb, pad);
1702                                 skb_put(new_skb, len);
1703
1704                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1705
1706                                 skb = new_skb;
1707
1708                         } else
1709                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1710                                 dma_unmap_single(&bp->pdev->dev,
1711                                         dma_unmap_addr(rx_buf, mapping),
1712                                                  bp->rx_buf_size,
1713                                                  DMA_FROM_DEVICE);
1714                                 skb_reserve(skb, pad);
1715                                 skb_put(skb, len);
1716
1717                         } else {
1718                                 DP(NETIF_MSG_RX_ERR,
1719                                    "ERROR  packet dropped because "
1720                                    "of alloc failure\n");
1721                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1722 reuse_rx:
1723                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1724                                 goto next_rx;
1725                         }
1726
1727                         skb->protocol = eth_type_trans(skb, bp->dev);
1728
1729                         if ((bp->dev->features & ETH_FLAG_RXHASH) &&
1730                             (cqe_fp_status_flags &
1731                              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1732                                 skb->rxhash = le32_to_cpu(
1733                                     cqe->fast_path_cqe.rss_hash_result);
1734
1735                         skb->ip_summed = CHECKSUM_NONE;
1736                         if (bp->rx_csum) {
1737                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1738                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1739                                 else
1740                                         fp->eth_q_stats.hw_csum_err++;
1741                         }
1742                 }
1743
1744                 skb_record_rx_queue(skb, fp->index);
1745
1746 #ifdef BCM_VLAN
1747                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1748                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1749                      PARSING_FLAGS_VLAN))
1750                         vlan_gro_receive(&fp->napi, bp->vlgrp,
1751                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1752                 else
1753 #endif
1754                         napi_gro_receive(&fp->napi, skb);
1755
1756
1757 next_rx:
1758                 rx_buf->skb = NULL;
1759
1760                 bd_cons = NEXT_RX_IDX(bd_cons);
1761                 bd_prod = NEXT_RX_IDX(bd_prod);
1762                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1763                 rx_pkt++;
1764 next_cqe:
1765                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1766                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1767
1768                 if (rx_pkt == budget)
1769                         break;
1770         } /* while */
1771
1772         fp->rx_bd_cons = bd_cons;
1773         fp->rx_bd_prod = bd_prod_fw;
1774         fp->rx_comp_cons = sw_comp_cons;
1775         fp->rx_comp_prod = sw_comp_prod;
1776
1777         /* Update producers */
1778         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1779                              fp->rx_sge_prod);
1780
1781         fp->rx_pkt += rx_pkt;
1782         fp->rx_calls++;
1783
1784         return rx_pkt;
1785 }
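
/*
 * Sketch (compiled out) of how a NAPI poll routine would consume
 * bnx2x_rx_int().  The driver's real poll handler also processes Tx
 * completions and re-arms the IGU, which is omitted here.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp =
		container_of(napi, struct bnx2x_fastpath, napi);
	int work_done = bnx2x_rx_int(fp, budget);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable this queue's interrupt here */
	}
	return work_done;
}
#endif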
1786
1787 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1788 {
1789         struct bnx2x_fastpath *fp = fp_cookie;
1790         struct bnx2x *bp = fp->bp;
1791
1792         /* Return here if interrupt is disabled */
1793         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1794                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1795                 return IRQ_HANDLED;
1796         }
1797
1798         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1799            fp->index, fp->sb_id);
1800         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1801
1802 #ifdef BNX2X_STOP_ON_ERROR
1803         if (unlikely(bp->panic))
1804                 return IRQ_HANDLED;
1805 #endif
1806
1807         /* Handle Rx and Tx according to MSI-X vector */
1808         prefetch(fp->rx_cons_sb);
1809         prefetch(fp->tx_cons_sb);
1810         prefetch(&fp->status_blk->u_status_block.status_block_index);
1811         prefetch(&fp->status_blk->c_status_block.status_block_index);
1812         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1813
1814         return IRQ_HANDLED;
1815 }
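
/*
 * Each fastpath vector is bound to the handler above with the usual
 * request_irq() pattern (illustrative; the table and name fields are
 * assumptions):
 *
 *	rc = request_irq(bp->msix_table[i].vector, bnx2x_msix_fp_int,
 *			 0, fp->name, fp);
 */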
1816
1817 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1818 {
1819         struct bnx2x *bp = netdev_priv(dev_instance);
1820         u16 status = bnx2x_ack_int(bp);
1821         u16 mask;
1822         int i;
1823
1824         /* Return here if interrupt is shared and it's not for us */
1825         if (unlikely(status == 0)) {
1826                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1827                 return IRQ_NONE;
1828         }
1829         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1830
1831         /* Return here if interrupt is disabled */
1832         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1833                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1834                 return IRQ_HANDLED;
1835         }
1836
1837 #ifdef BNX2X_STOP_ON_ERROR
1838         if (unlikely(bp->panic))
1839                 return IRQ_HANDLED;
1840 #endif
1841
1842         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1843                 struct bnx2x_fastpath *fp = &bp->fp[i];
1844
1845                 mask = 0x2 << fp->sb_id;
1846                 if (status & mask) {
1847                         /* Handle Rx and Tx according to SB id */
1848                         prefetch(fp->rx_cons_sb);
1849                         prefetch(&fp->status_blk->u_status_block.
1850                                                 status_block_index);
1851                         prefetch(fp->tx_cons_sb);
1852                         prefetch(&fp->status_blk->c_status_block.
1853                                                 status_block_index);
1854                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1855                         status &= ~mask;
1856                 }
1857         }
1858
1859 #ifdef BCM_CNIC
1860         mask = 0x2 << CNIC_SB_ID(bp);
1861         if (status & (mask | 0x1)) {
1862                 struct cnic_ops *c_ops = NULL;
1863
1864                 rcu_read_lock();
1865                 c_ops = rcu_dereference(bp->cnic_ops);
1866                 if (c_ops)
1867                         c_ops->cnic_handler(bp->cnic_data, NULL);
1868                 rcu_read_unlock();
1869
1870                 status &= ~mask;
1871         }
1872 #endif
1873
1874         if (unlikely(status & 0x1)) {
1875                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1876
1877                 status &= ~0x1;
1878                 if (!status)
1879                         return IRQ_HANDLED;
1880         }
1881
1882         if (unlikely(status))
1883                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1884                    status);
1885
1886         return IRQ_HANDLED;
1887 }
1888
1889 /* end of fast path */
1890
1891 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1892
1893 /* Link */
1894
1895 /*
1896  * General service functions
1897  */
1898
1899 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1900 {
1901         u32 lock_status;
1902         u32 resource_bit = (1 << resource);
1903         int func = BP_FUNC(bp);
1904         u32 hw_lock_control_reg;
1905         int cnt;
1906
1907         /* Validating that the resource is within range */
1908         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1909                 DP(NETIF_MSG_HW,
1910                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1911                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1912                 return -EINVAL;
1913         }
1914
1915         if (func <= 5) {
1916                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1917         } else {
1918                 hw_lock_control_reg =
1919                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1920         }
1921
1922         /* Validating that the resource is not already taken */
1923         lock_status = REG_RD(bp, hw_lock_control_reg);
1924         if (lock_status & resource_bit) {
1925                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1926                    lock_status, resource_bit);
1927                 return -EEXIST;
1928         }
1929
1930         /* Try for 5 seconds, polling every 5 ms */
1931         for (cnt = 0; cnt < 1000; cnt++) {
1932                 /* Try to acquire the lock */
1933                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1934                 lock_status = REG_RD(bp, hw_lock_control_reg);
1935                 if (lock_status & resource_bit)
1936                         return 0;
1937
1938                 msleep(5);
1939         }
1940         DP(NETIF_MSG_HW, "Timeout\n");
1941         return -EAGAIN;
1942 }
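
/*
 * Typical call pattern (illustrative): the acquire/release pair brackets
 * a read-modify-write of a shared resource, and callers must be prepared
 * for the -EAGAIN timeout:
 *
 *	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	if (rc)
 *		return rc;
 *	... modify the shared resource ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 */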
1943
1944 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1945 {
1946         u32 lock_status;
1947         u32 resource_bit = (1 << resource);
1948         int func = BP_FUNC(bp);
1949         u32 hw_lock_control_reg;
1950
1951         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1952
1953         /* Validating that the resource is within range */
1954         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1955                 DP(NETIF_MSG_HW,
1956                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1957                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1958                 return -EINVAL;
1959         }
1960
1961         if (func <= 5) {
1962                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1963         } else {
1964                 hw_lock_control_reg =
1965                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1966         }
1967
1968         /* Validating that the resource is currently taken */
1969         lock_status = REG_RD(bp, hw_lock_control_reg);
1970         if (!(lock_status & resource_bit)) {
1971                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1972                    lock_status, resource_bit);
1973                 return -EFAULT;
1974         }
1975
1976         REG_WR(bp, hw_lock_control_reg, resource_bit);
1977         return 0;
1978 }
1979
1980 /* HW Lock for shared dual port PHYs */
1981 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1982 {
1983         mutex_lock(&bp->port.phy_mutex);
1984
1985         if (bp->port.need_hw_lock)
1986                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1987 }
1988
1989 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1990 {
1991         if (bp->port.need_hw_lock)
1992                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1993
1994         mutex_unlock(&bp->port.phy_mutex);
1995 }
1996
1997 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1998 {
1999         /* The GPIO should be swapped if swap register is set and active */
2000         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2001                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2002         int gpio_shift = gpio_num +
2003                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2004         u32 gpio_mask = (1 << gpio_shift);
2005         u32 gpio_reg;
2006         int value;
2007
2008         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2009                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2010                 return -EINVAL;
2011         }
2012
2013         /* read GPIO value */
2014         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2015
2016         /* get the requested pin value */
2017         if ((gpio_reg & gpio_mask) == gpio_mask)
2018                 value = 1;
2019         else
2020                 value = 0;
2021
2022         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
2023
2024         return value;
2025 }
2026
2027 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2028 {
2029         /* The GPIO should be swapped if swap register is set and active */
2030         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2031                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2032         int gpio_shift = gpio_num +
2033                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2034         u32 gpio_mask = (1 << gpio_shift);
2035         u32 gpio_reg;
2036
2037         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2038                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2039                 return -EINVAL;
2040         }
2041
2042         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2043         /* read GPIO and mask except the float bits */
2044         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2045
2046         switch (mode) {
2047         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2048                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2049                    gpio_num, gpio_shift);
2050                 /* clear FLOAT and set CLR */
2051                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2052                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2053                 break;
2054
2055         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2056                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2057                    gpio_num, gpio_shift);
2058                 /* clear FLOAT and set SET */
2059                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2060                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2061                 break;
2062
2063         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2064                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2065                    gpio_num, gpio_shift);
2066                 /* set FLOAT */
2067                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2068                 break;
2069
2070         default:
2071                 break;
2072         }
2073
2074         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2075         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2076
2077         return 0;
2078 }
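
/*
 * Example (illustrative; the GPIO number is arbitrary): pulsing a pin
 * low on the current port, e.g. to reset an external device, and then
 * floating it again:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 *	msleep(10);
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
 */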
2079
2080 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2081 {
2082         /* The GPIO should be swapped if swap register is set and active */
2083         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2084                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2085         int gpio_shift = gpio_num +
2086                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2087         u32 gpio_mask = (1 << gpio_shift);
2088         u32 gpio_reg;
2089
2090         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2091                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2092                 return -EINVAL;
2093         }
2094
2095         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2096         /* read GPIO int */
2097         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2098
2099         switch (mode) {
2100         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2101                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2102                                    "output low\n", gpio_num, gpio_shift);
2103                 /* clear SET and set CLR */
2104                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2105                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2106                 break;
2107
2108         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2109                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2110                                    "output high\n", gpio_num, gpio_shift);
2111                 /* clear CLR and set SET */
2112                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2113                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2114                 break;
2115
2116         default:
2117                 break;
2118         }
2119
2120         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2121         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2122
2123         return 0;
2124 }
2125
2126 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2127 {
2128         u32 spio_mask = (1 << spio_num);
2129         u32 spio_reg;
2130
2131         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2132             (spio_num > MISC_REGISTERS_SPIO_7)) {
2133                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2134                 return -EINVAL;
2135         }
2136
2137         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2138         /* read SPIO and mask except the float bits */
2139         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2140
2141         switch (mode) {
2142         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2143                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2144                 /* clear FLOAT and set CLR */
2145                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2146                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2147                 break;
2148
2149         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2150                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2151                 /* clear FLOAT and set SET */
2152                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2153                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2154                 break;
2155
2156         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2157                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2158                 /* set FLOAT */
2159                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2160                 break;
2161
2162         default:
2163                 break;
2164         }
2165
2166         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2167         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2168
2169         return 0;
2170 }
2171
2172 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2173 {
2174         switch (bp->link_vars.ieee_fc &
2175                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2176         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2177                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2178                                           ADVERTISED_Pause);
2179                 break;
2180
2181         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2182                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2183                                          ADVERTISED_Pause);
2184                 break;
2185
2186         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2187                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2188                 break;
2189
2190         default:
2191                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2192                                           ADVERTISED_Pause);
2193                 break;
2194         }
2195 }
2196
2197 static void bnx2x_link_report(struct bnx2x *bp)
2198 {
2199         if (bp->flags & MF_FUNC_DIS) {
2200                 netif_carrier_off(bp->dev);
2201                 netdev_err(bp->dev, "NIC Link is Down\n");
2202                 return;
2203         }
2204
2205         if (bp->link_vars.link_up) {
2206                 u16 line_speed;
2207
2208                 if (bp->state == BNX2X_STATE_OPEN)
2209                         netif_carrier_on(bp->dev);
2210                 netdev_info(bp->dev, "NIC Link is Up, ");
2211
2212                 line_speed = bp->link_vars.line_speed;
2213                 if (IS_E1HMF(bp)) {
2214                         u16 vn_max_rate;
2215
2216                         vn_max_rate =
2217                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2218                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2219                         if (vn_max_rate < line_speed)
2220                                 line_speed = vn_max_rate;
2221                 }
2222                 pr_cont("%d Mbps ", line_speed);
2223
2224                 if (bp->link_vars.duplex == DUPLEX_FULL)
2225                         pr_cont("full duplex");
2226                 else
2227                         pr_cont("half duplex");
2228
2229                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2230                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2231                                 pr_cont(", receive ");
2232                                 if (bp->link_vars.flow_ctrl &
2233                                     BNX2X_FLOW_CTRL_TX)
2234                                         pr_cont("& transmit ");
2235                         } else {
2236                                 pr_cont(", transmit ");
2237                         }
2238                         pr_cont("flow control ON");
2239                 }
2240                 pr_cont("\n");
2241
2242         } else { /* link_down */
2243                 netif_carrier_off(bp->dev);
2244                 netdev_err(bp->dev, "NIC Link is Down\n");
2245         }
2246 }
2247
2248 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2249 {
2250         if (!BP_NOMCP(bp)) {
2251                 u8 rc;
2252
2253                 /* Initialize link parameters structure variables */
2254                 /* It is recommended to turn off RX FC for jumbo frames
2255                    for better performance */
2256                 if (bp->dev->mtu > 5000)
2257                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2258                 else
2259                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2260
2261                 bnx2x_acquire_phy_lock(bp);
2262
2263                 if (load_mode == LOAD_DIAG)
2264                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2265
2266                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2267
2268                 bnx2x_release_phy_lock(bp);
2269
2270                 bnx2x_calc_fc_adv(bp);
2271
2272                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2273                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2274                         bnx2x_link_report(bp);
2275                 }
2276
2277                 return rc;
2278         }
2279         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2280         return -EINVAL;
2281 }
2282
2283 static void bnx2x_link_set(struct bnx2x *bp)
2284 {
2285         if (!BP_NOMCP(bp)) {
2286                 bnx2x_acquire_phy_lock(bp);
2287                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2288                 bnx2x_release_phy_lock(bp);
2289
2290                 bnx2x_calc_fc_adv(bp);
2291         } else
2292                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2293 }
2294
2295 static void bnx2x__link_reset(struct bnx2x *bp)
2296 {
2297         if (!BP_NOMCP(bp)) {
2298                 bnx2x_acquire_phy_lock(bp);
2299                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2300                 bnx2x_release_phy_lock(bp);
2301         } else
2302                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2303 }
2304
2305 static u8 bnx2x_link_test(struct bnx2x *bp)
2306 {
2307         u8 rc = 0;
2308
2309         if (!BP_NOMCP(bp)) {
2310                 bnx2x_acquire_phy_lock(bp);
2311                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2312                 bnx2x_release_phy_lock(bp);
2313         } else
2314                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2315
2316         return rc;
2317 }
2318
2319 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2320 {
2321         u32 r_param = bp->link_vars.line_speed / 8;
2322         u32 fair_periodic_timeout_usec;
2323         u32 t_fair;
2324
2325         memset(&(bp->cmng.rs_vars), 0,
2326                sizeof(struct rate_shaping_vars_per_port));
2327         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2328
2329         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2330         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2331
2332         /* this is the threshold below which no timer arming will occur;
2333            the 1.25 coefficient makes the threshold a little bigger
2334            than the real time, to compensate for timer inaccuracy */
2335         bp->cmng.rs_vars.rs_threshold =
2336                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2337
2338         /* resolution of fairness timer */
2339         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2340         /* for 10G it is 1000 usec, for 1G it is 10000 usec */
2341         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2342
2343         /* this is the threshold below which we won't arm the timer anymore */
2344         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2345
2346         /* we multiply by 1e3/8 to get bytes/msec.
2347            We don't want the credits to exceed
2348            t_fair*FAIR_MEM (the algorithm resolution) */
2349         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2350         /* since each tick is 4 usec */
2351         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2352 }
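
/*
 * Worked example (illustrative): on a 10G link line_speed is 10000, so
 * r_param = 10000 / 8 = 1250 bytes/usec.  With the 100 usec rate-shaping
 * period noted above this gives rs_periodic_timeout = 100 / 4 = 25 SDM
 * ticks and rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes, while
 * t_fair comes out to 1000 usec.
 */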
2353
2354 /* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
2355    It's needed for further normalizing of the min_rates.
2356    The stored value is:
2357      the sum of vn_min_rates,
2358        or
2359      0 - if all the min_rates are 0.
2360      In the latter case the fairness algorithm should be deactivated.
2361      If not all min_rates are zero then those that are zero will be set to 1.
2362  */
2363 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2364 {
2365         int all_zero = 1;
2366         int port = BP_PORT(bp);
2367         int vn;
2368
2369         bp->vn_weight_sum = 0;
2370         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2371                 int func = 2*vn + port;
2372                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2373                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2374                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2375
2376                 /* Skip hidden vns */
2377                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2378                         continue;
2379
2380                 /* If min rate is zero - set it to 1 */
2381                 if (!vn_min_rate)
2382                         vn_min_rate = DEF_MIN_RATE;
2383                 else
2384                         all_zero = 0;
2385
2386                 bp->vn_weight_sum += vn_min_rate;
2387         }
2388
2389         /* ... only if all min rates are zeros - disable fairness */
2390         if (all_zero) {
2391                 bp->cmng.flags.cmng_enables &=
2392                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2393                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2394                    "  fairness will be disabled\n");
2395         } else
2396                 bp->cmng.flags.cmng_enables |=
2397                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2398 }
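
/*
 * Worked example (illustrative, assuming DEF_MIN_RATE is 100): four VNs
 * with min-BW fields {0, 25, 0, 75} contribute {100, 2500, 100, 7500},
 * giving vn_weight_sum = 10200 with fairness left enabled.  Only if all
 * four fields were zero would fairness be disabled.
 */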
2399
2400 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2401 {
2402         struct rate_shaping_vars_per_vn m_rs_vn;
2403         struct fairness_vars_per_vn m_fair_vn;
2404         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2405         u16 vn_min_rate, vn_max_rate;
2406         int i;
2407
2408         /* If function is hidden - set min and max to zeroes */
2409         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2410                 vn_min_rate = 0;
2411                 vn_max_rate = 0;
2412
2413         } else {
2414                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2415                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2416                 /* If min rate is zero - set it to 1 */
2417                 if (!vn_min_rate)
2418                         vn_min_rate = DEF_MIN_RATE;
2419                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2420                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2421         }
2422         DP(NETIF_MSG_IFUP,
2423            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2424            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2425
2426         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2427         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2428
2429         /* global vn counter - maximal Mbps for this vn */
2430         m_rs_vn.vn_counter.rate = vn_max_rate;
2431
2432         /* quota - number of bytes transmitted in this period */
2433         m_rs_vn.vn_counter.quota =
2434                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2435
2436         if (bp->vn_weight_sum) {
2437                 /* credit for each period of the fairness algorithm:
2438                    number of bytes in T_FAIR (the VNs share the port rate).
2439                    vn_weight_sum should not be larger than 10000, thus
2440                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2441                    than zero */
2442                 m_fair_vn.vn_credit_delta =
2443                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2444                                                    (8 * bp->vn_weight_sum))),
2445                               (bp->cmng.fair_vars.fair_threshold * 2));
2446                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2447                    m_fair_vn.vn_credit_delta);
2448         }
2449
2450         /* Store it to internal memory */
2451         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2452                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2454                        ((u32 *)(&m_rs_vn))[i]);
2455
2456         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2457                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2458                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2459                        ((u32 *)(&m_fair_vn))[i]);
2460 }
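
/*
 * Worked example (illustrative): the MF min/max BW fields are in units
 * of 100 Mbps, so a max-BW field of 40 yields vn_max_rate = 4000 Mbps
 * and a per-period quota of (4000 * 100) / 8 = 50000 bytes for the
 * 100 usec rate-shaping period.
 */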
2461
2462
2463 /* This function is called upon link interrupt */
2464 static void bnx2x_link_attn(struct bnx2x *bp)
2465 {
2466         u32 prev_link_status = bp->link_vars.link_status;
2467         /* Make sure that we are synced with the current statistics */
2468         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2469
2470         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2471
2472         if (bp->link_vars.link_up) {
2473
2474                 /* dropless flow control */
2475                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2476                         int port = BP_PORT(bp);
2477                         u32 pause_enabled = 0;
2478
2479                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2480                                 pause_enabled = 1;
2481
2482                         REG_WR(bp, BAR_USTRORM_INTMEM +
2483                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2484                                pause_enabled);
2485                 }
2486
2487                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2488                         struct host_port_stats *pstats;
2489
2490                         pstats = bnx2x_sp(bp, port_stats);
2491                         /* reset old bmac stats */
2492                         memset(&(pstats->mac_stx[0]), 0,
2493                                sizeof(struct mac_stx));
2494                 }
2495                 if (bp->state == BNX2X_STATE_OPEN)
2496                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2497         }
2498
2499         /* indicate link status only if link status actually changed */
2500         if (prev_link_status != bp->link_vars.link_status)
2501                 bnx2x_link_report(bp);
2502
2503         if (IS_E1HMF(bp)) {
2504                 int port = BP_PORT(bp);
2505                 int func;
2506                 int vn;
2507
2508                 /* Set the attention towards other drivers on the same port */
2509                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2510                         if (vn == BP_E1HVN(bp))
2511                                 continue;
2512
2513                         func = ((vn << 1) | port);
2514                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2515                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2516                 }
2517
2518                 if (bp->link_vars.link_up) {
2519                         int i;
2520
2521                         /* Init rate shaping and fairness contexts */
2522                         bnx2x_init_port_minmax(bp);
2523
2524                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2525                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2526
2527                         /* Store it to internal memory */
2528                         for (i = 0;
2529                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2530                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2531                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2532                                        ((u32 *)(&bp->cmng))[i]);
2533                 }
2534         }
2535 }
2536
2537 static void bnx2x__link_status_update(struct bnx2x *bp)
2538 {
2539         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2540                 return;
2541
2542         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2543
2544         if (bp->link_vars.link_up)
2545                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2546         else
2547                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2548
2549         bnx2x_calc_vn_weight_sum(bp);
2550
2551         /* indicate link status */
2552         bnx2x_link_report(bp);
2553 }
2554
2555 static void bnx2x_pmf_update(struct bnx2x *bp)
2556 {
2557         int port = BP_PORT(bp);
2558         u32 val;
2559
2560         bp->port.pmf = 1;
2561         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2562
2563         /* enable nig attention */
2564         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2565         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2566         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2567
2568         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2569 }
2570
2571 /* end of Link */
2572
2573 /* slow path */
2574
2575 /*
2576  * General service functions
2577  */
2578
2579 /* send the MCP a request, block until there is a reply */
2580 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2581 {
2582         int func = BP_FUNC(bp);
2583         u32 seq = ++bp->fw_seq;
2584         u32 rc = 0;
2585         u32 cnt = 1;
2586         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2587
2588         mutex_lock(&bp->fw_mb_mutex);
2589         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2590         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2591
2592         do {
2593                 /* let the FW do its magic ... */
2594                 msleep(delay);
2595
2596                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2597
2598                 /* Give the FW up to 5 seconds (500 * 10 ms) */
2599         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2600
2601         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2602            cnt*delay, rc, seq);
2603
2604         /* is this a reply to our command? */
2605         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2606                 rc &= FW_MSG_CODE_MASK;
2607         else {
2608                 /* FW BUG! */
2609                 BNX2X_ERR("FW failed to respond!\n");
2610                 bnx2x_fw_dump(bp);
2611                 rc = 0;
2612         }
2613         mutex_unlock(&bp->fw_mb_mutex);
2614
2615         return rc;
2616 }
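
/*
 * Typical usage (illustrative): a caller issues a command and checks the
 * masked reply code, treating 0 as "the MCP never answered":
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *	if (!rc)
 *		... fall back / start recovery ...
 */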
2617
2618 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2619 static void bnx2x_set_rx_mode(struct net_device *dev);
2620
2621 static void bnx2x_e1h_disable(struct bnx2x *bp)
2622 {
2623         int port = BP_PORT(bp);
2624
2625         netif_tx_disable(bp->dev);
2626
2627         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2628
2629         netif_carrier_off(bp->dev);
2630 }
2631
2632 static void bnx2x_e1h_enable(struct bnx2x *bp)
2633 {
2634         int port = BP_PORT(bp);
2635
2636         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2637
2638         /* Only the Tx queues need to be re-enabled */
2639         netif_tx_wake_all_queues(bp->dev);
2640
2641         /*
2642          * Should not call netif_carrier_on since it will be called if the link
2643          * is up when checking for link state
2644          */
2645 }
2646
2647 static void bnx2x_update_min_max(struct bnx2x *bp)
2648 {
2649         int port = BP_PORT(bp);
2650         int vn, i;
2651
2652         /* Init rate shaping and fairness contexts */
2653         bnx2x_init_port_minmax(bp);
2654
2655         bnx2x_calc_vn_weight_sum(bp);
2656
2657         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2658                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2659
2660         if (bp->port.pmf) {
2661                 int func;
2662
2663                 /* Set the attention towards other drivers on the same port */
2664                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2665                         if (vn == BP_E1HVN(bp))
2666                                 continue;
2667
2668                         func = ((vn << 1) | port);
2669                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2670                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2671                 }
2672
2673                 /* Store it to internal memory */
2674                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2675                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2676                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2677                                ((u32 *)(&bp->cmng))[i]);
2678         }
2679 }
2680
2681 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2682 {
2683         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2684
2685         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2686
2687                 /*
2688                  * This is the only place besides the function initialization
2689                  * where the bp->flags can change so it is done without any
2690                  * locks
2691                  */
2692                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2693                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2694                         bp->flags |= MF_FUNC_DIS;
2695
2696                         bnx2x_e1h_disable(bp);
2697                 } else {
2698                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2699                         bp->flags &= ~MF_FUNC_DIS;
2700
2701                         bnx2x_e1h_enable(bp);
2702                 }
2703                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2704         }
2705         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2706
2707                 bnx2x_update_min_max(bp);
2708                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2709         }
2710
2711         /* Report results to MCP */
2712         if (dcc_event)
2713                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2714         else
2715                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2716 }
2717
2718 /* must be called under the spq lock */
2719 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2720 {
2721         struct eth_spe *next_spe = bp->spq_prod_bd;
2722
2723         if (bp->spq_prod_bd == bp->spq_last_bd) {
2724                 bp->spq_prod_bd = bp->spq;
2725                 bp->spq_prod_idx = 0;
2726                 DP(NETIF_MSG_TIMER, "end of spq\n");
2727         } else {
2728                 bp->spq_prod_bd++;
2729                 bp->spq_prod_idx++;
2730         }
2731         return next_spe;
2732 }
2733
2734 /* must be called under the spq lock */
2735 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2736 {
2737         int func = BP_FUNC(bp);
2738
2739         /* Make sure that BD data is updated before writing the producer */
2740         wmb();
2741
2742         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2743                bp->spq_prod_idx);
2744         mmiowb();
2745 }
2746
2747 /* the slow path queue is odd since completions arrive on the fastpath ring */
2748 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2749                          u32 data_hi, u32 data_lo, int common)
2750 {
2751         struct eth_spe *spe;
2752
2753 #ifdef BNX2X_STOP_ON_ERROR
2754         if (unlikely(bp->panic))
2755                 return -EIO;
2756 #endif
2757
2758         spin_lock_bh(&bp->spq_lock);
2759
2760         if (!bp->spq_left) {
2761                 BNX2X_ERR("BUG! SPQ ring full!\n");
2762                 spin_unlock_bh(&bp->spq_lock);
2763                 bnx2x_panic();
2764                 return -EBUSY;
2765         }
2766
2767         spe = bnx2x_sp_get_next(bp);
2768
2769         /* The CID needs the port number encoded in it */
2770         spe->hdr.conn_and_cmd_data =
2771                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2772                                     HW_CID(bp, cid));
2773         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2774         if (common)
2775                 spe->hdr.type |=
2776                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2777
2778         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2779         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2780
2781         bp->spq_left--;
2782
2783         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2784            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2785            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2786            (u32)(U64_LO(bp->spq_mapping) +
2787            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2788            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2789
2790         bnx2x_sp_prod_update(bp);
2791         spin_unlock_bh(&bp->spq_lock);
2792         return 0;
2793 }
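
/*
 * Illustrative call (the command and cid are examples only): posting a
 * ramrod whose completion will show up as a slowpath CQE on the fastpath
 * ring:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, cid, 0, 0, 0);
 *	if (rc)
 *		... the SPQ was full; back off and retry ...
 */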
2794
2795 /* acquire split MCP access lock register */
2796 static int bnx2x_acquire_alr(struct bnx2x *bp)
2797 {
2798         u32 j, val;
2799         int rc = 0;
2800
2801         might_sleep();
2802         for (j = 0; j < 1000; j++) {
2803                 val = (1UL << 31);
2804                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2805                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2806                 if (val & (1L << 31))
2807                         break;
2808
2809                 msleep(5);
2810         }
2811         if (!(val & (1L << 31))) {
2812                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2813                 rc = -EBUSY;
2814         }
2815
2816         return rc;
2817 }
2818
2819 /* release split MCP access lock register */
2820 static void bnx2x_release_alr(struct bnx2x *bp)
2821 {
2822         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2823 }
2824
2825 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2826 {
2827         struct host_def_status_block *def_sb = bp->def_status_blk;
2828         u16 rc = 0;
2829
2830         barrier(); /* status block is written to by the chip */
2831         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2832                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2833                 rc |= 1;
2834         }
2835         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2836                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2837                 rc |= 2;
2838         }
2839         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2840                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2841                 rc |= 4;
2842         }
2843         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2844                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2845                 rc |= 8;
2846         }
2847         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2848                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2849                 rc |= 16;
2850         }
2851         return rc;
2852 }
2853
2854 /*
2855  * slow path service functions
2856  */
2857
2858 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2859 {
2860         int port = BP_PORT(bp);
2861         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2862                        COMMAND_REG_ATTN_BITS_SET);
2863         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2864                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2865         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2866                                        NIG_REG_MASK_INTERRUPT_PORT0;
2867         u32 aeu_mask;
2868         u32 nig_mask = 0;
2869
2870         if (bp->attn_state & asserted)
2871                 BNX2X_ERR("IGU ERROR\n");
2872
2873         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2874         aeu_mask = REG_RD(bp, aeu_addr);
2875
2876         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2877            aeu_mask, asserted);
2878         aeu_mask &= ~(asserted & 0x3ff);
2879         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2880
2881         REG_WR(bp, aeu_addr, aeu_mask);
2882         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2883
2884         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2885         bp->attn_state |= asserted;
2886         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2887
2888         if (asserted & ATTN_HARD_WIRED_MASK) {
2889                 if (asserted & ATTN_NIG_FOR_FUNC) {
2890
2891                         bnx2x_acquire_phy_lock(bp);
2892
2893                         /* save nig interrupt mask */
2894                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2895                         REG_WR(bp, nig_int_mask_addr, 0);
2896
2897                         bnx2x_link_attn(bp);
2898
2899                         /* handle unicore attn? */
2900                 }
2901                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2902                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2903
2904                 if (asserted & GPIO_2_FUNC)
2905                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2906
2907                 if (asserted & GPIO_3_FUNC)
2908                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2909
2910                 if (asserted & GPIO_4_FUNC)
2911                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2912
2913                 if (port == 0) {
2914                         if (asserted & ATTN_GENERAL_ATTN_1) {
2915                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2916                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2917                         }
2918                         if (asserted & ATTN_GENERAL_ATTN_2) {
2919                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2920                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2921                         }
2922                         if (asserted & ATTN_GENERAL_ATTN_3) {
2923                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2924                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2925                         }
2926                 } else {
2927                         if (asserted & ATTN_GENERAL_ATTN_4) {
2928                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2929                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2930                         }
2931                         if (asserted & ATTN_GENERAL_ATTN_5) {
2932                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2933                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2934                         }
2935                         if (asserted & ATTN_GENERAL_ATTN_6) {
2936                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2937                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2938                         }
2939                 }
2940
2941         } /* if hardwired */
2942
2943         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2944            asserted, hc_addr);
2945         REG_WR(bp, hc_addr, asserted);
2946
2947         /* now set back the mask */
2948         if (asserted & ATTN_NIG_FOR_FUNC) {
2949                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2950                 bnx2x_release_phy_lock(bp);
2951         }
2952 }
2953
2954 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2955 {
2956         int port = BP_PORT(bp);
2957
2958         /* mark the failure */
2959         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2960         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2961         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2962                  bp->link_params.ext_phy_config);
2963
2964         /* log the failure */
2965         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2966                " the driver to shut down the card to prevent permanent"
2967                " damage.  Please contact OEM Support for assistance\n");
2968 }
2969
2970 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2971 {
2972         int port = BP_PORT(bp);
2973         int reg_offset;
2974         u32 val, swap_val, swap_override;
2975
2976         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2977                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2978
2979         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2980
2981                 val = REG_RD(bp, reg_offset);
2982                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2983                 REG_WR(bp, reg_offset, val);
2984
2985                 BNX2X_ERR("SPIO5 hw attention\n");
2986
2987                 /* Fan failure attention */
2988                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2989                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2990                         /* Low power mode is controlled by GPIO 2 */
2991                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2992                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2993                         /* The PHY reset is controlled by GPIO 1 */
2994                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2995                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2996                         break;
2997
2998                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2999                         /* The PHY reset is controlled by GPIO 1 */
3000                         /* fake the port number to cancel the swap done in
3001                            set_gpio() */
3002                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3003                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3004                         port = (swap_val && swap_override) ^ 1;
3005                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3006                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3007                         break;
3008
3009                 default:
3010                         break;
3011                 }
3012                 bnx2x_fan_failure(bp);
3013         }
3014
3015         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3016                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3017                 bnx2x_acquire_phy_lock(bp);
3018                 bnx2x_handle_module_detect_int(&bp->link_params);
3019                 bnx2x_release_phy_lock(bp);
3020         }
3021
3022         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3023
3024                 val = REG_RD(bp, reg_offset);
3025                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3026                 REG_WR(bp, reg_offset, val);
3027
3028                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3029                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3030                 bnx2x_panic();
3031         }
3032 }
3033
3034 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3035 {
3036         u32 val;
3037
3038         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3039
3040                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3041                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3042                 /* DORQ discard attention */
3043                 if (val & 0x2)
3044                         BNX2X_ERR("FATAL error from DORQ\n");
3045         }
3046
3047         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3048
3049                 int port = BP_PORT(bp);
3050                 int reg_offset;
3051
3052                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3053                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3054
3055                 val = REG_RD(bp, reg_offset);
3056                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3057                 REG_WR(bp, reg_offset, val);
3058
3059                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3060                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3061                 bnx2x_panic();
3062         }
3063 }
3064
3065 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3066 {
3067         u32 val;
3068
3069         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3070
3071                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3072                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3073                 /* CFC error attention */
3074                 if (val & 0x2)
3075                         BNX2X_ERR("FATAL error from CFC\n");
3076         }
3077
3078         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3079
3080                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3081                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3082                 /* RQ_USDMDP_FIFO_OVERFLOW */
3083                 if (val & 0x18000)
3084                         BNX2X_ERR("FATAL error from PXP\n");
3085         }
3086
3087         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3088
3089                 int port = BP_PORT(bp);
3090                 int reg_offset;
3091
3092                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3093                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3094
3095                 val = REG_RD(bp, reg_offset);
3096                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3097                 REG_WR(bp, reg_offset, val);
3098
3099                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3100                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3101                 bnx2x_panic();
3102         }
3103 }
3104
3105 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3106 {
3107         u32 val;
3108
3109         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3110
3111                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3112                         int func = BP_FUNC(bp);
3113
3114                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3115                         bp->mf_config = SHMEM_RD(bp,
3116                                            mf_cfg.func_mf_config[func].config);
3117                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3118                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3119                                 bnx2x_dcc_event(bp,
3120                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3121                         bnx2x__link_status_update(bp);
3122                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3123                                 bnx2x_pmf_update(bp);
3124
3125                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3126
3127                         BNX2X_ERR("MC assert!\n");
3128                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3129                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3130                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3131                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3132                         bnx2x_panic();
3133
3134                 } else if (attn & BNX2X_MCP_ASSERT) {
3135
3136                         BNX2X_ERR("MCP assert!\n");
3137                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3138                         bnx2x_fw_dump(bp);
3139
3140                 } else
3141                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3142         }
3143
3144         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3145                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3146                 if (attn & BNX2X_GRC_TIMEOUT) {
3147                         val = CHIP_IS_E1H(bp) ?
3148                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3149                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3150                 }
3151                 if (attn & BNX2X_GRC_RSV) {
3152                         val = CHIP_IS_E1H(bp) ?
3153                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3154                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3155                 }
3156                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3157         }
3158 }
3159
3160 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3161 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3162
3163
3164 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3165 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3166 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3167 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3168 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3169 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
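/* The generic GRC register above is shared across the driver instances: its
 * low 16 bits hold a load counter (how many instances are currently loaded)
 * and bit 16 is a "reset in progress" flag; the helpers below manipulate the
 * two fields independently.
 */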
3170 /*
3171  * should be run under rtnl lock
3172  */
3173 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3174 {
3175         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3176         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3177         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3178         barrier();
3179         mmiowb();
3180 }
3181
3182 /*
3183  * should be run under rtnl lock
3184  */
3185 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3186 {
3187         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3188         val |= (1 << RESET_DONE_FLAG_SHIFT);
3189         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3190         barrier();
3191         mmiowb();
3192 }
3193
3194 /*
3195  * should be run under rtnl lock
3196  */
3197 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3198 {
3199         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3201         return !(val & RESET_DONE_FLAG_MASK);
3202 }
3203
3204 /*
3205  * should be run under rtnl lock
3206  */
3207 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3208 {
3209         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3210
3211         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3212
3213         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3214         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3215         barrier();
3216         mmiowb();
3217 }
3218
3219 /*
3220  * should be run under rtnl lock
3221  */
3222 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3223 {
3224         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225
3226         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3227
3228         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3229         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3230         barrier();
3231         mmiowb();
3232
3233         return val1;
3234 }
3235
3236 /*
3237  * should be run under rtnl lock
3238  */
3239 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3240 {
3241         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3242 }
3243
3244 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3245 {
3246         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3247         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3248 }
3249
3250 static inline void _print_next_block(int idx, const char *blk)
3251 {
3252         if (idx)
3253                 pr_cont(", ");
3254         pr_cont("%s", blk);
3255 }
3256
3257 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3258 {
3259         int i = 0;
3260         u32 cur_bit = 0;
3261         for (i = 0; sig; i++) {
3262                 cur_bit = ((u32)0x1 << i);
3263                 if (sig & cur_bit) {
3264                         switch (cur_bit) {
3265                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3266                                 _print_next_block(par_num++, "BRB");
3267                                 break;
3268                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3269                                 _print_next_block(par_num++, "PARSER");
3270                                 break;
3271                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3272                                 _print_next_block(par_num++, "TSDM");
3273                                 break;
3274                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3275                                 _print_next_block(par_num++, "SEARCHER");
3276                                 break;
3277                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3278                                 _print_next_block(par_num++, "TSEMI");
3279                                 break;
3280                         }
3281
3282                         /* Clear the bit */
3283                         sig &= ~cur_bit;
3284                 }
3285         }
3286
3287         return par_num;
3288 }
3289
3290 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3291 {
3292         int i = 0;
3293         u32 cur_bit = 0;
3294         for (i = 0; sig; i++) {
3295                 cur_bit = ((u32)0x1 << i);
3296                 if (sig & cur_bit) {
3297                         switch (cur_bit) {
3298                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3299                                 _print_next_block(par_num++, "PBCLIENT");
3300                                 break;
3301                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3302                                 _print_next_block(par_num++, "QM");
3303                                 break;
3304                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3305                                 _print_next_block(par_num++, "XSDM");
3306                                 break;
3307                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3308                                 _print_next_block(par_num++, "XSEMI");
3309                                 break;
3310                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3311                                 _print_next_block(par_num++, "DOORBELLQ");
3312                                 break;
3313                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3314                                 _print_next_block(par_num++, "VAUX PCI CORE");
3315                                 break;
3316                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3317                                 _print_next_block(par_num++, "DEBUG");
3318                                 break;
3319                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3320                                 _print_next_block(par_num++, "USDM");
3321                                 break;
3322                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3323                                 _print_next_block(par_num++, "USEMI");
3324                                 break;
3325                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3326                                 _print_next_block(par_num++, "UPB");
3327                                 break;
3328                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3329                                 _print_next_block(par_num++, "CSDM");
3330                                 break;
3331                         }
3332
3333                         /* Clear the bit */
3334                         sig &= ~cur_bit;
3335                 }
3336         }
3337
3338         return par_num;
3339 }
3340
3341 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3342 {
3343         int i = 0;
3344         u32 cur_bit = 0;
3345         for (i = 0; sig; i++) {
3346                 cur_bit = ((u32)0x1 << i);
3347                 if (sig & cur_bit) {
3348                         switch (cur_bit) {
3349                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3350                                 _print_next_block(par_num++, "CSEMI");
3351                                 break;
3352                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3353                                 _print_next_block(par_num++, "PXP");
3354                                 break;
3355                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3356                                 _print_next_block(par_num++,
3357                                         "PXPPCICLOCKCLIENT");
3358                                 break;
3359                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3360                                 _print_next_block(par_num++, "CFC");
3361                                 break;
3362                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3363                                 _print_next_block(par_num++, "CDU");
3364                                 break;
3365                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3366                                 _print_next_block(par_num++, "IGU");
3367                                 break;
3368                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3369                                 _print_next_block(par_num++, "MISC");
3370                                 break;
3371                         }
3372
3373                         /* Clear the bit */
3374                         sig &= ~cur_bit;
3375                 }
3376         }
3377
3378         return par_num;
3379 }
3380
3381 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3382 {
3383         int i = 0;
3384         u32 cur_bit = 0;
3385         for (i = 0; sig; i++) {
3386                 cur_bit = ((u32)0x1 << i);
3387                 if (sig & cur_bit) {
3388                         switch (cur_bit) {
3389                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3390                                 _print_next_block(par_num++, "MCP ROM");
3391                                 break;
3392                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3393                                 _print_next_block(par_num++, "MCP UMP RX");
3394                                 break;
3395                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3396                                 _print_next_block(par_num++, "MCP UMP TX");
3397                                 break;
3398                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3399                                 _print_next_block(par_num++, "MCP SCPAD");
3400                                 break;
3401                         }
3402
3403                         /* Clear the bit */
3404                         sig &= ~cur_bit;
3405                 }
3406         }
3407
3408         return par_num;
3409 }
3410
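/* The four helpers above each walk one AEU "after invert" signal register,
 * printing the name of every block whose parity bit is set; par_num is only
 * used by _print_next_block() to decide whether a separating comma is
 * needed.
 */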
3411 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3412                                      u32 sig2, u32 sig3)
3413 {
3414         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3415             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3416                 int par_num = 0;
3417                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
3418                         "[0]:0x%08x [1]:0x%08x "
3419                         "[2]:0x%08x [3]:0x%08x\n",
3420                           sig0 & HW_PRTY_ASSERT_SET_0,
3421                           sig1 & HW_PRTY_ASSERT_SET_1,
3422                           sig2 & HW_PRTY_ASSERT_SET_2,
3423                           sig3 & HW_PRTY_ASSERT_SET_3);
3424                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3425                        bp->dev->name);
3426                 par_num = bnx2x_print_blocks_with_parity0(
3427                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3428                 par_num = bnx2x_print_blocks_with_parity1(
3429                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3430                 par_num = bnx2x_print_blocks_with_parity2(
3431                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3432                 par_num = bnx2x_print_blocks_with_parity3(
3433                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3434                 pr_cont("\n");
3435                 return true;
3436         } else
3437                 return false;
3438 }
3439
3440 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3441 {
3442         struct attn_route attn;
3443         int port = BP_PORT(bp);
3444
3445         attn.sig[0] = REG_RD(bp,
3446                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3447                              port*4);
3448         attn.sig[1] = REG_RD(bp,
3449                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3450                              port*4);
3451         attn.sig[2] = REG_RD(bp,
3452                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3453                              port*4);
3454         attn.sig[3] = REG_RD(bp,
3455                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3456                              port*4);
3457
3458         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3459                                         attn.sig[3]);
3460 }
3461
3462 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3463 {
3464         struct attn_route attn, *group_mask;
3465         int port = BP_PORT(bp);
3466         int index;
3467         u32 reg_addr;
3468         u32 val;
3469         u32 aeu_mask;
3470
3471         /* need to take HW lock because the MCP or the other port might
3472            also try to handle this event */
3473         bnx2x_acquire_alr(bp);
3474
3475         if (bnx2x_chk_parity_attn(bp)) {
3476                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3477                 bnx2x_set_reset_in_progress(bp);
3478                 schedule_delayed_work(&bp->reset_task, 0);
3479                 /* Disable HW interrupts */
3480                 bnx2x_int_disable(bp);
3481                 bnx2x_release_alr(bp);
3482                 /* In case of parity errors don't handle attentions so that
3483                  * the other function can also "see" the parity errors.
3484                  */
3485                 return;
3486         }
3487
3488         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3489         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3490         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3491         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3492         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3493            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3494
3495         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3496                 if (deasserted & (1 << index)) {
3497                         group_mask = &bp->attn_group[index];
3498
3499                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3500                            index, group_mask->sig[0], group_mask->sig[1],
3501                            group_mask->sig[2], group_mask->sig[3]);
3502
3503                         bnx2x_attn_int_deasserted3(bp,
3504                                         attn.sig[3] & group_mask->sig[3]);
3505                         bnx2x_attn_int_deasserted1(bp,
3506                                         attn.sig[1] & group_mask->sig[1]);
3507                         bnx2x_attn_int_deasserted2(bp,
3508                                         attn.sig[2] & group_mask->sig[2]);
3509                         bnx2x_attn_int_deasserted0(bp,
3510                                         attn.sig[0] & group_mask->sig[0]);
3511                 }
3512         }
3513
3514         bnx2x_release_alr(bp);
3515
3516         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3517
3518         val = ~deasserted;
3519         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3520            val, reg_addr);
3521         REG_WR(bp, reg_addr, val);
3522
3523         if (~bp->attn_state & deasserted)
3524                 BNX2X_ERR("IGU ERROR\n");
3525
3526         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3527                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3528
3529         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3530         aeu_mask = REG_RD(bp, reg_addr);
3531
3532         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3533            aeu_mask, deasserted);
3534         aeu_mask |= (deasserted & 0x3ff);
3535         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3536
3537         REG_WR(bp, reg_addr, aeu_mask);
3538         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3539
3540         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3541         bp->attn_state &= ~deasserted;
3542         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3543 }
3544
3545 static void bnx2x_attn_int(struct bnx2x *bp)
3546 {
3547         /* read local copy of bits */
3548         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3549                                                                 attn_bits);
3550         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3551                                                                 attn_bits_ack);
3552         u32 attn_state = bp->attn_state;
3553
3554         /* look for changed bits */
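        /* a bit is newly asserted when set in attn_bits but not yet
         * acknowledged or recorded in attn_state; it is deasserted once it
         * is acknowledged and recorded but cleared from attn_bits */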
3555         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3556         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3557
3558         DP(NETIF_MSG_HW,
3559            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3560            attn_bits, attn_ack, asserted, deasserted);
3561
3562         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3563                 BNX2X_ERR("BAD attention state\n");
3564
3565         /* handle bits that were raised */
3566         if (asserted)
3567                 bnx2x_attn_int_asserted(bp, asserted);
3568
3569         if (deasserted)
3570                 bnx2x_attn_int_deasserted(bp, deasserted);
3571 }
3572
3573 static void bnx2x_sp_task(struct work_struct *work)
3574 {
3575         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3576         u16 status;
3577
3578         /* Return here if interrupt is disabled */
3579         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3580                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3581                 return;
3582         }
3583
3584         status = bnx2x_update_dsb_idx(bp);
3585 /*      if (status == 0)                                     */
3586 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3587
3588         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3589
3590         /* HW attentions */
3591         if (status & 0x1) {
3592                 bnx2x_attn_int(bp);
3593                 status &= ~0x1;
3594         }
3595
3596         /* CStorm events: STAT_QUERY */
3597         if (status & 0x2) {
3598                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3599                 status &= ~0x2;
3600         }
3601
3602         if (unlikely(status))
3603                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3604                    status);
3605
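        /* acknowledge each default status block index; host interrupts are
         * re-enabled only together with the last ack (IGU_INT_ENABLE) */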
3606         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3607                      IGU_INT_NOP, 1);
3608         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3609                      IGU_INT_NOP, 1);
3610         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3611                      IGU_INT_NOP, 1);
3612         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3613                      IGU_INT_NOP, 1);
3614         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3615                      IGU_INT_ENABLE, 1);
3616 }
3617
3618 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3619 {
3620         struct net_device *dev = dev_instance;
3621         struct bnx2x *bp = netdev_priv(dev);
3622
3623         /* Return here if interrupt is disabled */
3624         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3625                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3626                 return IRQ_HANDLED;
3627         }
3628
3629         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3630
3631 #ifdef BNX2X_STOP_ON_ERROR
3632         if (unlikely(bp->panic))
3633                 return IRQ_HANDLED;
3634 #endif
3635
3636 #ifdef BCM_CNIC
3637         {
3638                 struct cnic_ops *c_ops;
3639
3640                 rcu_read_lock();
3641                 c_ops = rcu_dereference(bp->cnic_ops);
3642                 if (c_ops)
3643                         c_ops->cnic_handler(bp->cnic_data, NULL);
3644                 rcu_read_unlock();
3645         }
3646 #endif
3647         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3648
3649         return IRQ_HANDLED;
3650 }
3651
3652 /* end of slow path */
3653
3654 /* Statistics */
3655
3656 /****************************************************************************
3657 * Macros
3658 ****************************************************************************/
3659
3660 /* sum[hi:lo] += add[hi:lo] */
3661 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3662         do { \
3663                 s_lo += a_lo; \
3664                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3665         } while (0)
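/* e.g. adding a[hi:lo] = 0x0:0x1 to s[hi:lo] = 0x1:0xffffffff wraps s_lo to
 * 0, the (s_lo < a_lo) test detects the wrap and the carry makes the result
 * s[hi:lo] = 0x2:0x0.
 */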
3666
3667 /* difference = minuend - subtrahend */
3668 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3669         do { \
3670                 if (m_lo < s_lo) { \
3671                         /* underflow */ \
3672                         d_hi = m_hi - s_hi; \
3673                         if (d_hi > 0) { \
3674                                 /* we can 'loan' 1 */ \
3675                                 d_hi--; \
3676                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3677                         } else { \
3678                                 /* m_hi <= s_hi */ \
3679                                 d_hi = 0; \
3680                                 d_lo = 0; \
3681                         } \
3682                 } else { \
3683                         /* m_lo >= s_lo */ \
3684                         if (m_hi < s_hi) { \
3685                                 d_hi = 0; \
3686                                 d_lo = 0; \
3687                         } else { \
3688                                 /* m_hi >= s_hi */ \
3689                                 d_hi = m_hi - s_hi; \
3690                                 d_lo = m_lo - s_lo; \
3691                         } \
3692                 } \
3693         } while (0)
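/* e.g. m[hi:lo] = 0x2:0x0 minus s[hi:lo] = 0x1:0xffffffff borrows from the
 * high word: d_hi = 2 - 1 - 1 = 0 and
 * d_lo = 0 + (UINT_MAX - 0xffffffff) + 1 = 1.  If the subtrahend is larger
 * than the minuend, the result saturates to 0:0 instead of going negative.
 */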
3694
3695 #define UPDATE_STAT64(s, t) \
3696         do { \
3697                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3698                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3699                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3700                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3701                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3702                        pstats->mac_stx[1].t##_lo, diff.lo); \
3703         } while (0)
3704
3705 #define UPDATE_STAT64_NIG(s, t) \
3706         do { \
3707                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3708                         diff.lo, new->s##_lo, old->s##_lo); \
3709                 ADD_64(estats->t##_hi, diff.hi, \
3710                        estats->t##_lo, diff.lo); \
3711         } while (0)
3712
3713 /* sum[hi:lo] += add */
3714 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3715         do { \
3716                 s_lo += a; \
3717                 s_hi += (s_lo < a) ? 1 : 0; \
3718         } while (0)
3719
3720 #define UPDATE_EXTEND_STAT(s) \
3721         do { \
3722                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3723                               pstats->mac_stx[1].s##_lo, \
3724                               new->s); \
3725         } while (0)
3726
3727 #define UPDATE_EXTEND_TSTAT(s, t) \
3728         do { \
3729                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3730                 old_tclient->s = tclient->s; \
3731                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3732         } while (0)
3733
3734 #define UPDATE_EXTEND_USTAT(s, t) \
3735         do { \
3736                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3737                 old_uclient->s = uclient->s; \
3738                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3739         } while (0)
3740
3741 #define UPDATE_EXTEND_XSTAT(s, t) \
3742         do { \
3743                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3744                 old_xclient->s = xclient->s; \
3745                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3746         } while (0)
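/* The UPDATE_EXTEND_*STAT helpers above expect tclient/uclient/xclient,
 * their old_* counterparts, qstats and a u32 diff to be in scope at the
 * call site; wrap-around of the 32-bit firmware counters is absorbed by the
 * unsigned subtraction.
 */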
3747
3748 /* minuend -= subtrahend */
3749 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3750         do { \
3751                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3752         } while (0)
3753
3754 /* minuend[hi:lo] -= subtrahend */
3755 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3756         do { \
3757                 SUB_64(m_hi, 0, m_lo, s); \
3758         } while (0)
3759
3760 #define SUB_EXTEND_USTAT(s, t) \
3761         do { \
3762                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3763                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3764         } while (0)
3765
3766 /*
3767  * General service functions
3768  */
3769
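/* hiref points at the high dword of a 64-bit statistic stored as {hi, lo};
 * on 64-bit kernels both halves are combined into the returned long, while
 * on 32-bit kernels only the low dword fits and is returned.
 */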
3770 static inline long bnx2x_hilo(u32 *hiref)
3771 {
3772         u32 lo = *(hiref + 1);
3773 #if (BITS_PER_LONG == 64)
3774         u32 hi = *hiref;
3775
3776         return HILO_U64(hi, lo);
3777 #else
3778         return lo;
3779 #endif
3780 }
3781
3782 /*
3783  * Init service functions
3784  */
3785
3786 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3787 {
3788         if (!bp->stats_pending) {
3789                 struct eth_query_ramrod_data ramrod_data = {0};
3790                 int i, rc;
3791
3792                 ramrod_data.drv_counter = bp->stats_counter++;
3793                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3794                 for_each_queue(bp, i)
3795                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3796
3797                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3798                                    ((u32 *)&ramrod_data)[1],
3799                                    ((u32 *)&ramrod_data)[0], 0);
3800                 if (rc == 0) {
3801                 /* stats ramrod has its own slot on the spq */
3802                         bp->spq_left++;
3803                         bp->stats_pending = 1;
3804                 }
3805         }
3806 }
3807
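/* Kick off the DMAE transfers prepared in the slowpath buffer: when commands
 * were queued (bp->executer_idx != 0), a loader command copies the first
 * prepared command into the DMAE command memory, and its completion value,
 * written to a dmae_reg_go_c register, triggers that command's execution.
 */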
3808 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3809 {
3810         struct dmae_command *dmae = &bp->stats_dmae;
3811         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3812
3813         *stats_comp = DMAE_COMP_VAL;
3814         if (CHIP_REV_IS_SLOW(bp))
3815                 return;
3816
3817         /* loader */
3818         if (bp->executer_idx) {
3819                 int loader_idx = PMF_DMAE_C(bp);
3820
3821                 memset(dmae, 0, sizeof(struct dmae_command));
3822
3823                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3824                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3825                                 DMAE_CMD_DST_RESET |
3826 #ifdef __BIG_ENDIAN
3827                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3828 #else
3829                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3830 #endif
3831                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3832                                                DMAE_CMD_PORT_0) |
3833                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3834                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3835                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3836                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3837                                      sizeof(struct dmae_command) *
3838                                      (loader_idx + 1)) >> 2;
3839                 dmae->dst_addr_hi = 0;
3840                 dmae->len = sizeof(struct dmae_command) >> 2;
3841                 if (CHIP_IS_E1(bp))
3842                         dmae->len--;
3843                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3844                 dmae->comp_addr_hi = 0;
3845                 dmae->comp_val = 1;
3846
3847                 *stats_comp = 0;
3848                 bnx2x_post_dmae(bp, dmae, loader_idx);
3849
3850         } else if (bp->func_stx) {
3851                 *stats_comp = 0;
3852                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3853         }
3854 }
3855
3856 static int bnx2x_stats_comp(struct bnx2x *bp)
3857 {
3858         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3859         int cnt = 10;
3860
3861         might_sleep();
3862         while (*stats_comp != DMAE_COMP_VAL) {
3863                 if (!cnt) {
3864                         BNX2X_ERR("timed out waiting for stats to finish\n");
3865                         break;
3866                 }
3867                 cnt--;
3868                 msleep(1);
3869         }
3870         return 1;
3871 }
3872
3873 /*
3874  * Statistics service functions
3875  */
3876
3877 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3878 {
3879         struct dmae_command *dmae;
3880         u32 opcode;
3881         int loader_idx = PMF_DMAE_C(bp);
3882         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3883
3884         /* sanity */
3885         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3886                 BNX2X_ERR("BUG!\n");
3887                 return;
3888         }
3889
3890         bp->executer_idx = 0;
3891
3892         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3893                   DMAE_CMD_C_ENABLE |
3894                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3895 #ifdef __BIG_ENDIAN
3896                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3897 #else
3898                   DMAE_CMD_ENDIANITY_DW_SWAP |
3899 #endif
3900                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3901                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3902
3903         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3904         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3905         dmae->src_addr_lo = bp->port.port_stx >> 2;
3906         dmae->src_addr_hi = 0;
3907         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3908         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3909         dmae->len = DMAE_LEN32_RD_MAX;
3910         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3911         dmae->comp_addr_hi = 0;
3912         dmae->comp_val = 1;
3913
3914         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3915         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3916         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3917         dmae->src_addr_hi = 0;
3918         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3919                                    DMAE_LEN32_RD_MAX * 4);
3920         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3921                                    DMAE_LEN32_RD_MAX * 4);
3922         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3923         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3924         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3925         dmae->comp_val = DMAE_COMP_VAL;
3926
3927         *stats_comp = 0;
3928         bnx2x_hw_stats_post(bp);
3929         bnx2x_stats_comp(bp);
3930 }
3931
3932 static void bnx2x_port_stats_init(struct bnx2x *bp)
3933 {
3934         struct dmae_command *dmae;
3935         int port = BP_PORT(bp);
3936         int vn = BP_E1HVN(bp);
3937         u32 opcode;
3938         int loader_idx = PMF_DMAE_C(bp);
3939         u32 mac_addr;
3940         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3941
3942         /* sanity */
3943         if (!bp->link_vars.link_up || !bp->port.pmf) {
3944                 BNX2X_ERR("BUG!\n");
3945                 return;
3946         }
3947
3948         bp->executer_idx = 0;
3949
3950         /* MCP */
3951         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3952                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3953                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3954 #ifdef __BIG_ENDIAN
3955                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3956 #else
3957                   DMAE_CMD_ENDIANITY_DW_SWAP |
3958 #endif
3959                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3960                   (vn << DMAE_CMD_E1HVN_SHIFT));
3961
3962         if (bp->port.port_stx) {
3963
3964                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3965                 dmae->opcode = opcode;
3966                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3967                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3968                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3969                 dmae->dst_addr_hi = 0;
3970                 dmae->len = sizeof(struct host_port_stats) >> 2;
3971                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3972                 dmae->comp_addr_hi = 0;
3973                 dmae->comp_val = 1;
3974         }
3975
3976         if (bp->func_stx) {
3977
3978                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3979                 dmae->opcode = opcode;
3980                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3981                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3982                 dmae->dst_addr_lo = bp->func_stx >> 2;
3983                 dmae->dst_addr_hi = 0;
3984                 dmae->len = sizeof(struct host_func_stats) >> 2;
3985                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3986                 dmae->comp_addr_hi = 0;
3987                 dmae->comp_val = 1;
3988         }
3989
3990         /* MAC */
3991         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3992                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3993                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3994 #ifdef __BIG_ENDIAN
3995                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3996 #else
3997                   DMAE_CMD_ENDIANITY_DW_SWAP |
3998 #endif
3999                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4000                   (vn << DMAE_CMD_E1HVN_SHIFT));
4001
4002         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
4003
4004                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4005                                    NIG_REG_INGRESS_BMAC0_MEM);
4006
4007                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4008                    BIGMAC_REGISTER_TX_STAT_GTBYT */
4009                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4010                 dmae->opcode = opcode;
4011                 dmae->src_addr_lo = (mac_addr +
4012                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4013                 dmae->src_addr_hi = 0;
4014                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4015                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4016                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4017                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4018                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4019                 dmae->comp_addr_hi = 0;
4020                 dmae->comp_val = 1;
4021
4022                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4023                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
4024                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4025                 dmae->opcode = opcode;
4026                 dmae->src_addr_lo = (mac_addr +
4027                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4028                 dmae->src_addr_hi = 0;
4029                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4030                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4031                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4032                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4033                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4034                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4035                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4036                 dmae->comp_addr_hi = 0;
4037                 dmae->comp_val = 1;
4038
4039         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4040
4041                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4042
4043                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4044                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4045                 dmae->opcode = opcode;
4046                 dmae->src_addr_lo = (mac_addr +
4047                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4048                 dmae->src_addr_hi = 0;
4049                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4050                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4051                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4052                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4053                 dmae->comp_addr_hi = 0;
4054                 dmae->comp_val = 1;
4055
4056                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4057                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4058                 dmae->opcode = opcode;
4059                 dmae->src_addr_lo = (mac_addr +
4060                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4061                 dmae->src_addr_hi = 0;
4062                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4063                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4064                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4065                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4066                 dmae->len = 1;
4067                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4068                 dmae->comp_addr_hi = 0;
4069                 dmae->comp_val = 1;
4070
4071                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4072                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4073                 dmae->opcode = opcode;
4074                 dmae->src_addr_lo = (mac_addr +
4075                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4076                 dmae->src_addr_hi = 0;
4077                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4078                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4079                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4080                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4081                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4082                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4083                 dmae->comp_addr_hi = 0;
4084                 dmae->comp_val = 1;
4085         }
4086
4087         /* NIG */
4088         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4089         dmae->opcode = opcode;
4090         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4091                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
4092         dmae->src_addr_hi = 0;
4093         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4094         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4095         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4096         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4097         dmae->comp_addr_hi = 0;
4098         dmae->comp_val = 1;
4099
4100         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4101         dmae->opcode = opcode;
4102         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4103                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4104         dmae->src_addr_hi = 0;
4105         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4106                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4107         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4108                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4109         dmae->len = (2*sizeof(u32)) >> 2;
4110         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4111         dmae->comp_addr_hi = 0;
4112         dmae->comp_val = 1;
4113
4114         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4115         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4116                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4117                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4118 #ifdef __BIG_ENDIAN
4119                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4120 #else
4121                         DMAE_CMD_ENDIANITY_DW_SWAP |
4122 #endif
4123                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4124                         (vn << DMAE_CMD_E1HVN_SHIFT));
4125         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4126                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4127         dmae->src_addr_hi = 0;
4128         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4129                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4130         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4131                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4132         dmae->len = (2*sizeof(u32)) >> 2;
4133         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4134         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4135         dmae->comp_val = DMAE_COMP_VAL;
4136
4137         *stats_comp = 0;
4138 }
4139
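/* Prepare the single DMAE command that copies this function's statistics
 * from host memory out to its area in shared memory (func_stx).  GRC-side
 * DMAE addresses are dword addresses, hence the ">> 2" on func_stx.
 * Completion is signalled by the engine writing DMAE_COMP_VAL into the
 * stats_comp word, which is cleared here to arm the handshake.  The
 * consumer side is roughly (a sketch - the real wait is bnx2x_stats_comp()):
 *
 *	while (*stats_comp != DMAE_COMP_VAL)
 *		cpu_relax();	/- wait for the DMAE write-back -/
 */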
4140 static void bnx2x_func_stats_init(struct bnx2x *bp)
4141 {
4142         struct dmae_command *dmae = &bp->stats_dmae;
4143         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4144
4145         /* sanity */
4146         if (!bp->func_stx) {
4147                 BNX2X_ERR("BUG! func_stx not set\n");
4148                 return;
4149         }
4150
4151         bp->executer_idx = 0;
4152         memset(dmae, 0, sizeof(struct dmae_command));
4153
4154         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4155                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4156                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4157 #ifdef __BIG_ENDIAN
4158                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4159 #else
4160                         DMAE_CMD_ENDIANITY_DW_SWAP |
4161 #endif
4162                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4163                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4164         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4165         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4166         dmae->dst_addr_lo = bp->func_stx >> 2;
4167         dmae->dst_addr_hi = 0;
4168         dmae->len = sizeof(struct host_func_stats) >> 2;
4169         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4170         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4171         dmae->comp_val = DMAE_COMP_VAL;
4172
4173         *stats_comp = 0;
4174 }
4175
4176 static void bnx2x_stats_start(struct bnx2x *bp)
4177 {
4178         if (bp->port.pmf)
4179                 bnx2x_port_stats_init(bp);
4180
4181         else if (bp->func_stx)
4182                 bnx2x_func_stats_init(bp);
4183
4184         bnx2x_hw_stats_post(bp);
4185         bnx2x_storm_stats_post(bp);
4186 }
4187
4188 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4189 {
4190         bnx2x_stats_comp(bp);
4191         bnx2x_stats_pmf_update(bp);
4192         bnx2x_stats_start(bp);
4193 }
4194
4195 static void bnx2x_stats_restart(struct bnx2x *bp)
4196 {
4197         bnx2x_stats_comp(bp);
4198         bnx2x_stats_start(bp);
4199 }
4200
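/* Fold the latest BigMAC hardware snapshot into the driver's 64-bit
 * software counters.  UPDATE_STAT64(hw, sw) appears to keep two copies in
 * pstats->mac_stx[]: [0] holds the previous raw snapshot and [1] the
 * accumulated total, so each call is roughly:
 *
 *	diff   = new->hw - mac_stx[0].sw;	(64-bit delta)
 *	mac_stx[0].sw  = new->hw;		(remember snapshot)
 *	mac_stx[1].sw += diff;			(accumulate)
 *
 * The local 'diff' {lo, hi} pair below is the scratch space those macros
 * expand into.
 */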
4201 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4202 {
4203         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4204         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4205         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4206         struct {
4207                 u32 lo;
4208                 u32 hi;
4209         } diff;
4210
4211         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4212         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4213         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4214         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4215         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4216         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4217         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4218         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4219         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4220         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4221         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4222         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4223         UPDATE_STAT64(tx_stat_gt127,
4224                                 tx_stat_etherstatspkts65octetsto127octets);
4225         UPDATE_STAT64(tx_stat_gt255,
4226                                 tx_stat_etherstatspkts128octetsto255octets);
4227         UPDATE_STAT64(tx_stat_gt511,
4228                                 tx_stat_etherstatspkts256octetsto511octets);
4229         UPDATE_STAT64(tx_stat_gt1023,
4230                                 tx_stat_etherstatspkts512octetsto1023octets);
4231         UPDATE_STAT64(tx_stat_gt1518,
4232                                 tx_stat_etherstatspkts1024octetsto1522octets);
4233         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4234         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4235         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4236         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4237         UPDATE_STAT64(tx_stat_gterr,
4238                                 tx_stat_dot3statsinternalmactransmiterrors);
4239         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4240
4241         estats->pause_frames_received_hi =
4242                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4243         estats->pause_frames_received_lo =
4244                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4245
4246         estats->pause_frames_sent_hi =
4247                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4248         estats->pause_frames_sent_lo =
4249                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4250 }
4251
4252 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4253 {
4254         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4255         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4256         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4257
4258         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4259         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4260         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4261         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4262         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4263         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4264         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4265         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4266         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4267         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4268         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4269         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4270         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4271         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4272         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4273         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4274         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4275         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4276         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4277         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4278         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4279         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4280         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4281         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4282         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4283         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4284         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4285         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4286         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4287         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4288         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4289
4290         estats->pause_frames_received_hi =
4291                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4292         estats->pause_frames_received_lo =
4293                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4294         ADD_64(estats->pause_frames_received_hi,
4295                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4296                estats->pause_frames_received_lo,
4297                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4298
4299         estats->pause_frames_sent_hi =
4300                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
4301         estats->pause_frames_sent_lo =
4302                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
4303         ADD_64(estats->pause_frames_sent_hi,
4304                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4305                estats->pause_frames_sent_lo,
4306                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4307 }
4308
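/* Merge MAC statistics (BMAC or EMAC, whichever is active) with the NIG
 * block counters.  The NIG counters are plain 32-bit values, so the code
 * keeps the previous readings in bp->port.old_nig_stats and extends the
 * delta into hi/lo pairs.  ADD_EXTEND_64(hi, lo, delta) is roughly:
 *
 *	lo += delta;
 *	if (lo < delta)		(32-bit wrap-around)
 *		hi++;
 */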
4309 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4310 {
4311         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4312         struct nig_stats *old = &(bp->port.old_nig_stats);
4313         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4314         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4315         struct {
4316                 u32 lo;
4317                 u32 hi;
4318         } diff;
4319
4320         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4321                 bnx2x_bmac_stats_update(bp);
4322
4323         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4324                 bnx2x_emac_stats_update(bp);
4325
4326         else { /* unreached */
4327                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4328                 return -1;
4329         }
4330
4331         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4332                       new->brb_discard - old->brb_discard);
4333         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4334                       new->brb_truncate - old->brb_truncate);
4335
4336         UPDATE_STAT64_NIG(egress_mac_pkt0,
4337                                         etherstatspkts1024octetsto1522octets);
4338         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4339
4340         memcpy(old, new, sizeof(struct nig_stats));
4341
4342         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4343                sizeof(struct mac_stx));
4344         estats->brb_drop_hi = pstats->brb_drop_hi;
4345         estats->brb_drop_lo = pstats->brb_drop_lo;
4346
4347         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4348
4349         if (!BP_NOMCP(bp)) {
4350                 u32 nig_timer_max =
4351                         SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4352                 if (nig_timer_max != estats->nig_timer_max) {
4353                         estats->nig_timer_max = nig_timer_max;
4354                         BNX2X_ERR("NIG timer max (%u)\n",
4355                                   estats->nig_timer_max);
4356                 }
4357         }
4358
4359         return 0;
4360 }
4361
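/* Pull the per-client statistics that the t/u/x storm firmware DMAEs into
 * fw_stats and aggregate them per queue and per function.  Each storm
 * stamps its block with a stats_counter sequence number; a block is only
 * valid if it matches the query the driver last posted, i.e.
 * (counter + 1) == bp->stats_counter.  A stale block from any storm
 * aborts the whole update (the distinct -1/-2/-4 return codes identify
 * the offending storm in the debug logs).
 */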
4362 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4363 {
4364         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4365         struct tstorm_per_port_stats *tport =
4366                                         &stats->tstorm_common.port_statistics;
4367         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4368         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4369         int i;
4370
4371         memcpy(&(fstats->total_bytes_received_hi),
4372                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4373                sizeof(struct host_func_stats) - 2*sizeof(u32));
4374         estats->error_bytes_received_hi = 0;
4375         estats->error_bytes_received_lo = 0;
4376         estats->etherstatsoverrsizepkts_hi = 0;
4377         estats->etherstatsoverrsizepkts_lo = 0;
4378         estats->no_buff_discard_hi = 0;
4379         estats->no_buff_discard_lo = 0;
4380
4381         for_each_queue(bp, i) {
4382                 struct bnx2x_fastpath *fp = &bp->fp[i];
4383                 int cl_id = fp->cl_id;
4384                 struct tstorm_per_client_stats *tclient =
4385                                 &stats->tstorm_common.client_statistics[cl_id];
4386                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4387                 struct ustorm_per_client_stats *uclient =
4388                                 &stats->ustorm_common.client_statistics[cl_id];
4389                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4390                 struct xstorm_per_client_stats *xclient =
4391                                 &stats->xstorm_common.client_statistics[cl_id];
4392                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4393                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4394                 u32 diff;
4395
4396                 /* are storm stats valid? */
4397                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4398                                                         bp->stats_counter) {
4399                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4400                            "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
4401                            i, xclient->stats_counter, bp->stats_counter);
4402                         return -1;
4403                 }
4404                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4405                                                         bp->stats_counter) {
4406                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4407                            "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
4408                            i, tclient->stats_counter, bp->stats_counter);
4409                         return -2;
4410                 }
4411                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4412                                                         bp->stats_counter) {
4413                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4414                            "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
4415                            i, uclient->stats_counter, bp->stats_counter);
4416                         return -4;
4417                 }
4418
4419                 qstats->total_bytes_received_hi =
4420                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4421                 qstats->total_bytes_received_lo =
4422                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4423
4424                 ADD_64(qstats->total_bytes_received_hi,
4425                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4426                        qstats->total_bytes_received_lo,
4427                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4428
4429                 ADD_64(qstats->total_bytes_received_hi,
4430                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4431                        qstats->total_bytes_received_lo,
4432                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4433
4434                 SUB_64(qstats->total_bytes_received_hi,
4435                        le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4436                        qstats->total_bytes_received_lo,
4437                        le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4438
4439                 SUB_64(qstats->total_bytes_received_hi,
4440                        le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4441                        qstats->total_bytes_received_lo,
4442                        le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4443
4444                 SUB_64(qstats->total_bytes_received_hi,
4445                        le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4446                        qstats->total_bytes_received_lo,
4447                        le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4448
4449                 qstats->valid_bytes_received_hi =
4450                                         qstats->total_bytes_received_hi;
4451                 qstats->valid_bytes_received_lo =
4452                                         qstats->total_bytes_received_lo;
4453
4454                 qstats->error_bytes_received_hi =
4455                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4456                 qstats->error_bytes_received_lo =
4457                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4458
4459                 ADD_64(qstats->total_bytes_received_hi,
4460                        qstats->error_bytes_received_hi,
4461                        qstats->total_bytes_received_lo,
4462                        qstats->error_bytes_received_lo);
4463
4464                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4465                                         total_unicast_packets_received);
4466                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4467                                         total_multicast_packets_received);
4468                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4469                                         total_broadcast_packets_received);
4470                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4471                                         etherstatsoverrsizepkts);
4472                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4473
4474                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4475                                         total_unicast_packets_received);
4476                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4477                                         total_multicast_packets_received);
4478                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4479                                         total_broadcast_packets_received);
4480                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4481                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4482                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4483
4484                 qstats->total_bytes_transmitted_hi =
4485                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4486                 qstats->total_bytes_transmitted_lo =
4487                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4488
4489                 ADD_64(qstats->total_bytes_transmitted_hi,
4490                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4491                        qstats->total_bytes_transmitted_lo,
4492                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4493
4494                 ADD_64(qstats->total_bytes_transmitted_hi,
4495                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4496                        qstats->total_bytes_transmitted_lo,
4497                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4498
4499                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4500                                         total_unicast_packets_transmitted);
4501                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4502                                         total_multicast_packets_transmitted);
4503                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4504                                         total_broadcast_packets_transmitted);
4505
4506                 old_tclient->checksum_discard = tclient->checksum_discard;
4507                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4508
4509                 ADD_64(fstats->total_bytes_received_hi,
4510                        qstats->total_bytes_received_hi,
4511                        fstats->total_bytes_received_lo,
4512                        qstats->total_bytes_received_lo);
4513                 ADD_64(fstats->total_bytes_transmitted_hi,
4514                        qstats->total_bytes_transmitted_hi,
4515                        fstats->total_bytes_transmitted_lo,
4516                        qstats->total_bytes_transmitted_lo);
4517                 ADD_64(fstats->total_unicast_packets_received_hi,
4518                        qstats->total_unicast_packets_received_hi,
4519                        fstats->total_unicast_packets_received_lo,
4520                        qstats->total_unicast_packets_received_lo);
4521                 ADD_64(fstats->total_multicast_packets_received_hi,
4522                        qstats->total_multicast_packets_received_hi,
4523                        fstats->total_multicast_packets_received_lo,
4524                        qstats->total_multicast_packets_received_lo);
4525                 ADD_64(fstats->total_broadcast_packets_received_hi,
4526                        qstats->total_broadcast_packets_received_hi,
4527                        fstats->total_broadcast_packets_received_lo,
4528                        qstats->total_broadcast_packets_received_lo);
4529                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4530                        qstats->total_unicast_packets_transmitted_hi,
4531                        fstats->total_unicast_packets_transmitted_lo,
4532                        qstats->total_unicast_packets_transmitted_lo);
4533                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4534                        qstats->total_multicast_packets_transmitted_hi,
4535                        fstats->total_multicast_packets_transmitted_lo,
4536                        qstats->total_multicast_packets_transmitted_lo);
4537                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4538                        qstats->total_broadcast_packets_transmitted_hi,
4539                        fstats->total_broadcast_packets_transmitted_lo,
4540                        qstats->total_broadcast_packets_transmitted_lo);
4541                 ADD_64(fstats->valid_bytes_received_hi,
4542                        qstats->valid_bytes_received_hi,
4543                        fstats->valid_bytes_received_lo,
4544                        qstats->valid_bytes_received_lo);
4545
4546                 ADD_64(estats->error_bytes_received_hi,
4547                        qstats->error_bytes_received_hi,
4548                        estats->error_bytes_received_lo,
4549                        qstats->error_bytes_received_lo);
4550                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4551                        qstats->etherstatsoverrsizepkts_hi,
4552                        estats->etherstatsoverrsizepkts_lo,
4553                        qstats->etherstatsoverrsizepkts_lo);
4554                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4555                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4556         }
4557
4558         ADD_64(fstats->total_bytes_received_hi,
4559                estats->rx_stat_ifhcinbadoctets_hi,
4560                fstats->total_bytes_received_lo,
4561                estats->rx_stat_ifhcinbadoctets_lo);
4562
4563         memcpy(estats, &(fstats->total_bytes_received_hi),
4564                sizeof(struct host_func_stats) - 2*sizeof(u32));
4565
4566         ADD_64(estats->etherstatsoverrsizepkts_hi,
4567                estats->rx_stat_dot3statsframestoolong_hi,
4568                estats->etherstatsoverrsizepkts_lo,
4569                estats->rx_stat_dot3statsframestoolong_lo);
4570         ADD_64(estats->error_bytes_received_hi,
4571                estats->rx_stat_ifhcinbadoctets_hi,
4572                estats->error_bytes_received_lo,
4573                estats->rx_stat_ifhcinbadoctets_lo);
4574
4575         if (bp->port.pmf) {
4576                 estats->mac_filter_discard =
4577                                 le32_to_cpu(tport->mac_filter_discard);
4578                 estats->xxoverflow_discard =
4579                                 le32_to_cpu(tport->xxoverflow_discard);
4580                 estats->brb_truncate_discard =
4581                                 le32_to_cpu(tport->brb_truncate_discard);
4582                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4583         }
4584
4585         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4586
4587         bp->stats_pending = 0;
4588
4589         return 0;
4590 }
4591
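/* Translate the driver's hi/lo statistics into the generic
 * net_device_stats counters.  bnx2x_hilo() appears to fold a {hi, lo}
 * pair into a long, roughly:
 *
 *	return ((u64)hi << 32) | lo;	(on 64-bit kernels)
 *	return lo;			(on 32-bit kernels)
 */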
4592 static void bnx2x_net_stats_update(struct bnx2x *bp)
4593 {
4594         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4595         struct net_device_stats *nstats = &bp->dev->stats;
4596         int i;
4597
4598         nstats->rx_packets =
4599                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4600                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4601                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4602
4603         nstats->tx_packets =
4604                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4605                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4606                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4607
4608         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4609
4610         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4611
4612         nstats->rx_dropped = estats->mac_discard;
4613         for_each_queue(bp, i)
4614                 nstats->rx_dropped +=
4615                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4616
4617         nstats->tx_dropped = 0;
4618
4619         nstats->multicast =
4620                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4621
4622         nstats->collisions =
4623                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4624
4625         nstats->rx_length_errors =
4626                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4627                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4628         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4629                                  bnx2x_hilo(&estats->brb_truncate_hi);
4630         nstats->rx_crc_errors =
4631                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4632         nstats->rx_frame_errors =
4633                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4634         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4635         nstats->rx_missed_errors = estats->xxoverflow_discard;
4636
4637         nstats->rx_errors = nstats->rx_length_errors +
4638                             nstats->rx_over_errors +
4639                             nstats->rx_crc_errors +
4640                             nstats->rx_frame_errors +
4641                             nstats->rx_fifo_errors +
4642                             nstats->rx_missed_errors;
4643
4644         nstats->tx_aborted_errors =
4645                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4646                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4647         nstats->tx_carrier_errors =
4648                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4649         nstats->tx_fifo_errors = 0;
4650         nstats->tx_heartbeat_errors = 0;
4651         nstats->tx_window_errors = 0;
4652
4653         nstats->tx_errors = nstats->tx_aborted_errors +
4654                             nstats->tx_carrier_errors +
4655             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4656 }
4657
4658 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4659 {
4660         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4661         int i;
4662
4663         estats->driver_xoff = 0;
4664         estats->rx_err_discard_pkt = 0;
4665         estats->rx_skb_alloc_failed = 0;
4666         estats->hw_csum_err = 0;
4667         for_each_queue(bp, i) {
4668                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4669
4670                 estats->driver_xoff += qstats->driver_xoff;
4671                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4672                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4673                 estats->hw_csum_err += qstats->hw_csum_err;
4674         }
4675 }
4676
4677 static void bnx2x_stats_update(struct bnx2x *bp)
4678 {
4679         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4680
4681         if (*stats_comp != DMAE_COMP_VAL)
4682                 return;
4683
4684         if (bp->port.pmf)
4685                 bnx2x_hw_stats_update(bp);
4686
4687         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4688                 BNX2X_ERR("storm stats not updated for 3 consecutive polls\n");
4689                 bnx2x_panic();
4690                 return;
4691         }
4692
4693         bnx2x_net_stats_update(bp);
4694         bnx2x_drv_stats_update(bp);
4695
4696         if (netif_msg_timer(bp)) {
4697                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4698                 int i;
4699
4700                 printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
4701                        bp->dev->name,
4702                        estats->brb_drop_lo, estats->brb_truncate_lo);
4703
4704                 for_each_queue(bp, i) {
4705                         struct bnx2x_fastpath *fp = &bp->fp[i];
4706                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4707
4708                         printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
4709                                           "  rx pkt(%lu)  rx calls(%lu %lu)\n",
4710                                fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4711                                fp->rx_comp_cons),
4712                                le16_to_cpu(*fp->rx_cons_sb),
4713                                bnx2x_hilo(&qstats->
4714                                           total_unicast_packets_received_hi),
4715                                fp->rx_calls, fp->rx_pkt);
4716                 }
4717
4718                 for_each_queue(bp, i) {
4719                         struct bnx2x_fastpath *fp = &bp->fp[i];
4720                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4721                         struct netdev_queue *txq =
4722                                 netdev_get_tx_queue(bp->dev, i);
4723
4724                         printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
4725                                           "  tx pkt(%lu) tx calls (%lu)"
4726                                           "  %s (Xoff events %u)\n",
4727                                fp->name, bnx2x_tx_avail(fp),
4728                                le16_to_cpu(*fp->tx_cons_sb),
4729                                bnx2x_hilo(&qstats->
4730                                           total_unicast_packets_transmitted_hi),
4731                                fp->tx_pkt,
4732                                (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4733                                qstats->driver_xoff);
4734                 }
4735         }
4736
4737         bnx2x_hw_stats_post(bp);
4738         bnx2x_storm_stats_post(bp);
4739 }
4740
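/* Queue the final DMAE flush issued when statistics collection stops:
 * host port stats are written back to port_stx and function stats to
 * func_stx.  When both areas exist the two commands are chained - the
 * first completes into the loader "go" register (dmae_reg_go_c[]), which
 * kicks the second, and only the last command reports into stats_comp.
 */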
4741 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4742 {
4743         struct dmae_command *dmae;
4744         u32 opcode;
4745         int loader_idx = PMF_DMAE_C(bp);
4746         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4747
4748         bp->executer_idx = 0;
4749
4750         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4751                   DMAE_CMD_C_ENABLE |
4752                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4753 #ifdef __BIG_ENDIAN
4754                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4755 #else
4756                   DMAE_CMD_ENDIANITY_DW_SWAP |
4757 #endif
4758                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4759                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4760
4761         if (bp->port.port_stx) {
4762
4763                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4764                 if (bp->func_stx)
4765                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4766                 else
4767                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4768                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4769                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4770                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4771                 dmae->dst_addr_hi = 0;
4772                 dmae->len = sizeof(struct host_port_stats) >> 2;
4773                 if (bp->func_stx) {
4774                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4775                         dmae->comp_addr_hi = 0;
4776                         dmae->comp_val = 1;
4777                 } else {
4778                         dmae->comp_addr_lo =
4779                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4780                         dmae->comp_addr_hi =
4781                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4782                         dmae->comp_val = DMAE_COMP_VAL;
4783
4784                         *stats_comp = 0;
4785                 }
4786         }
4787
4788         if (bp->func_stx) {
4789
4790                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4791                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4792                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4793                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4794                 dmae->dst_addr_lo = bp->func_stx >> 2;
4795                 dmae->dst_addr_hi = 0;
4796                 dmae->len = sizeof(struct host_func_stats) >> 2;
4797                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4798                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4799                 dmae->comp_val = DMAE_COMP_VAL;
4800
4801                 *stats_comp = 0;
4802         }
4803 }
4804
4805 static void bnx2x_stats_stop(struct bnx2x *bp)
4806 {
4807         int update = 0;
4808
4809         bnx2x_stats_comp(bp);
4810
4811         if (bp->port.pmf)
4812                 update = (bnx2x_hw_stats_update(bp) == 0);
4813
4814         update |= (bnx2x_storm_stats_update(bp) == 0);
4815
4816         if (update) {
4817                 bnx2x_net_stats_update(bp);
4818
4819                 if (bp->port.pmf)
4820                         bnx2x_port_stats_stop(bp);
4821
4822                 bnx2x_hw_stats_post(bp);
4823                 bnx2x_stats_comp(bp);
4824         }
4825 }
4826
4827 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4828 {
4829 }
4830
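/* Statistics state machine: bnx2x_stats_stm[state][event] yields the
 * action to run and the state to enter next.  Two states (DISABLED,
 * ENABLED) and four events (PMF, LINK_UP, UPDATE, STOP) cover the whole
 * life cycle; e.g. the periodic timer feeds STATS_EVENT_UPDATE, which is
 * a no-op while DISABLED and a full update while ENABLED.
 */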
4831 static const struct {
4832         void (*action)(struct bnx2x *bp);
4833         enum bnx2x_stats_state next_state;
4834 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4835 /* state        event   */
4836 {
4837 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4838 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4839 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4840 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4841 },
4842 {
4843 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4844 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4845 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4846 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4847 }
4848 };
4849
4850 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4851 {
4852         enum bnx2x_stats_state state = bp->stats_state;
4853
4854         if (unlikely(bp->panic))
4855                 return;
4856
4857         bnx2x_stats_stm[state][event].action(bp);
4858         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4859
4860         /* Make sure the new stats state is visible before further events */
4861         smp_wmb();
4862
4863         if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4864                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4865                    state, event, bp->stats_state);
4866 }
4867
4868 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4869 {
4870         struct dmae_command *dmae;
4871         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4872
4873         /* sanity */
4874         if (!bp->port.pmf || !bp->port.port_stx) {
4875                 BNX2X_ERR("BUG! PMF flag or port_stx missing\n");
4876                 return;
4877         }
4878
4879         bp->executer_idx = 0;
4880
4881         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4882         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4883                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4884                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4885 #ifdef __BIG_ENDIAN
4886                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4887 #else
4888                         DMAE_CMD_ENDIANITY_DW_SWAP |
4889 #endif
4890                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4891                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4892         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4893         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4894         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4895         dmae->dst_addr_hi = 0;
4896         dmae->len = sizeof(struct host_port_stats) >> 2;
4897         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4898         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4899         dmae->comp_val = DMAE_COMP_VAL;
4900
4901         *stats_comp = 0;
4902         bnx2x_hw_stats_post(bp);
4903         bnx2x_stats_comp(bp);
4904 }
4905
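/* As PMF, walk every vnic that shares this port (func = 2*vn + port),
 * temporarily point bp->func_stx at that function's stats mailbox from
 * shared memory, and run the usual init + DMAE post so each function
 * starts from a consistent stats area.  Our own func_stx is saved and
 * restored around the loop.
 */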
4906 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4907 {
4908         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4909         int port = BP_PORT(bp);
4910         int func;
4911         u32 func_stx;
4912
4913         /* sanity */
4914         if (!bp->port.pmf || !bp->func_stx) {
4915                 BNX2X_ERR("BUG! PMF flag or func_stx missing\n");
4916                 return;
4917         }
4918
4919         /* save our func_stx */
4920         func_stx = bp->func_stx;
4921
4922         for (vn = VN_0; vn < vn_max; vn++) {
4923                 func = 2*vn + port;
4924
4925                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4926                 bnx2x_func_stats_init(bp);
4927                 bnx2x_hw_stats_post(bp);
4928                 bnx2x_stats_comp(bp);
4929         }
4930
4931         /* restore our func_stx */
4932         bp->func_stx = func_stx;
4933 }
4934
4935 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4936 {
4937         struct dmae_command *dmae = &bp->stats_dmae;
4938         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4939
4940         /* sanity */
4941         if (!bp->func_stx) {
4942                 BNX2X_ERR("BUG! func_stx not set\n");
4943                 return;
4944         }
4945
4946         bp->executer_idx = 0;
4947         memset(dmae, 0, sizeof(struct dmae_command));
4948
4949         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4950                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4951                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4952 #ifdef __BIG_ENDIAN
4953                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4954 #else
4955                         DMAE_CMD_ENDIANITY_DW_SWAP |
4956 #endif
4957                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4958                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4959         dmae->src_addr_lo = bp->func_stx >> 2;
4960         dmae->src_addr_hi = 0;
4961         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4962         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4963         dmae->len = sizeof(struct host_func_stats) >> 2;
4964         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4965         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4966         dmae->comp_val = DMAE_COMP_VAL;
4967
4968         *stats_comp = 0;
4969         bnx2x_hw_stats_post(bp);
4970         bnx2x_stats_comp(bp);
4971 }
4972
4973 static void bnx2x_stats_init(struct bnx2x *bp)
4974 {
4975         int port = BP_PORT(bp);
4976         int func = BP_FUNC(bp);
4977         int i;
4978
4979         bp->stats_pending = 0;
4980         bp->executer_idx = 0;
4981         bp->stats_counter = 0;
4982
4983         /* port and func stats for management */
4984         if (!BP_NOMCP(bp)) {
4985                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4986                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4987
4988         } else {
4989                 bp->port.port_stx = 0;
4990                 bp->func_stx = 0;
4991         }
4992         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4993            bp->port.port_stx, bp->func_stx);
4994
4995         /* port stats */
4996         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4997         bp->port.old_nig_stats.brb_discard =
4998                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4999         bp->port.old_nig_stats.brb_truncate =
5000                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
5001         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
5002                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
5003         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
5004                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
5005
5006         /* function stats */
5007         for_each_queue(bp, i) {
5008                 struct bnx2x_fastpath *fp = &bp->fp[i];
5009
5010                 memset(&fp->old_tclient, 0,
5011                        sizeof(struct tstorm_per_client_stats));
5012                 memset(&fp->old_uclient, 0,
5013                        sizeof(struct ustorm_per_client_stats));
5014                 memset(&fp->old_xclient, 0,
5015                        sizeof(struct xstorm_per_client_stats));
5016                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5017         }
5018
5019         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5020         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5021
5022         bp->stats_state = STATS_STATE_DISABLED;
5023
5024         if (bp->port.pmf) {
5025                 if (bp->port.port_stx)
5026                         bnx2x_port_stats_base_init(bp);
5027
5028                 if (bp->func_stx)
5029                         bnx2x_func_stats_base_init(bp);
5030
5031         } else if (bp->func_stx)
5032                 bnx2x_func_stats_base_update(bp);
5033 }
5034
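/* Periodic (current_interval) timer: services the poll-mode fast path if
 * enabled, drives the driver<->MCP heartbeat, and feeds the statistics
 * state machine.  For the heartbeat the driver writes an incrementing
 * sequence number into drv_pulse_mb and the MCP echoes it into
 * mcp_pulse_mb; with e.g. drv_pulse == 0x12 the echo must read 0x12 or
 * 0x11 (response still pending) - anything else means a lost heartbeat.
 */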
5035 static void bnx2x_timer(unsigned long data)
5036 {
5037         struct bnx2x *bp = (struct bnx2x *) data;
5038
5039         if (!netif_running(bp->dev))
5040                 return;
5041
5042         if (atomic_read(&bp->intr_sem) != 0)
5043                 goto timer_restart;
5044
5045         if (poll) {
5046                 struct bnx2x_fastpath *fp = &bp->fp[0];
5047                 int rc;
5048
5049                 bnx2x_tx_int(fp);
5050                 rc = bnx2x_rx_int(fp, 1000);
5051         }
5052
5053         if (!BP_NOMCP(bp)) {
5054                 int func = BP_FUNC(bp);
5055                 u32 drv_pulse;
5056                 u32 mcp_pulse;
5057
5058                 ++bp->fw_drv_pulse_wr_seq;
5059                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5060                 /* TBD - add SYSTEM_TIME */
5061                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5062                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5063
5064                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5065                              MCP_PULSE_SEQ_MASK);
5066                 /* The delta between driver pulse and mcp response
5067                  * should be 1 (before mcp response) or 0 (after mcp response)
5068                  */
5069                 if ((drv_pulse != mcp_pulse) &&
5070                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5071                         /* someone lost a heartbeat... */
5072                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5073                                   drv_pulse, mcp_pulse);
5074                 }
5075         }
5076
5077         if (bp->state == BNX2X_STATE_OPEN)
5078                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5079
5080 timer_restart:
5081         mod_timer(&bp->timer, jiffies + bp->current_interval);
5082 }
5083
5084 /* end of Statistics */
5085
5086 /* nic init */
5087
5088 /*
5089  * nic init service functions
5090  */
5091
5092 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5093 {
5094         int port = BP_PORT(bp);
5095
5096         /* USTORM and CSTORM status block data both live in CSTORM memory */
5097         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5098                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5099                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5100         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5101                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5102                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5103 }
5104
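/* Program one non-default status block: publish its DMA address to the
 * CSTORM internal memory (both the USTORM and CSTORM halves live there),
 * tag it with the owning function, and start with every host-coalescing
 * index disabled (HC_DISABLE = 1) - bnx2x_update_coalesce() later
 * re-enables just the rx/tx CQ indices that are actually used.  The
 * final IGU ack enables interrupt generation for the block.
 */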
5105 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5106                           dma_addr_t mapping, int sb_id)
5107 {
5108         int port = BP_PORT(bp);
5109         int func = BP_FUNC(bp);
5110         int index;
5111         u64 section;
5112
5113         /* USTORM */
5114         section = ((u64)mapping) + offsetof(struct host_status_block,
5115                                             u_status_block);
5116         sb->u_status_block.status_block_id = sb_id;
5117
5118         REG_WR(bp, BAR_CSTRORM_INTMEM +
5119                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5120         REG_WR(bp, BAR_CSTRORM_INTMEM +
5121                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5122                U64_HI(section));
5123         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5124                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5125
5126         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5127                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5128                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5129
5130         /* CSTORM */
5131         section = ((u64)mapping) + offsetof(struct host_status_block,
5132                                             c_status_block);
5133         sb->c_status_block.status_block_id = sb_id;
5134
5135         REG_WR(bp, BAR_CSTRORM_INTMEM +
5136                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5137         REG_WR(bp, BAR_CSTRORM_INTMEM +
5138                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5139                U64_HI(section));
5140         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5141                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5142
5143         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5144                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5145                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5146
5147         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5148 }
5149
5150 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5151 {
5152         int func = BP_FUNC(bp);
5153
5154         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5155                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5156                         sizeof(struct tstorm_def_status_block)/4);
5157         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5158                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5159                         sizeof(struct cstorm_def_status_block_u)/4);
5160         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5161                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5162                         sizeof(struct cstorm_def_status_block_c)/4);
5163         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5164                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5165                         sizeof(struct xstorm_def_status_block)/4);
5166 }
5167
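/* Set up the default status block, which carries slow-path indices for
 * all four storms plus the attention (ATTN) block.  For each dynamic
 * attention group the four AEU enable registers (0x10 apart per group)
 * are cached in bp->attn_group[] so the ISR can decode which signals
 * belong to a fired group without extra register reads.
 */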
5168 static void bnx2x_init_def_sb(struct bnx2x *bp,
5169                               struct host_def_status_block *def_sb,
5170                               dma_addr_t mapping, int sb_id)
5171 {
5172         int port = BP_PORT(bp);
5173         int func = BP_FUNC(bp);
5174         int index, val, reg_offset;
5175         u64 section;
5176
5177         /* ATTN */
5178         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5179                                             atten_status_block);
5180         def_sb->atten_status_block.status_block_id = sb_id;
5181
5182         bp->attn_state = 0;
5183
5184         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5185                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5186
5187         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5188                 bp->attn_group[index].sig[0] = REG_RD(bp,
5189                                                      reg_offset + 0x10*index);
5190                 bp->attn_group[index].sig[1] = REG_RD(bp,
5191                                                reg_offset + 0x4 + 0x10*index);
5192                 bp->attn_group[index].sig[2] = REG_RD(bp,
5193                                                reg_offset + 0x8 + 0x10*index);
5194                 bp->attn_group[index].sig[3] = REG_RD(bp,
5195                                                reg_offset + 0xc + 0x10*index);
5196         }
5197
5198         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5199                              HC_REG_ATTN_MSG0_ADDR_L);
5200
5201         REG_WR(bp, reg_offset, U64_LO(section));
5202         REG_WR(bp, reg_offset + 4, U64_HI(section));
5203
5204         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5205
5206         val = REG_RD(bp, reg_offset);
5207         val |= sb_id;
5208         REG_WR(bp, reg_offset, val);
5209
5210         /* USTORM */
5211         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5212                                             u_def_status_block);
5213         def_sb->u_def_status_block.status_block_id = sb_id;
5214
5215         REG_WR(bp, BAR_CSTRORM_INTMEM +
5216                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5217         REG_WR(bp, BAR_CSTRORM_INTMEM +
5218                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5219                U64_HI(section));
5220         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5221                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5222
5223         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5224                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5225                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5226
5227         /* CSTORM */
5228         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5229                                             c_def_status_block);
5230         def_sb->c_def_status_block.status_block_id = sb_id;
5231
5232         REG_WR(bp, BAR_CSTRORM_INTMEM +
5233                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5234         REG_WR(bp, BAR_CSTRORM_INTMEM +
5235                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5236                U64_HI(section));
5237         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5238                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5239
5240         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5241                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5242                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5243
5244         /* TSTORM */
5245         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5246                                             t_def_status_block);
5247         def_sb->t_def_status_block.status_block_id = sb_id;
5248
5249         REG_WR(bp, BAR_TSTRORM_INTMEM +
5250                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5251         REG_WR(bp, BAR_TSTRORM_INTMEM +
5252                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5253                U64_HI(section));
5254         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5255                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5256
5257         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5258                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5259                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5260
5261         /* XSTORM */
5262         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5263                                             x_def_status_block);
5264         def_sb->x_def_status_block.status_block_id = sb_id;
5265
5266         REG_WR(bp, BAR_XSTRORM_INTMEM +
5267                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5268         REG_WR(bp, BAR_XSTRORM_INTMEM +
5269                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5270                U64_HI(section));
5271         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5272                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5273
5274         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5275                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5276                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5277
5278         bp->stats_pending = 0;
5279         bp->set_mac_pending = 0;
5280
5281         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5282 }
5283
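/* Apply the interrupt coalescing parameters to every queue.  The HC
 * timeout fields appear to be programmed in units of 4 * BNX2X_BTR
 * microseconds (BNX2X_BTR being the base tick resolution), so
 * bp->rx_ticks/tx_ticks are scaled down accordingly; a resulting timeout
 * of 0 cannot coalesce, so the matching HC_DISABLE flag is set instead.
 */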
5284 static void bnx2x_update_coalesce(struct bnx2x *bp)
5285 {
5286         int port = BP_PORT(bp);
5287         int i;
5288
5289         for_each_queue(bp, i) {
5290                 int sb_id = bp->fp[i].sb_id;
5291
5292                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5293                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5294                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5295                                                       U_SB_ETH_RX_CQ_INDEX),
5296                         bp->rx_ticks/(4 * BNX2X_BTR));
5297                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5298                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5299                                                        U_SB_ETH_RX_CQ_INDEX),
5300                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5301
5302                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5303                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5304                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5305                                                       C_SB_ETH_TX_CQ_INDEX),
5306                         bp->tx_ticks/(4 * BNX2X_BTR));
5307                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5308                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5309                                                        C_SB_ETH_TX_CQ_INDEX),
5310                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5311         }
5312 }
5313
5314 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5315                                        struct bnx2x_fastpath *fp, int last)
5316 {
5317         int i;
5318
5319         for (i = 0; i < last; i++) {
5320                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5321                 struct sk_buff *skb = rx_buf->skb;
5322
5323                 if (skb == NULL) {
5324                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5325                         continue;
5326                 }
5327
5328                 if (fp->tpa_state[i] == BNX2X_TPA_START)
5329                         dma_unmap_single(&bp->pdev->dev,
5330                                          dma_unmap_addr(rx_buf, mapping),
5331                                          bp->rx_buf_size, DMA_FROM_DEVICE);
5332
5333                 dev_kfree_skb(skb);
5334                 rx_buf->skb = NULL;
5335         }
5336 }
5337
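/* Build the rx rings for every queue.  Each ring is a chain of pages
 * whose tail holds a "next page" pointer element: the BD and SGE rings
 * reserve their last two descriptors and write the pointer into the
 * second-to-last (hence "CNT * i - 2" below), while the completion ring
 * uses its single last entry ("CNT * i - 1").  The modulo arithmetic
 * wraps the final page back to the first.  When TPA (LRO) is enabled an
 * skb pool of max_agg_queues buffers is pre-allocated per queue; a
 * failed allocation disables TPA for that queue only.
 */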
5338 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5339 {
5340         int func = BP_FUNC(bp);
5341         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5342                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
5343         u16 ring_prod, cqe_ring_prod;
5344         int i, j;
5345
5346         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5347         DP(NETIF_MSG_IFUP,
5348            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5349
5350         if (bp->flags & TPA_ENABLE_FLAG) {
5351
5352                 for_each_queue(bp, j) {
5353                         struct bnx2x_fastpath *fp = &bp->fp[j];
5354
5355                         for (i = 0; i < max_agg_queues; i++) {
5356                                 fp->tpa_pool[i].skb =
5357                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5358                                 if (!fp->tpa_pool[i].skb) {
5359                                         BNX2X_ERR("Failed to allocate TPA "
5360                                                   "skb pool for queue[%d] - "
5361                                                   "disabling TPA on this "
5362                                                   "queue!\n", j);
5363                                         bnx2x_free_tpa_pool(bp, fp, i);
5364                                         fp->disable_tpa = 1;
5365                                         break;
5366                                 }
5367                                 /* use this queue's pool (fp), not bp->fp[0]'s */
5368                                 dma_unmap_addr_set(&fp->tpa_pool[i],
5369                                                    mapping, 0);
5370                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
5371                         }
5372                 }
5373         }
5374
5375         for_each_queue(bp, j) {
5376                 struct bnx2x_fastpath *fp = &bp->fp[j];
5377
5378                 fp->rx_bd_cons = 0;
5379                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5380                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5381
5382                 /* "next page" elements initialization */
5383                 /* SGE ring */
5384                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5385                         struct eth_rx_sge *sge;
5386
5387                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5388                         sge->addr_hi =
5389                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5390                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5391                         sge->addr_lo =
5392                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5393                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5394                 }
5395
5396                 bnx2x_init_sge_ring_bit_mask(fp);
5397
5398                 /* RX BD ring */
5399                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5400                         struct eth_rx_bd *rx_bd;
5401
5402                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5403                         rx_bd->addr_hi =
5404                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5405                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5406                         rx_bd->addr_lo =
5407                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5408                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5409                 }
5410
5411                 /* CQ ring */
5412                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5413                         struct eth_rx_cqe_next_page *nextpg;
5414
5415                         nextpg = (struct eth_rx_cqe_next_page *)
5416                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5417                         nextpg->addr_hi =
5418                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5419                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5420                         nextpg->addr_lo =
5421                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5422                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5423                 }
5424
5425                 /* Allocate SGEs and initialize the ring elements */
5426                 for (i = 0, ring_prod = 0;
5427                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5428
5429                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5430                                 BNX2X_ERR("was only able to allocate "
5431                                           "%d rx sges\n", i);
5432                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5433                                 /* Cleanup already allocated elements */
5434                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5435                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5436                                 fp->disable_tpa = 1;
5437                                 ring_prod = 0;
5438                                 break;
5439                         }
5440                         ring_prod = NEXT_SGE_IDX(ring_prod);
5441                 }
5442                 fp->rx_sge_prod = ring_prod;
5443
5444                 /* Allocate BDs and initialize BD ring */
5445                 fp->rx_comp_cons = 0;
5446                 cqe_ring_prod = ring_prod = 0;
5447                 for (i = 0; i < bp->rx_ring_size; i++) {
5448                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5449                                 BNX2X_ERR("was only able to allocate "
5450                                           "%d rx skbs on queue[%d]\n", i, j);
5451                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5452                                 break;
5453                         }
5454                         ring_prod = NEXT_RX_IDX(ring_prod);
5455                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5456                         WARN_ON(ring_prod <= i);
5457                 }
5458
5459                 fp->rx_bd_prod = ring_prod;
5460                 /* must not have more available CQEs than BDs */
5461                 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5462                                          cqe_ring_prod);
5463                 fp->rx_pkt = fp->rx_calls = 0;
5464
5465                 /* Warning!
5466                  * this will generate an interrupt (to the TSTORM)
5467                  * must only be done after chip is initialized
5468                  */
5469                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5470                                      fp->rx_sge_prod);
5471                 if (j != 0)
5472                         continue;
5473
5474                 REG_WR(bp, BAR_USTRORM_INTMEM +
5475                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5476                        U64_LO(fp->rx_comp_mapping));
5477                 REG_WR(bp, BAR_USTRORM_INTMEM +
5478                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5479                        U64_HI(fp->rx_comp_mapping));
5480         }
5481 }
5482
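/* Build the TX rings the same way: the last BD of every TX page is a
 * next_bd pointer chaining the pages into a circular ring, and the
 * doorbell data is primed so the first producer update is well formed.
 */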
5483 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5484 {
5485         int i, j;
5486
5487         for_each_queue(bp, j) {
5488                 struct bnx2x_fastpath *fp = &bp->fp[j];
5489
5490                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5491                         struct eth_tx_next_bd *tx_next_bd =
5492                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5493
5494                         tx_next_bd->addr_hi =
5495                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5496                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5497                         tx_next_bd->addr_lo =
5498                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5499                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5500                 }
5501
5502                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5503                 fp->tx_db.data.zero_fill1 = 0;
5504                 fp->tx_db.data.prod = 0;
5505
5506                 fp->tx_pkt_prod = 0;
5507                 fp->tx_pkt_cons = 0;
5508                 fp->tx_bd_prod = 0;
5509                 fp->tx_bd_cons = 0;
5510                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5511                 fp->tx_pkt = 0;
5512         }
5513 }
5514
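/* Set up the slow-path queue (SPQ): a one-page ring of BDs used to post
 * ramrod commands.  The page base and the initial producer index are
 * mirrored into XSTORM fast memory so the firmware can consume them.
 */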
5515 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5516 {
5517         int func = BP_FUNC(bp);
5518
5519         spin_lock_init(&bp->spq_lock);
5520
5521         bp->spq_left = MAX_SPQ_PENDING;
5522         bp->spq_prod_idx = 0;
5523         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5524         bp->spq_prod_bd = bp->spq;
5525         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5526
5527         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5528                U64_LO(bp->spq_mapping));
5529         REG_WR(bp,
5530                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5531                U64_HI(bp->spq_mapping));
5532
5533         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5534                bp->spq_prod_idx);
5535 }
5536
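/* Fill the per-connection ethernet context for every queue: the USTORM
 * section describes the RX side (status-block binding, buffer sizes,
 * BD/SGE page bases, optional TPA), the CSTORM/XSTORM sections describe
 * the TX side, and the cdu_usage/cdu_reserved words carry the CDU
 * context-validation values derived from the HW CID.
 */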
5537 static void bnx2x_init_context(struct bnx2x *bp)
5538 {
5539         int i;
5540
5541         /* Rx */
5542         for_each_queue(bp, i) {
5543                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5544                 struct bnx2x_fastpath *fp = &bp->fp[i];
5545                 u8 cl_id = fp->cl_id;
5546
5547                 context->ustorm_st_context.common.sb_index_numbers =
5548                                                 BNX2X_RX_SB_INDEX_NUM;
5549                 context->ustorm_st_context.common.clientId = cl_id;
5550                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5551                 context->ustorm_st_context.common.flags =
5552                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5553                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5554                 context->ustorm_st_context.common.statistics_counter_id =
5555                                                 cl_id;
5556                 context->ustorm_st_context.common.mc_alignment_log_size =
5557                                                 BNX2X_RX_ALIGN_SHIFT;
5558                 context->ustorm_st_context.common.bd_buff_size =
5559                                                 bp->rx_buf_size;
5560                 context->ustorm_st_context.common.bd_page_base_hi =
5561                                                 U64_HI(fp->rx_desc_mapping);
5562                 context->ustorm_st_context.common.bd_page_base_lo =
5563                                                 U64_LO(fp->rx_desc_mapping);
5564                 if (!fp->disable_tpa) {
5565                         context->ustorm_st_context.common.flags |=
5566                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5567                         context->ustorm_st_context.common.sge_buff_size =
5568                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5569                                            0xffff);
5570                         context->ustorm_st_context.common.sge_page_base_hi =
5571                                                 U64_HI(fp->rx_sge_mapping);
5572                         context->ustorm_st_context.common.sge_page_base_lo =
5573                                                 U64_LO(fp->rx_sge_mapping);
5574
5575                         context->ustorm_st_context.common.max_sges_for_packet =
5576                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5577                         context->ustorm_st_context.common.max_sges_for_packet =
5578                                 ((context->ustorm_st_context.common.
5579                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5580                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5581                 }
5582
5583                 context->ustorm_ag_context.cdu_usage =
5584                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5585                                                CDU_REGION_NUMBER_UCM_AG,
5586                                                ETH_CONNECTION_TYPE);
5587
5588                 context->xstorm_ag_context.cdu_reserved =
5589                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5590                                                CDU_REGION_NUMBER_XCM_AG,
5591                                                ETH_CONNECTION_TYPE);
5592         }
5593
5594         /* Tx */
5595         for_each_queue(bp, i) {
5596                 struct bnx2x_fastpath *fp = &bp->fp[i];
5597                 struct eth_context *context =
5598                         bnx2x_sp(bp, context[i].eth);
5599
5600                 context->cstorm_st_context.sb_index_number =
5601                                                 C_SB_ETH_TX_CQ_INDEX;
5602                 context->cstorm_st_context.status_block_id = fp->sb_id;
5603
5604                 context->xstorm_st_context.tx_bd_page_base_hi =
5605                                                 U64_HI(fp->tx_desc_mapping);
5606                 context->xstorm_st_context.tx_bd_page_base_lo =
5607                                                 U64_LO(fp->tx_desc_mapping);
5608                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5609                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5610         }
5611 }
5612
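/* Program the RSS indirection table: each of the
 * TSTORM_INDIRECTION_TABLE_SIZE entries maps a hash result to a client
 * id, spreading flows round-robin over the queues (entry i gets
 * cl_id_base + (i % num_queues)).  Nothing to do when RSS is disabled.
 */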
5613 static void bnx2x_init_ind_table(struct bnx2x *bp)
5614 {
5615         int func = BP_FUNC(bp);
5616         int i;
5617
5618         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5619                 return;
5620
5621         DP(NETIF_MSG_IFUP,
5622            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5623         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5624                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5625                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5626                         bp->fp->cl_id + (i % bp->num_queues));
5627 }
5628
5629 static void bnx2x_set_client_config(struct bnx2x *bp)
5630 {
5631         struct tstorm_eth_client_config tstorm_client = {0};
5632         int port = BP_PORT(bp);
5633         int i;
5634
5635         tstorm_client.mtu = bp->dev->mtu;
5636         tstorm_client.config_flags =
5637                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5638                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5639 #ifdef BCM_VLAN
5640         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5641                 tstorm_client.config_flags |=
5642                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5643                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5644         }
5645 #endif
5646
5647         for_each_queue(bp, i) {
5648                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5649
5650                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5651                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5652                        ((u32 *)&tstorm_client)[0]);
5653                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5654                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5655                        ((u32 *)&tstorm_client)[1]);
5656         }
5657
5658         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5659            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5660 }
5661
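/* Translate the driver rx_mode into the TSTORM MAC filter table.  Each
 * accept/drop field is a per-client bitmask (rx_mode_cl_mask); PROMISC,
 * for instance, sets all three accept_all fields and additionally lets
 * management unicast frames through the NIG LLH filter.
 */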
5662 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5663 {
5664         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5665         int mode = bp->rx_mode;
5666         int mask = bp->rx_mode_cl_mask;
5667         int func = BP_FUNC(bp);
5668         int port = BP_PORT(bp);
5669         int i;
5670         /* All but management unicast packets should pass to the host as well */
5671         u32 llh_mask =
5672                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5673                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5674                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5675                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5676
5677         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5678
5679         switch (mode) {
5680         case BNX2X_RX_MODE_NONE: /* no Rx */
5681                 tstorm_mac_filter.ucast_drop_all = mask;
5682                 tstorm_mac_filter.mcast_drop_all = mask;
5683                 tstorm_mac_filter.bcast_drop_all = mask;
5684                 break;
5685
5686         case BNX2X_RX_MODE_NORMAL:
5687                 tstorm_mac_filter.bcast_accept_all = mask;
5688                 break;
5689
5690         case BNX2X_RX_MODE_ALLMULTI:
5691                 tstorm_mac_filter.mcast_accept_all = mask;
5692                 tstorm_mac_filter.bcast_accept_all = mask;
5693                 break;
5694
5695         case BNX2X_RX_MODE_PROMISC:
5696                 tstorm_mac_filter.ucast_accept_all = mask;
5697                 tstorm_mac_filter.mcast_accept_all = mask;
5698                 tstorm_mac_filter.bcast_accept_all = mask;
5699                 /* pass management unicast packets as well */
5700                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5701                 break;
5702
5703         default:
5704                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5705                 break;
5706         }
5707
5708         REG_WR(bp,
5709                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5710                llh_mask);
5711
5712         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5713                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5714                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5715                        ((u32 *)&tstorm_mac_filter)[i]);
5716
5717 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5718                    ((u32 *)&tstorm_mac_filter)[i]); */
5719         }
5720
5721         if (mode != BNX2X_RX_MODE_NONE)
5722                 bnx2x_set_client_config(bp);
5723 }
5724
5725 static void bnx2x_init_internal_common(struct bnx2x *bp)
5726 {
5727         int i;
5728
5729         /* Zero this manually as its initialization is
5730            currently missing in the initTool */
5731         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5732                 REG_WR(bp, BAR_USTRORM_INTMEM +
5733                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5734 }
5735
5736 static void bnx2x_init_internal_port(struct bnx2x *bp)
5737 {
5738         int port = BP_PORT(bp);
5739
5740         REG_WR(bp,
5741                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5742         REG_WR(bp,
5743                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5744         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5745         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5746 }
5747
5748 static void bnx2x_init_internal_func(struct bnx2x *bp)
5749 {
5750         struct tstorm_eth_function_common_config tstorm_config = {0};
5751         struct stats_indication_flags stats_flags = {0};
5752         int port = BP_PORT(bp);
5753         int func = BP_FUNC(bp);
5754         int i, j;
5755         u32 offset;
5756         u16 max_agg_size;
5757
5758         tstorm_config.config_flags = RSS_FLAGS(bp);
5759
5760         if (is_multi(bp))
5761                 tstorm_config.rss_result_mask = MULTI_MASK;
5762
5763         /* Enable TPA if needed */
5764         if (bp->flags & TPA_ENABLE_FLAG)
5765                 tstorm_config.config_flags |=
5766                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5767
5768         if (IS_E1HMF(bp))
5769                 tstorm_config.config_flags |=
5770                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5771
5772         tstorm_config.leading_client_id = BP_L_ID(bp);
5773
5774         REG_WR(bp, BAR_TSTRORM_INTMEM +
5775                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5776                (*(u32 *)&tstorm_config));
5777
5778         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5779         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5780         bnx2x_set_storm_rx_mode(bp);
5781
5782         for_each_queue(bp, i) {
5783                 u8 cl_id = bp->fp[i].cl_id;
5784
5785                 /* reset xstorm per client statistics */
5786                 offset = BAR_XSTRORM_INTMEM +
5787                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5788                 for (j = 0;
5789                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5790                         REG_WR(bp, offset + j*4, 0);
5791
5792                 /* reset tstorm per client statistics */
5793                 offset = BAR_TSTRORM_INTMEM +
5794                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5795                 for (j = 0;
5796                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5797                         REG_WR(bp, offset + j*4, 0);
5798
5799                 /* reset ustorm per client statistics */
5800                 offset = BAR_USTRORM_INTMEM +
5801                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5802                 for (j = 0;
5803                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5804                         REG_WR(bp, offset + j*4, 0);
5805         }
5806
5807         /* Init statistics related context */
5808         stats_flags.collect_eth = 1;
5809
5810         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5811                ((u32 *)&stats_flags)[0]);
5812         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5813                ((u32 *)&stats_flags)[1]);
5814
5815         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5816                ((u32 *)&stats_flags)[0]);
5817         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5818                ((u32 *)&stats_flags)[1]);
5819
5820         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5821                ((u32 *)&stats_flags)[0]);
5822         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5823                ((u32 *)&stats_flags)[1]);
5824
5825         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5826                ((u32 *)&stats_flags)[0]);
5827         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5828                ((u32 *)&stats_flags)[1]);
5829
5830         REG_WR(bp, BAR_XSTRORM_INTMEM +
5831                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5832                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5833         REG_WR(bp, BAR_XSTRORM_INTMEM +
5834                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5835                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5836
5837         REG_WR(bp, BAR_TSTRORM_INTMEM +
5838                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5839                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5840         REG_WR(bp, BAR_TSTRORM_INTMEM +
5841                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5842                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5843
5844         REG_WR(bp, BAR_USTRORM_INTMEM +
5845                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5846                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5847         REG_WR(bp, BAR_USTRORM_INTMEM +
5848                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5849                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5850
5851         if (CHIP_IS_E1H(bp)) {
5852                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5853                         IS_E1HMF(bp));
5854                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5855                         IS_E1HMF(bp));
5856                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5857                         IS_E1HMF(bp));
5858                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5859                         IS_E1HMF(bp));
5860
5861                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5862                          bp->e1hov);
5863         }
5864
5865         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5866         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5867                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5868         for_each_queue(bp, i) {
5869                 struct bnx2x_fastpath *fp = &bp->fp[i];
5870
5871                 REG_WR(bp, BAR_USTRORM_INTMEM +
5872                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5873                        U64_LO(fp->rx_comp_mapping));
5874                 REG_WR(bp, BAR_USTRORM_INTMEM +
5875                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5876                        U64_HI(fp->rx_comp_mapping));
5877
5878                 /* Next page */
5879                 REG_WR(bp, BAR_USTRORM_INTMEM +
5880                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5881                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5882                 REG_WR(bp, BAR_USTRORM_INTMEM +
5883                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5884                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5885
5886                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5887                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5888                          max_agg_size);
5889         }
5890
5891         /* dropless flow control */
5892         if (CHIP_IS_E1H(bp)) {
5893                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5894
5895                 rx_pause.bd_thr_low = 250;
5896                 rx_pause.cqe_thr_low = 250;
5897                 rx_pause.cos = 1;
5898                 rx_pause.sge_thr_low = 0;
5899                 rx_pause.bd_thr_high = 350;
5900                 rx_pause.cqe_thr_high = 350;
5901                 rx_pause.sge_thr_high = 0;
5902
5903                 for_each_queue(bp, i) {
5904                         struct bnx2x_fastpath *fp = &bp->fp[i];
5905
5906                         if (!fp->disable_tpa) {
5907                                 rx_pause.sge_thr_low = 150;
5908                                 rx_pause.sge_thr_high = 250;
5909                         }
5910
5912                         offset = BAR_USTRORM_INTMEM +
5913                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5914                                                                    fp->cl_id);
5915                         for (j = 0;
5916                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5917                              j++)
5918                                 REG_WR(bp, offset + j*4,
5919                                        ((u32 *)&rx_pause)[j]);
5920                 }
5921         }
5922
5923         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5924
5925         /* Init rate shaping and fairness contexts */
5926         if (IS_E1HMF(bp)) {
5927                 int vn;
5928
5929                 /* During init there is no active link
5930                    Until link is up, set link rate to 10Gbps */
5931                 bp->link_vars.line_speed = SPEED_10000;
5932                 bnx2x_init_port_minmax(bp);
5933
5934                 if (!BP_NOMCP(bp))
5935                         bp->mf_config =
5936                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5937                 bnx2x_calc_vn_weight_sum(bp);
5938
5939                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5940                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5941
5942                 /* Enable rate shaping and fairness */
5943                 bp->cmng.flags.cmng_enables |=
5944                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5945
5946         } else {
5947                 /* rate shaping and fairness are disabled */
5948                 DP(NETIF_MSG_IFUP,
5949                    "single function mode  minmax will be disabled\n");
5950         }
5951
5952
5953         /* Store cmng structures to internal memory */
5954         if (bp->port.pmf)
5955                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5956                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5957                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5958                                ((u32 *)(&bp->cmng))[i]);
5959 }
5960
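/* Note the deliberate fall-through below: a COMMON load also performs
 * the PORT and FUNCTION stages and a PORT load also performs FUNCTION,
 * so each level of the load hierarchy initializes everything beneath it.
 */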
5961 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5962 {
5963         switch (load_code) {
5964         case FW_MSG_CODE_DRV_LOAD_COMMON:
5965                 bnx2x_init_internal_common(bp);
5966                 /* no break */
5967
5968         case FW_MSG_CODE_DRV_LOAD_PORT:
5969                 bnx2x_init_internal_port(bp);
5970                 /* no break */
5971
5972         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5973                 bnx2x_init_internal_func(bp);
5974                 break;
5975
5976         default:
5977                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5978                 break;
5979         }
5980 }
5981
5982 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5983 {
5984         int i;
5985
5986         for_each_queue(bp, i) {
5987                 struct bnx2x_fastpath *fp = &bp->fp[i];
5988
5989                 fp->bp = bp;
5990                 fp->state = BNX2X_FP_STATE_CLOSED;
5991                 fp->index = i;
5992                 fp->cl_id = BP_L_ID(bp) + i;
5993 #ifdef BCM_CNIC
5994                 fp->sb_id = fp->cl_id + 1;
5995 #else
5996                 fp->sb_id = fp->cl_id;
5997 #endif
5998                 DP(NETIF_MSG_IFUP,
5999                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
6000                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
6001                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
6002                               fp->sb_id);
6003                 bnx2x_update_fpsb_idx(fp);
6004         }
6005
6006         /* ensure status block indices were read */
6007         rmb();
6008
6009
6010         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6011                           DEF_SB_ID);
6012         bnx2x_update_dsb_idx(bp);
6013         bnx2x_update_coalesce(bp);
6014         bnx2x_init_rx_rings(bp);
6015         bnx2x_init_tx_ring(bp);
6016         bnx2x_init_sp_ring(bp);
6017         bnx2x_init_context(bp);
6018         bnx2x_init_internal(bp, load_code);
6019         bnx2x_init_ind_table(bp);
6020         bnx2x_stats_init(bp);
6021
6022         /* At this point, we are ready for interrupts */
6023         atomic_set(&bp->intr_sem, 0);
6024
6025         /* flush all before enabling interrupts */
6026         mb();
6027         mmiowb();
6028
6029         bnx2x_int_enable(bp);
6030
6031         /* Check for SPIO5 */
6032         bnx2x_attn_int_deasserted0(bp,
6033                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6034                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6035 }
6036
6037 /* end of nic init */
6038
6039 /*
6040  * gzip service functions
6041  */
6042
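/* Allocate everything firmware decompression needs up front: a
 * DMA-coherent FW_BUF_SIZE output buffer plus a zlib stream and its
 * inflate workspace, unwinding in reverse order on any failure.
 */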
6043 static int bnx2x_gunzip_init(struct bnx2x *bp)
6044 {
6045         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6046                                             &bp->gunzip_mapping, GFP_KERNEL);
6047         if (bp->gunzip_buf == NULL)
6048                 goto gunzip_nomem1;
6049
6050         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6051         if (bp->strm == NULL)
6052                 goto gunzip_nomem2;
6053
6054         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6055                                       GFP_KERNEL);
6056         if (bp->strm->workspace == NULL)
6057                 goto gunzip_nomem3;
6058
6059         return 0;
6060
6061 gunzip_nomem3:
6062         kfree(bp->strm);
6063         bp->strm = NULL;
6064
6065 gunzip_nomem2:
6066         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6067                           bp->gunzip_mapping);
6068         bp->gunzip_buf = NULL;
6069
6070 gunzip_nomem1:
6071         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6072                " decompression\n");
6073         return -ENOMEM;
6074 }
6075
6076 static void bnx2x_gunzip_end(struct bnx2x *bp)
6077 {
6078         kfree(bp->strm->workspace);
6079
6080         kfree(bp->strm);
6081         bp->strm = NULL;
6082
6083         if (bp->gunzip_buf) {
6084                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6085                                   bp->gunzip_mapping);
6086                 bp->gunzip_buf = NULL;
6087         }
6088 }
6089
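/* Decompress a gzip'ed firmware blob into bp->gunzip_buf.  The fixed
 * 10-byte gzip header is {0x1f, 0x8b, CM, FLG, MTIME[4], XFL, OS}; if
 * FLG.FNAME (bit 3) is set, a NUL-terminated file name follows and must
 * be skipped before the raw deflate stream is handed to zlib - hence
 * the negative window-bits argument to zlib_inflateInit2().
 */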
6090 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6091 {
6092         int n, rc;
6093
6094         /* check gzip header */
6095         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6096                 BNX2X_ERR("Bad gzip header\n");
6097                 return -EINVAL;
6098         }
6099
6100         n = 10;
6101
6102 #define FNAME                           0x8
6103
6104         if (zbuf[3] & FNAME)
6105                 while ((zbuf[n++] != 0) && (n < len));
6106
6107         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6108         bp->strm->avail_in = len - n;
6109         bp->strm->next_out = bp->gunzip_buf;
6110         bp->strm->avail_out = FW_BUF_SIZE;
6111
6112         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6113         if (rc != Z_OK)
6114                 return rc;
6115
6116         rc = zlib_inflate(bp->strm, Z_FINISH);
6117         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6118                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6119                            bp->strm->msg);
6120
6121         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6122         if (bp->gunzip_outlen & 0x3)
6123                 netdev_err(bp->dev, "Firmware decompression error:"
6124                                     " gunzip_outlen (%d) not aligned\n",
6125                                 bp->gunzip_outlen);
6126         bp->gunzip_outlen >>= 2;
6127
6128         zlib_inflateEnd(bp->strm);
6129
6130         if (rc == Z_STREAM_END)
6131                 return 0;
6132
6133         return rc;
6134 }
6135
6136 /* nic load/unload */
6137
6138 /*
6139  * General service functions
6140  */
6141
6142 /* send a NIG loopback debug packet */
6143 static void bnx2x_lb_pckt(struct bnx2x *bp)
6144 {
6145         u32 wb_write[3];
6146
6147         /* Ethernet source and destination addresses */
6148         wb_write[0] = 0x55555555;
6149         wb_write[1] = 0x55555555;
6150         wb_write[2] = 0x20;             /* SOP */
6151         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6152
6153         /* NON-IP protocol */
6154         wb_write[0] = 0x09000000;
6155         wb_write[1] = 0x55555555;
6156         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6157         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6158 }
6159
6160 /* Some of the internal memories are not directly readable from the
6161  * driver; to test them we send debug packets and check the byte and
6162  * packet counters along the BRB/PRS path.
6163  */
6164 static int bnx2x_int_mem_test(struct bnx2x *bp)
6165 {
6166         int factor;
6167         int count, i;
6168         u32 val = 0;
6169
6170         if (CHIP_REV_IS_FPGA(bp))
6171                 factor = 120;
6172         else if (CHIP_REV_IS_EMUL(bp))
6173                 factor = 200;
6174         else
6175                 factor = 1;
6176
6177         DP(NETIF_MSG_HW, "start part1\n");
6178
6179         /* Disable inputs of parser neighbor blocks */
6180         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6181         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6182         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6183         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6184
6185         /*  Write 0 to parser credits for CFC search request */
6186         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6187
6188         /* send Ethernet packet */
6189         bnx2x_lb_pckt(bp);
6190
6191         /* TODO: should the NIG statistics be reset here? */
6192         /* Wait until NIG register shows 1 packet of size 0x10 */
6193         count = 1000 * factor;
6194         while (count) {
6195
6196                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6197                 val = *bnx2x_sp(bp, wb_data[0]);
6198                 if (val == 0x10)
6199                         break;
6200
6201                 msleep(10);
6202                 count--;
6203         }
6204         if (val != 0x10) {
6205                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6206                 return -1;
6207         }
6208
6209         /* Wait until PRS register shows 1 packet */
6210         count = 1000 * factor;
6211         while (count) {
6212                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6213                 if (val == 1)
6214                         break;
6215
6216                 msleep(10);
6217                 count--;
6218         }
6219         if (val != 0x1) {
6220                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6221                 return -2;
6222         }
6223
6224         /* Reset and init BRB, PRS */
6225         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6226         msleep(50);
6227         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6228         msleep(50);
6229         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6230         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6231
6232         DP(NETIF_MSG_HW, "part2\n");
6233
6234         /* Disable inputs of parser neighbor blocks */
6235         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6236         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6237         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6238         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6239
6240         /* Write 0 to parser credits for CFC search request */
6241         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6242
6243         /* send 10 Ethernet packets */
6244         for (i = 0; i < 10; i++)
6245                 bnx2x_lb_pckt(bp);
6246
6247         /* Wait until NIG register shows 10 + 1
6248            packets of size 11*0x10 = 0xb0 */
6249         count = 1000 * factor;
6250         while (count) {
6251
6252                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6253                 val = *bnx2x_sp(bp, wb_data[0]);
6254                 if (val == 0xb0)
6255                         break;
6256
6257                 msleep(10);
6258                 count--;
6259         }
6260         if (val != 0xb0) {
6261                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6262                 return -3;
6263         }
6264
6265         /* Wait until PRS register shows 2 packets */
6266         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6267         if (val != 2)
6268                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6269
6270         /* Write 1 to parser credits for CFC search request */
6271         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6272
6273         /* Wait until PRS register shows 3 packets */
6274         msleep(10 * factor);
6275         /* and check that the PRS packet counter has reached 3 */
6276         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6277         if (val != 3)
6278                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6279
6280         /* clear NIG EOP FIFO */
6281         for (i = 0; i < 11; i++)
6282                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6283         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6284         if (val != 1) {
6285                 BNX2X_ERR("clear of NIG failed\n");
6286                 return -4;
6287         }
6288
6289         /* Reset and init BRB, PRS, NIG */
6290         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6291         msleep(50);
6292         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6293         msleep(50);
6294         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6295         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6296 #ifndef BCM_CNIC
6297         /* set NIC mode */
6298         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6299 #endif
6300
6301         /* Enable inputs of parser neighbor blocks */
6302         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6303         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6304         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6305         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6306
6307         DP(NETIF_MSG_HW, "done\n");
6308
6309         return 0; /* OK */
6310 }
6311
6312 static void enable_blocks_attention(struct bnx2x *bp)
6313 {
6314         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6315         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6316         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6317         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6318         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6319         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6320         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6321         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6322         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6323 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6324 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6325         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6326         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6327         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6328 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6329 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6330         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6331         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6332         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6333         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6334 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6335 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6336         if (CHIP_REV_IS_FPGA(bp))
6337                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6338         else
6339                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6340         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6341         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6342         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6343 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6344 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6345         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6346         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6347 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6348         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
6349 }
6350
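/* Per-block parity attention masks.  In this driver's register
 * convention a set mask bit silences that parity source, so 0x0 enables
 * every bit of a block while the non-zero entries deliberately keep the
 * noted bits masked.
 */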
6351 static const struct {
6352         u32 addr;
6353         u32 mask;
6354 } bnx2x_parity_mask[] = {
6355         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6356         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6357         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6358         {HC_REG_HC_PRTY_MASK, 0xffffffff},
6359         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6360         {QM_REG_QM_PRTY_MASK, 0x0},
6361         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6362         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6363         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6364         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6365         {CDU_REG_CDU_PRTY_MASK, 0x0},
6366         {CFC_REG_CFC_PRTY_MASK, 0x0},
6367         {DBG_REG_DBG_PRTY_MASK, 0x0},
6368         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6369         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6370         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6371         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6372         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6373         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6374         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6375         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6376         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6377         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6378         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6379         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6380         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6381         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6382         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6383 };
6384
6385 static void enable_blocks_parity(struct bnx2x *bp)
6386 {
6387         int i, mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
6389
6390         for (i = 0; i < mask_arr_len; i++)
6391                 REG_WR(bp, bnx2x_parity_mask[i].addr,
6392                         bnx2x_parity_mask[i].mask);
6393 }
6394
6395
6396 static void bnx2x_reset_common(struct bnx2x *bp)
6397 {
6398         /* reset_common */
6399         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6400                0xd3ffff7f);
6401         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6402 }
6403
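/* Derive the PXP arbiter read/write ordering from the PCIe Device
 * Control register: Max_Payload_Size sits in bits 7:5 and
 * Max_Read_Request_Size in bits 14:12, each encoding 128 << value bytes
 * (a field value of 2 therefore means 512-byte transactions).  A forced
 * bp->mrrs module setting overrides the read order.
 */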
6404 static void bnx2x_init_pxp(struct bnx2x *bp)
6405 {
6406         u16 devctl;
6407         int r_order, w_order;
6408
6409         pci_read_config_word(bp->pdev,
6410                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6411         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6412         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6413         if (bp->mrrs == -1)
6414                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6415         else {
6416                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6417                 r_order = bp->mrrs;
6418         }
6419
6420         bnx2x_init_pxp_arb(bp, r_order, w_order);
6421 }
6422
6423 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6424 {
6425         int is_required;
6426         u32 val;
6427         int port;
6428
6429         if (BP_NOMCP(bp))
6430                 return;
6431
6432         is_required = 0;
6433         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6434               SHARED_HW_CFG_FAN_FAILURE_MASK;
6435
6436         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6437                 is_required = 1;
6438
6439         /*
6440          * The fan failure mechanism is usually related to the PHY type since
6441          * the power consumption of the board is affected by the PHY.  Currently,
6442          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6443          */
6444         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6445                 for (port = PORT_0; port < PORT_MAX; port++) {
6446                         u32 phy_type =
6447                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6448                                          external_phy_config) &
6449                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6450                         is_required |=
6451                                 ((phy_type ==
6452                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6453                                  (phy_type ==
6454                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6455                                  (phy_type ==
6456                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6457                 }
6458
6459         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6460
6461         if (is_required == 0)
6462                 return;
6463
6464         /* Fan failure is indicated by SPIO 5 */
6465         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6466                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6467
6468         /* set to active low mode */
6469         val = REG_RD(bp, MISC_REG_SPIO_INT);
6470         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6471                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6472         REG_WR(bp, MISC_REG_SPIO_INT, val);
6473
6474         /* enable interrupt to signal the IGU */
6475         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6476         val |= (1 << MISC_REGISTERS_SPIO_5);
6477         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6478 }
6479
6480 static int bnx2x_init_common(struct bnx2x *bp)
6481 {
6482         u32 val, i;
6483 #ifdef BCM_CNIC
6484         u32 wb_write[2];
6485 #endif
6486
6487         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6488
6489         bnx2x_reset_common(bp);
6490         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6491         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6492
6493         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6494         if (CHIP_IS_E1H(bp))
6495                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6496
6497         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6498         msleep(30);
6499         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6500
6501         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6502         if (CHIP_IS_E1(bp)) {
6503                 /* enable HW interrupt from PXP on USDM overflow
6504                    bit 16 on INT_MASK_0 */
6505                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6506         }
6507
6508         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6509         bnx2x_init_pxp(bp);
6510
6511 #ifdef __BIG_ENDIAN
6512         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6513         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6514         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6515         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6516         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6517         /* make sure this value is 0 */
6518         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6519
6520 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6521         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6522         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6523         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6524         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6525 #endif
6526
6527         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6528 #ifdef BCM_CNIC
6529         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6530         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6531         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6532 #endif
6533
6534         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6535                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6536
6537         /* let the HW do its magic ... */
6538         msleep(100);
6539         /* finish PXP init */
6540         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6541         if (val != 1) {
6542                 BNX2X_ERR("PXP2 CFG failed\n");
6543                 return -EBUSY;
6544         }
6545         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6546         if (val != 1) {
6547                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6548                 return -EBUSY;
6549         }
6550
6551         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6552         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6553
6554         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6555
6556         /* clean the DMAE memory */
6557         bp->dmae_ready = 1;
6558         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6559
6560         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6561         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6562         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6563         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6564
6565         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6566         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6567         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6568         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6569
6570         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6571
6572 #ifdef BCM_CNIC
6573         wb_write[0] = 0;
6574         wb_write[1] = 0;
6575         for (i = 0; i < 64; i++) {
6576                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6577                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6578
6579                 if (CHIP_IS_E1H(bp)) {
6580                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6581                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6582                                           wb_write, 2);
6583                 }
6584         }
6585 #endif
6586         /* soft reset pulse */
6587         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6588         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6589
6590 #ifdef BCM_CNIC
6591         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6592 #endif
6593
6594         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6595         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6596         if (!CHIP_REV_IS_SLOW(bp)) {
6597                 /* enable hw interrupt from doorbell Q */
6598                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6599         }
6600
6601         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6602         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6603         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6604 #ifndef BCM_CNIC
6605         /* set NIC mode */
6606         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6607 #endif
6608         if (CHIP_IS_E1H(bp))
6609                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6610
6611         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6612         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6613         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6614         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6615
6616         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6617         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6618         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6619         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6620
6621         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6622         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6623         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6624         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6625
6626         /* sync semi rtc */
6627         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6628                0x80000000);
6629         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6630                0x80000000);
6631
6632         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6633         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6634         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6635
6636         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6637         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6638                 REG_WR(bp, i, random32());
6639         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6640 #ifdef BCM_CNIC
6641         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6642         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6643         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6644         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6645         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6646         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6647         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6648         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6649         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6650         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6651 #endif
6652         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6653
6654         if (sizeof(union cdu_context) != 1024)
6655                 /* we currently assume that a context is 1024 bytes */
6656                 dev_alert(&bp->pdev->dev, "please adjust the size "
6657                                           "of cdu_context (%ld)\n",
6658                          (long)sizeof(union cdu_context));
6659
6660         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6661         val = (4 << 24) + (0 << 12) + 1024;
6662         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6663
6664         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6665         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6666         /* enable context validation interrupt from CFC */
6667         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6668
6669         /* set the thresholds to prevent CFC/CDU race */
6670         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6671
6672         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6673         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6674
6675         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6676         /* Reset PCIE errors for debug */
6677         REG_WR(bp, 0x2814, 0xffffffff);
6678         REG_WR(bp, 0x3820, 0xffffffff);
6679
6680         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6681         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6682         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6683         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6684
6685         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6686         if (CHIP_IS_E1H(bp)) {
6687                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6688                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6689         }
6690
6691         if (CHIP_REV_IS_SLOW(bp))
6692                 msleep(200);
6693
6694         /* finish CFC init */
6695         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6696         if (val != 1) {
6697                 BNX2X_ERR("CFC LL_INIT failed\n");
6698                 return -EBUSY;
6699         }
6700         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6701         if (val != 1) {
6702                 BNX2X_ERR("CFC AC_INIT failed\n");
6703                 return -EBUSY;
6704         }
6705         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6706         if (val != 1) {
6707                 BNX2X_ERR("CFC CAM_INIT failed\n");
6708                 return -EBUSY;
6709         }
6710         REG_WR(bp, CFC_REG_DEBUG0, 0);
6711
6712         /* read NIG statistic
6713            to see if this is our first up since powerup */
6714         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6715         val = *bnx2x_sp(bp, wb_data[0]);
6716
6717         /* do internal memory self test */
6718         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6719                 BNX2X_ERR("internal mem self test failed\n");
6720                 return -EBUSY;
6721         }
6722
6723         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6724         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6725         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6726         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6727         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6728                 bp->port.need_hw_lock = 1;
6729                 break;
6730
6731         default:
6732                 break;
6733         }
6734
6735         bnx2x_setup_fan_failure_detection(bp);
6736
6737         /* clear PXP2 attentions */
6738         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6739
6740         enable_blocks_attention(bp);
6741         if (CHIP_PARITY_SUPPORTED(bp))
6742                 enable_blocks_parity(bp);
6743
6744         if (!BP_NOMCP(bp)) {
6745                 bnx2x_acquire_phy_lock(bp);
6746                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6747                 bnx2x_release_phy_lock(bp);
6748         } else
6749                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
6750
6751         return 0;
6752 }
6753
6754 static int bnx2x_init_port(struct bnx2x *bp)
6755 {
6756         int port = BP_PORT(bp);
6757         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6758         u32 low, high;
6759         u32 val;
6760
6761         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
6762
6763         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6764
6765         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6766         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6767
6768         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6769         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6770         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6771         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6772
6773 #ifdef BCM_CNIC
6774         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6775
6776         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6777         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6778         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6779 #endif
6780
6781         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6782
6783         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6784         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6785                 /* no pause for emulation and FPGA */
6786                 low = 0;
6787                 high = 513;
6788         } else {
6789                 if (IS_E1HMF(bp))
6790                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6791                 else if (bp->dev->mtu > 4096) {
6792                         if (bp->flags & ONE_PORT_FLAG)
6793                                 low = 160;
6794                         else {
6795                                 val = bp->dev->mtu;
6796                                 /* (24*1024 + val*4)/256 */
6797                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6798                         }
6799                 } else
6800                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6801                 high = low + 56;        /* 14*1024/256 */
6802         }
6803         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6804         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
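             /* worked example (illustrative only): on real silicon, SF mode,
              * two-port board, mtu 9000: low = 96 + 9000/64 + 1 = 237 and
              * high = 237 + 56 = 293, both in 256-byte units per the
              * inline formulas above */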
6805
6806
6807         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6808
6809         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6810         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6811         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6812         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6813
6814         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6815         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6816         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6817         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6818
6819         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6820         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6821
6822         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6823
6824         /* configure PBF to work without PAUSE for mtu 9000 */
6825         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6826
6827         /* update threshold */
6828         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6829         /* update init credit */
6830         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
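             /* 9040/16 = 565: the arbiter threshold and init credit appear to
              * be programmed in 16-byte units, sized for an mtu-9000 frame
              * (an interpretation; the source gives only the raw numbers) */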
6831
6832         /* probe changes */
6833         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6834         msleep(5);
6835         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6836
6837 #ifdef BCM_CNIC
6838         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6839 #endif
6840         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6841         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6842
6843         if (CHIP_IS_E1(bp)) {
6844                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6845                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6846         }
6847         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6848
6849         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6850         /* init aeu_mask_attn_func_0/1:
6851          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6852          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6853          *             bits 4-7 are used for "per vn group attention" */
6854         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6855                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6856
6857         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6858         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6859         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6860         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6861         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6862
6863         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6864
6865         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6866
6867         if (CHIP_IS_E1H(bp)) {
6868                 /* 0x2 disable e1hov, 0x1 enable */
6869                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6870                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6871
6872                 {
6873                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6874                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6875                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6876                 }
6877         }
6878
6879         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6880         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6881
6882         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6883         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6884                 {
6885                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6886
6887                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6888                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6889
6890                 /* The GPIO should be swapped if the swap register is
6891                    set and active */
6892                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6893                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6894
6895                 /* Select function upon port-swap configuration */
6896                 if (port == 0) {
6897                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6898                         aeu_gpio_mask = (swap_val && swap_override) ?
6899                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6900                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6901                 } else {
6902                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6903                         aeu_gpio_mask = (swap_val && swap_override) ?
6904                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6905                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6906                 }
6907                 val = REG_RD(bp, offset);
6908                 /* add GPIO3 to group */
6909                 val |= aeu_gpio_mask;
6910                 REG_WR(bp, offset, val);
6911                 }
6912                 break;
6913
6914         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6915         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6916                 /* add SPIO 5 to group 0 */
6917                 {
6918                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6919                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6920                 val = REG_RD(bp, reg_addr);
6921                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6922                 REG_WR(bp, reg_addr, val);
6923                 }
6924                 break;
6925
6926         default:
6927                 break;
6928         }
6929
6930         bnx2x__link_reset(bp);
6931
6932         return 0;
6933 }
6934
6935 #define ILT_PER_FUNC            (768/2)
6936 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6937 /* the phys address is shifted right 12 bits and a 1=valid bit
6938    is added at the 53rd bit;
6939    then, since this is a wide register(TM),
6940    we split it into two 32 bit writes
6941  */
6942 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6943 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6944 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6945 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
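     /* worked example (hypothetical address): for a phys addr of
      * 0x1234567000, ONCHIP_ADDR1() = 0x01234567 (addr >> 12, low 32 bits)
      * and ONCHIP_ADDR2() = 0x00100000 (valid bit 20 set, addr >> 44 = 0) */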
6946
6947 #ifdef BCM_CNIC
6948 #define CNIC_ILT_LINES          127
6949 #define CNIC_CTX_PER_ILT        16
6950 #else
6951 #define CNIC_ILT_LINES          0
6952 #endif
6953
6954 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6955 {
6956         int reg;
6957
6958         if (CHIP_IS_E1H(bp))
6959                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6960         else /* E1 */
6961                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6962
6963         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6964 }
6965
6966 static int bnx2x_init_func(struct bnx2x *bp)
6967 {
6968         int port = BP_PORT(bp);
6969         int func = BP_FUNC(bp);
6970         u32 addr, val;
6971         int i;
6972
6973         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
6974
6975         /* set MSI reconfigure capability */
6976         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6977         val = REG_RD(bp, addr);
6978         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6979         REG_WR(bp, addr, val);
6980
6981         i = FUNC_ILT_BASE(func);
6982
6983         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6984         if (CHIP_IS_E1H(bp)) {
6985                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6986                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6987         } else /* E1 */
6988                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6989                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6990
6991 #ifdef BCM_CNIC
6992         i += 1 + CNIC_ILT_LINES;
6993         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6994         if (CHIP_IS_E1(bp))
6995                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6996         else {
6997                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6998                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6999         }
7000
7001         i++;
7002         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7003         if (CHIP_IS_E1(bp))
7004                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7005         else {
7006                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7007                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7008         }
7009
7010         i++;
7011         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7012         if (CHIP_IS_E1(bp))
7013                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7014         else {
7015                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7016                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7017         }
7018
7019         /* tell the searcher where the T2 table is */
7020         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7021
7022         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7023                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7024
7025         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7026                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7027                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7028
7029         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7030 #endif
7031
7032         if (CHIP_IS_E1H(bp)) {
7033                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7034                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7035                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7036                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7037                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7038                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7039                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7040                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7041                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7042
7043                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7044                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7045         }
7046
7047         /* HC init per function */
7048         if (CHIP_IS_E1H(bp)) {
7049                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7050
7051                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7052                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7053         }
7054         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7055
7056         /* Reset PCIE errors for debug */
7057         REG_WR(bp, 0x2114, 0xffffffff);
7058         REG_WR(bp, 0x2120, 0xffffffff);
7059
7060         return 0;
7061 }
7062
7063 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7064 {
7065         int i, rc = 0;
7066
7067         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
7068            BP_FUNC(bp), load_code);
7069
7070         bp->dmae_ready = 0;
7071         mutex_init(&bp->dmae_mutex);
7072         rc = bnx2x_gunzip_init(bp);
7073         if (rc)
7074                 return rc;
7075
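             /* the fall-throughs below are deliberate: a COMMON load also
              * runs the PORT and FUNCTION init stages, and a PORT load also
              * runs the FUNCTION stage */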
7076         switch (load_code) {
7077         case FW_MSG_CODE_DRV_LOAD_COMMON:
7078                 rc = bnx2x_init_common(bp);
7079                 if (rc)
7080                         goto init_hw_err;
7081                 /* no break */
7082
7083         case FW_MSG_CODE_DRV_LOAD_PORT:
7084                 bp->dmae_ready = 1;
7085                 rc = bnx2x_init_port(bp);
7086                 if (rc)
7087                         goto init_hw_err;
7088                 /* no break */
7089
7090         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7091                 bp->dmae_ready = 1;
7092                 rc = bnx2x_init_func(bp);
7093                 if (rc)
7094                         goto init_hw_err;
7095                 break;
7096
7097         default:
7098                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7099                 break;
7100         }
7101
7102         if (!BP_NOMCP(bp)) {
7103                 int func = BP_FUNC(bp);
7104
7105                 bp->fw_drv_pulse_wr_seq =
7106                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7107                                  DRV_PULSE_SEQ_MASK);
7108                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7109         }
7110
7111         /* this needs to be done before gunzip end */
7112         bnx2x_zero_def_sb(bp);
7113         for_each_queue(bp, i)
7114                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7115 #ifdef BCM_CNIC
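             /* i is left at BNX2X_NUM_QUEUES(bp) by the loop above, so this
              * presumably zeroes the extra CNIC status block that follows
              * the fast-path ones */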
7116         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7117 #endif
7118
7119 init_hw_err:
7120         bnx2x_gunzip_end(bp);
7121
7122         return rc;
7123 }
7124
7125 static void bnx2x_free_mem(struct bnx2x *bp)
7126 {
7127
7128 #define BNX2X_PCI_FREE(x, y, size) \
7129         do { \
7130                 if (x) { \
7131                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
7132                         x = NULL; \
7133                         y = 0; \
7134                 } \
7135         } while (0)
7136
7137 #define BNX2X_FREE(x) \
7138         do { \
7139                 if (x) { \
7140                         vfree(x); \
7141                         x = NULL; \
7142                 } \
7143         } while (0)
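     /* BNX2X_PCI_FREE releases coherent DMA memory and BNX2X_FREE plain
      * vmalloc()ed memory; both NULL the pointer afterwards, so a repeated
      * bnx2x_free_mem() call is harmless */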
7144
7145         int i;
7146
7147         /* fastpath */
7148         /* Common */
7149         for_each_queue(bp, i) {
7150
7151                 /* status blocks */
7152                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7153                                bnx2x_fp(bp, i, status_blk_mapping),
7154                                sizeof(struct host_status_block));
7155         }
7156         /* Rx */
7157         for_each_queue(bp, i) {
7158
7159                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7160                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7161                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7162                                bnx2x_fp(bp, i, rx_desc_mapping),
7163                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
7164
7165                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7166                                bnx2x_fp(bp, i, rx_comp_mapping),
7167                                sizeof(struct eth_fast_path_rx_cqe) *
7168                                NUM_RCQ_BD);
7169
7170                 /* SGE ring */
7171                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7172                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7173                                bnx2x_fp(bp, i, rx_sge_mapping),
7174                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7175         }
7176         /* Tx */
7177         for_each_queue(bp, i) {
7178
7179                 /* fastpath tx rings: tx_buf tx_desc */
7180                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7181                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7182                                bnx2x_fp(bp, i, tx_desc_mapping),
7183                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7184         }
7185         /* end of fastpath */
7186
7187         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7188                        sizeof(struct host_def_status_block));
7189
7190         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7191                        sizeof(struct bnx2x_slowpath));
7192
7193 #ifdef BCM_CNIC
7194         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7195         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7196         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7197         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7198         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7199                        sizeof(struct host_status_block));
7200 #endif
7201         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7202
7203 #undef BNX2X_PCI_FREE
7204 #undef BNX2X_FREE
7205 }
7206
7207 static int bnx2x_alloc_mem(struct bnx2x *bp)
7208 {
7209
7210 #define BNX2X_PCI_ALLOC(x, y, size) \
7211         do { \
7212                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7213                 if (x == NULL) \
7214                         goto alloc_mem_err; \
7215                 memset(x, 0, size); \
7216         } while (0)
7217
7218 #define BNX2X_ALLOC(x, size) \
7219         do { \
7220                 x = vmalloc(size); \
7221                 if (x == NULL) \
7222                         goto alloc_mem_err; \
7223                 memset(x, 0, size); \
7224         } while (0)
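     /* either macro jumps to alloc_mem_err on failure, which frees
      * everything allocated so far and returns -ENOMEM */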
7225
7226         int i;
7227
7228         /* fastpath */
7229         /* Common */
7230         for_each_queue(bp, i) {
7231                 bnx2x_fp(bp, i, bp) = bp;
7232
7233                 /* status blocks */
7234                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7235                                 &bnx2x_fp(bp, i, status_blk_mapping),
7236                                 sizeof(struct host_status_block));
7237         }
7238         /* Rx */
7239         for_each_queue(bp, i) {
7240
7241                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7242                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7243                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7244                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7245                                 &bnx2x_fp(bp, i, rx_desc_mapping),
7246                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7247
7248                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7249                                 &bnx2x_fp(bp, i, rx_comp_mapping),
7250                                 sizeof(struct eth_fast_path_rx_cqe) *
7251                                 NUM_RCQ_BD);
7252
7253                 /* SGE ring */
7254                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7255                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7256                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7257                                 &bnx2x_fp(bp, i, rx_sge_mapping),
7258                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7259         }
7260         /* Tx */
7261         for_each_queue(bp, i) {
7262
7263                 /* fastpath tx rings: tx_buf tx_desc */
7264                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7265                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7266                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7267                                 &bnx2x_fp(bp, i, tx_desc_mapping),
7268                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7269         }
7270         /* end of fastpath */
7271
7272         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7273                         sizeof(struct host_def_status_block));
7274
7275         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7276                         sizeof(struct bnx2x_slowpath));
7277
7278 #ifdef BCM_CNIC
7279         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7280
7281         /* allocate searcher T2 table:
7282            1/4 of the T1 allocation is used for T2
7283            (which is not entered into the ILT) */
7284         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7285
7286         /* Initialize T2 (for 1024 connections) */
7287         for (i = 0; i < 16*1024; i += 64)
7288                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
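             /* the last 8 bytes of each 64-byte T2 entry hold the physical
              * address of the next entry, chaining the table into the
              * searcher's free list (an interpretation of the loop above) */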
7289
7290         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
7291         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7292
7293         /* QM queues (128*MAX_CONN) */
7294         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7295
7296         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7297                         sizeof(struct host_status_block));
7298 #endif
7299
7300         /* Slow path ring */
7301         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7302
7303         return 0;
7304
7305 alloc_mem_err:
7306         bnx2x_free_mem(bp);
7307         return -ENOMEM;
7308
7309 #undef BNX2X_PCI_ALLOC
7310 #undef BNX2X_ALLOC
7311 }
7312
7313 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7314 {
7315         int i;
7316
7317         for_each_queue(bp, i) {
7318                 struct bnx2x_fastpath *fp = &bp->fp[i];
7319
7320                 u16 bd_cons = fp->tx_bd_cons;
7321                 u16 sw_prod = fp->tx_pkt_prod;
7322                 u16 sw_cons = fp->tx_pkt_cons;
7323
7324                 while (sw_cons != sw_prod) {
7325                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7326                         sw_cons++;
7327                 }
7328         }
7329 }
7330
7331 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7332 {
7333         int i, j;
7334
7335         for_each_queue(bp, j) {
7336                 struct bnx2x_fastpath *fp = &bp->fp[j];
7337
7338                 for (i = 0; i < NUM_RX_BD; i++) {
7339                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7340                         struct sk_buff *skb = rx_buf->skb;
7341
7342                         if (skb == NULL)
7343                                 continue;
7344
7345                         dma_unmap_single(&bp->pdev->dev,
7346                                          dma_unmap_addr(rx_buf, mapping),
7347                                          bp->rx_buf_size, DMA_FROM_DEVICE);
7348
7349                         rx_buf->skb = NULL;
7350                         dev_kfree_skb(skb);
7351                 }
7352                 if (!fp->disable_tpa)
7353                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7354                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
7355                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
7356         }
7357 }
7358
7359 static void bnx2x_free_skbs(struct bnx2x *bp)
7360 {
7361         bnx2x_free_tx_skbs(bp);
7362         bnx2x_free_rx_skbs(bp);
7363 }
7364
7365 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7366 {
7367         int i, offset = 1;
7368
7369         free_irq(bp->msix_table[0].vector, bp->dev);
7370         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7371            bp->msix_table[0].vector);
7372
7373 #ifdef BCM_CNIC
7374         offset++;
7375 #endif
7376         for_each_queue(bp, i) {
7377                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
7378                    "state %x\n", i, bp->msix_table[i + offset].vector,
7379                    bnx2x_fp(bp, i, state));
7380
7381                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7382         }
7383 }
7384
7385 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7386 {
7387         if (bp->flags & USING_MSIX_FLAG) {
7388                 if (!disable_only)
7389                         bnx2x_free_msix_irqs(bp);
7390                 pci_disable_msix(bp->pdev);
7391                 bp->flags &= ~USING_MSIX_FLAG;
7392
7393         } else if (bp->flags & USING_MSI_FLAG) {
7394                 if (!disable_only)
7395                         free_irq(bp->pdev->irq, bp->dev);
7396                 pci_disable_msi(bp->pdev);
7397                 bp->flags &= ~USING_MSI_FLAG;
7398
7399         } else if (!disable_only)
7400                 free_irq(bp->pdev->irq, bp->dev);
7401 }
7402
7403 static int bnx2x_enable_msix(struct bnx2x *bp)
7404 {
7405         int i, rc, offset = 1;
7406         int igu_vec = 0;
7407
7408         bp->msix_table[0].entry = igu_vec;
7409         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7410
7411 #ifdef BCM_CNIC
7412         igu_vec = BP_L_ID(bp) + offset;
7413         bp->msix_table[1].entry = igu_vec;
7414         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7415         offset++;
7416 #endif
7417         for_each_queue(bp, i) {
7418                 igu_vec = BP_L_ID(bp) + offset + i;
7419                 bp->msix_table[i + offset].entry = igu_vec;
7420                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7421                    "(fastpath #%u)\n", i + offset, igu_vec, i);
7422         }
7423
7424         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7425                              BNX2X_NUM_QUEUES(bp) + offset);
7426
7427         /*
7428          * if fewer MSI-X vectors are available, pci_enable_msix() returns
7429          * that count; retry with it and shrink the tx/rx queue set to match
7430          */
7431         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7432                 /* vectors available for FP */
7433                 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7434
7435                 DP(NETIF_MSG_IFUP,
7436                    "Trying to use less MSI-X vectors: %d\n", rc);
7437
7438                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7439
7440                 if (rc) {
7441                         DP(NETIF_MSG_IFUP,
7442                            "MSI-X is not attainable  rc %d\n", rc);
7443                         return rc;
7444                 }
7445
7446                 bp->num_queues = min(bp->num_queues, fp_vec);
7447
7448                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7449                                   bp->num_queues);
7450         } else if (rc) {
7451                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
7452                 return rc;
7453         }
7454
7455         bp->flags |= USING_MSIX_FLAG;
7456
7457         return 0;
7458 }
7459
7460 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7461 {
7462         int i, rc, offset = 1;
7463
7464         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7465                          bp->dev->name, bp->dev);
7466         if (rc) {
7467                 BNX2X_ERR("request sp irq failed\n");
7468                 return -EBUSY;
7469         }
7470
7471 #ifdef BCM_CNIC
7472         offset++;
7473 #endif
7474         for_each_queue(bp, i) {
7475                 struct bnx2x_fastpath *fp = &bp->fp[i];
7476                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7477                          bp->dev->name, i);
7478
7479                 rc = request_irq(bp->msix_table[i + offset].vector,
7480                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7481                 if (rc) {
7482                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7483                         bnx2x_free_msix_irqs(bp);
7484                         return -EBUSY;
7485                 }
7486
7487                 fp->state = BNX2X_FP_STATE_IRQ;
7488         }
7489
7490         i = BNX2X_NUM_QUEUES(bp);
7491         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
7492                " ... fp[%d] %d\n",
7493                bp->msix_table[0].vector,
7494                0, bp->msix_table[offset].vector,
7495                i - 1, bp->msix_table[offset + i - 1].vector);
7496
7497         return 0;
7498 }
7499
7500 static int bnx2x_enable_msi(struct bnx2x *bp)
7501 {
7502         int rc;
7503
7504         rc = pci_enable_msi(bp->pdev);
7505         if (rc) {
7506                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7507                 return -1;
7508         }
7509         bp->flags |= USING_MSI_FLAG;
7510
7511         return 0;
7512 }
7513
7514 static int bnx2x_req_irq(struct bnx2x *bp)
7515 {
7516         unsigned long flags;
7517         int rc;
7518
7519         if (bp->flags & USING_MSI_FLAG)
7520                 flags = 0;
7521         else
7522                 flags = IRQF_SHARED;
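             /* MSI vectors are exclusive to the device; only a legacy INTx
              * line may be shared with other devices */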
7523
7524         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7525                          bp->dev->name, bp->dev);
7526         if (!rc)
7527                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7528
7529         return rc;
7530 }
7531
7532 static void bnx2x_napi_enable(struct bnx2x *bp)
7533 {
7534         int i;
7535
7536         for_each_queue(bp, i)
7537                 napi_enable(&bnx2x_fp(bp, i, napi));
7538 }
7539
7540 static void bnx2x_napi_disable(struct bnx2x *bp)
7541 {
7542         int i;
7543
7544         for_each_queue(bp, i)
7545                 napi_disable(&bnx2x_fp(bp, i, napi));
7546 }
7547
7548 static void bnx2x_netif_start(struct bnx2x *bp)
7549 {
7550         int intr_sem;
7551
7552         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7553         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7554
7555         if (intr_sem) {
7556                 if (netif_running(bp->dev)) {
7557                         bnx2x_napi_enable(bp);
7558                         bnx2x_int_enable(bp);
7559                         if (bp->state == BNX2X_STATE_OPEN)
7560                                 netif_tx_wake_all_queues(bp->dev);
7561                 }
7562         }
7563 }
7564
7565 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7566 {
7567         bnx2x_int_disable_sync(bp, disable_hw);
7568         bnx2x_napi_disable(bp);
7569         netif_tx_disable(bp->dev);
7570 }
7571
7572 /*
7573  * Init service functions
7574  */
7575
7576 /**
7577  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7578  *
7579  * @param bp driver descriptor
7580  * @param set set or clear an entry (1 or 0)
7581  * @param mac pointer to a buffer containing a MAC
7582  * @param cl_bit_vec bit vector of clients to register a MAC for
7583  * @param cam_offset offset in a CAM to use
7584  * @param with_bcast set broadcast MAC as well
7585  */
7586 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7587                                       u32 cl_bit_vec, u8 cam_offset,
7588                                       u8 with_bcast)
7589 {
7590         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7591         int port = BP_PORT(bp);
7592
7593         /* CAM allocation
7594          * unicasts 0-31:port0 32-63:port1
7595          * multicast 64-127:port0 128-191:port1
7596          */
7597         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7598         config->hdr.offset = cam_offset;
7599         config->hdr.client_id = 0xff;
7600         config->hdr.reserved1 = 0;
7601
7602         /* primary MAC */
7603         config->config_table[0].cam_entry.msb_mac_addr =
7604                                         swab16(*(u16 *)&mac[0]);
7605         config->config_table[0].cam_entry.middle_mac_addr =
7606                                         swab16(*(u16 *)&mac[2]);
7607         config->config_table[0].cam_entry.lsb_mac_addr =
7608                                         swab16(*(u16 *)&mac[4]);
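             /* the CAM appears to keep each 16-bit half of the MAC
              * byte-swapped, hence the swab16() on every pair of
              * address bytes */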
7609         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7610         if (set)
7611                 config->config_table[0].target_table_entry.flags = 0;
7612         else
7613                 CAM_INVALIDATE(config->config_table[0]);
7614         config->config_table[0].target_table_entry.clients_bit_vector =
7615                                                 cpu_to_le32(cl_bit_vec);
7616         config->config_table[0].target_table_entry.vlan_id = 0;
7617
7618         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7619            (set ? "setting" : "clearing"),
7620            config->config_table[0].cam_entry.msb_mac_addr,
7621            config->config_table[0].cam_entry.middle_mac_addr,
7622            config->config_table[0].cam_entry.lsb_mac_addr);
7623
7624         /* broadcast */
7625         if (with_bcast) {
7626                 config->config_table[1].cam_entry.msb_mac_addr =
7627                         cpu_to_le16(0xffff);
7628                 config->config_table[1].cam_entry.middle_mac_addr =
7629                         cpu_to_le16(0xffff);
7630                 config->config_table[1].cam_entry.lsb_mac_addr =
7631                         cpu_to_le16(0xffff);
7632                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7633                 if (set)
7634                         config->config_table[1].target_table_entry.flags =
7635                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7636                 else
7637                         CAM_INVALIDATE(config->config_table[1]);
7638                 config->config_table[1].target_table_entry.clients_bit_vector =
7639                                                         cpu_to_le32(cl_bit_vec);
7640                 config->config_table[1].target_table_entry.vlan_id = 0;
7641         }
7642
7643         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7644                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7645                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7646 }
7647
7648 /**
7649  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7650  *
7651  * @param bp driver descriptor
7652  * @param set set or clear an entry (1 or 0)
7653  * @param mac pointer to a buffer containing a MAC
7654  * @param cl_bit_vec bit vector of clients to register a MAC for
7655  * @param cam_offset offset in a CAM to use
7656  */
7657 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7658                                        u32 cl_bit_vec, u8 cam_offset)
7659 {
7660         struct mac_configuration_cmd_e1h *config =
7661                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7662
7663         config->hdr.length = 1;
7664         config->hdr.offset = cam_offset;
7665         config->hdr.client_id = 0xff;
7666         config->hdr.reserved1 = 0;
7667
7668         /* primary MAC */
7669         config->config_table[0].msb_mac_addr =
7670                                         swab16(*(u16 *)&mac[0]);
7671         config->config_table[0].middle_mac_addr =
7672                                         swab16(*(u16 *)&mac[2]);
7673         config->config_table[0].lsb_mac_addr =
7674                                         swab16(*(u16 *)&mac[4]);
7675         config->config_table[0].clients_bit_vector =
7676                                         cpu_to_le32(cl_bit_vec);
7677         config->config_table[0].vlan_id = 0;
7678         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7679         if (set)
7680                 config->config_table[0].flags = BP_PORT(bp);
7681         else
7682                 config->config_table[0].flags =
7683                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7684
7685         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7686            (set ? "setting" : "clearing"),
7687            config->config_table[0].msb_mac_addr,
7688            config->config_table[0].middle_mac_addr,
7689            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7690
7691         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7692                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7693                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7694 }
7695
7696 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7697                              int *state_p, int poll)
7698 {
7699         /* can take a while if any port is running (~5 s: 5000 x msleep(1)) */
7700         int cnt = 5000;
7701
7702         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7703            poll ? "polling" : "waiting", state, idx);
7704
7705         might_sleep();
7706         while (cnt--) {
7707                 if (poll) {
7708                         bnx2x_rx_int(bp->fp, 10);
7709                         /* if index is different from 0
7710                          * the reply for some commands will
7711                          * be on the non default queue
7712                          */
7713                         if (idx)
7714                                 bnx2x_rx_int(&bp->fp[idx], 10);
7715                 }
7716
7717                 mb(); /* state is changed by bnx2x_sp_event() */
7718                 if (*state_p == state) {
7719 #ifdef BNX2X_STOP_ON_ERROR
7720                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7721 #endif
7722                         return 0;
7723                 }
7724
7725                 msleep(1);
7726
7727                 if (bp->panic)
7728                         return -EIO;
7729         }
7730
7731         /* timeout! */
7732         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7733                   poll ? "polling" : "waiting", state, idx);
7734 #ifdef BNX2X_STOP_ON_ERROR
7735         bnx2x_panic();
7736 #endif
7737
7738         return -EBUSY;
7739 }
7740
7741 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7742 {
7743         bp->set_mac_pending++;
7744         smp_wmb();
7745
7746         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7747                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7748
7749         /* Wait for a completion */
7750         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7751 }
7752
7753 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7754 {
7755         bp->set_mac_pending++;
7756         smp_wmb();
7757
7758         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7759                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7760                                   1);
7761
7762         /* Wait for a completion */
7763         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7764 }
7765
7766 #ifdef BCM_CNIC
7767 /**
7768  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7769  * MAC(s). This function will wait until the ramrod completion
7770  * returns.
7771  *
7772  * @param bp driver handle
7773  * @param set set or clear the CAM entry
7774  *
7775  * @return 0 if success, -ENODEV if ramrod doesn't return.
7776  */
7777 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7778 {
7779         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7780
7781         bp->set_mac_pending++;
7782         smp_wmb();
7783
7784         /* Send a SET_MAC ramrod */
7785         if (CHIP_IS_E1(bp))
7786                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7787                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7788                                   1);
7789         else
7790                 /* CAM allocation for E1H
7791                 * unicasts: by func number
7792                 * multicast: 20+FUNC*20, 20 each
7793                 */
7794                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7795                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7796
7797         /* Wait for a completion when setting */
7798         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7799
7800         return 0;
7801 }
7802 #endif
7803
7804 static int bnx2x_setup_leading(struct bnx2x *bp)
7805 {
7806         int rc;
7807
7808         /* reset IGU state */
7809         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7810
7811         /* SETUP ramrod */
7812         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7813
7814         /* Wait for completion */
7815         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7816
7817         return rc;
7818 }
7819
7820 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7821 {
7822         struct bnx2x_fastpath *fp = &bp->fp[index];
7823
7824         /* reset IGU state */
7825         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7826
7827         /* SETUP ramrod */
7828         fp->state = BNX2X_FP_STATE_OPENING;
7829         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7830                       fp->cl_id, 0);
7831
7832         /* Wait for completion */
7833         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7834                                  &(fp->state), 0);
7835 }
7836
7837 static int bnx2x_poll(struct napi_struct *napi, int budget);
7838
7839 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7840 {
7841
7842         switch (bp->multi_mode) {
7843         case ETH_RSS_MODE_DISABLED:
7844                 bp->num_queues = 1;
7845                 break;
7846
7847         case ETH_RSS_MODE_REGULAR:
7848                 if (num_queues)
7849                         bp->num_queues = min_t(u32, num_queues,
7850                                                   BNX2X_MAX_QUEUES(bp));
7851                 else
7852                         bp->num_queues = min_t(u32, num_online_cpus(),
7853                                                   BNX2X_MAX_QUEUES(bp));
7854                 break;
7855
7856
7857         default:
7858                 bp->num_queues = 1;
7859                 break;
7860         }
7861 }
7862
7863 static int bnx2x_set_num_queues(struct bnx2x *bp)
7864 {
7865         int rc = 0;
7866
7867         switch (int_mode) {
7868         case INT_MODE_INTx:
7869         case INT_MODE_MSI:
7870                 bp->num_queues = 1;
7871                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7872                 break;
7873         default:
7874                 /* Set number of queues according to bp->multi_mode value */
7875                 bnx2x_set_num_queues_msix(bp);
7876
7877                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7878                    bp->num_queues);
7879
7880                 /* try to enable MSI-X with the requested number of
7881                  * fastpath queues; if that fails, fall back to MSI or
7882                  * legacy INTx with a single queue
7883                  */
7884                 rc = bnx2x_enable_msix(bp);
7885                 if (rc)
7886                         /* failed to enable MSI-X */
7887                         bp->num_queues = 1;
7888                 break;
7889         }
7890         bp->dev->real_num_tx_queues = bp->num_queues;
7891         return rc;
7892 }
7893
7894 #ifdef BCM_CNIC
7895 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7896 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7897 #endif
7898
7899 /* must be called with rtnl_lock */
7900 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7901 {
7902         u32 load_code;
7903         int i, rc;
7904
7905 #ifdef BNX2X_STOP_ON_ERROR
7906         if (unlikely(bp->panic))
7907                 return -EPERM;
7908 #endif
7909
7910         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7911
7912         rc = bnx2x_set_num_queues(bp);
7913
7914         if (bnx2x_alloc_mem(bp)) {
7915                 bnx2x_free_irq(bp, true);
7916                 return -ENOMEM;
7917         }
7918
7919         for_each_queue(bp, i)
7920                 bnx2x_fp(bp, i, disable_tpa) =
7921                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7922
7923         for_each_queue(bp, i)
7924                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7925                                bnx2x_poll, 128);
7926
7927         bnx2x_napi_enable(bp);
7928
7929         if (bp->flags & USING_MSIX_FLAG) {
7930                 rc = bnx2x_req_msix_irqs(bp);
7931                 if (rc) {
7932                         bnx2x_free_irq(bp, true);
7933                         goto load_error1;
7934                 }
7935         } else {
7936                 /* Fall back to INTx if we failed to enable MSI-X due to
7937                    lack of memory (in bnx2x_set_num_queues()) */
7938                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7939                         bnx2x_enable_msi(bp);
7940                 bnx2x_ack_int(bp);
7941                 rc = bnx2x_req_irq(bp);
7942                 if (rc) {
7943                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7944                         bnx2x_free_irq(bp, true);
7945                         goto load_error1;
7946                 }
7947                 if (bp->flags & USING_MSI_FLAG) {
7948                         bp->dev->irq = bp->pdev->irq;
7949                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
7950                                     bp->pdev->irq);
7951                 }
7952         }
7953
7954         /* Send LOAD_REQUEST command to MCP.
7955            The reply encodes the type of LOAD command:
7956            if this is the first port to be initialized,
7957            the common blocks are initialized as well; otherwise they are not
7958         */
7959         if (!BP_NOMCP(bp)) {
7960                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7961                 if (!load_code) {
7962                         BNX2X_ERR("MCP response failure, aborting\n");
7963                         rc = -EBUSY;
7964                         goto load_error2;
7965                 }
7966                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7967                         rc = -EBUSY; /* other port in diagnostic mode */
7968                         goto load_error2;
7969                 }
7970
7971         } else {
7972                 int port = BP_PORT(bp);
7973
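                 /* load_count[0] appears to count loads for the whole chip
                  * and load_count[1 + port] per port; the first load in each
                  * scope selects COMMON or PORT init respectively */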
7974                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7975                    load_count[0], load_count[1], load_count[2]);
7976                 load_count[0]++;
7977                 load_count[1 + port]++;
7978                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7979                    load_count[0], load_count[1], load_count[2]);
7980                 if (load_count[0] == 1)
7981                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7982                 else if (load_count[1 + port] == 1)
7983                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7984                 else
7985                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7986         }
7987
7988         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7989             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7990                 bp->port.pmf = 1;
7991         else
7992                 bp->port.pmf = 0;
7993         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7994
7995         /* Initialize HW */
7996         rc = bnx2x_init_hw(bp, load_code);
7997         if (rc) {
7998                 BNX2X_ERR("HW init failed, aborting\n");
7999                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8000                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8001                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8002                 goto load_error2;
8003         }
8004
8005         /* Setup NIC internals and enable interrupts */
8006         bnx2x_nic_init(bp, load_code);
8007
8008         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8009             (bp->common.shmem2_base))
8010                 SHMEM2_WR(bp, dcc_support,
8011                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8012                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8013
8014         /* Send LOAD_DONE command to MCP */
8015         if (!BP_NOMCP(bp)) {
8016                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8017                 if (!load_code) {
8018                         BNX2X_ERR("MCP response failure, aborting\n");
8019                         rc = -EBUSY;
8020                         goto load_error3;
8021                 }
8022         }
8023
8024         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8025
8026         rc = bnx2x_setup_leading(bp);
8027         if (rc) {
8028                 BNX2X_ERR("Setup leading failed!\n");
8029 #ifndef BNX2X_STOP_ON_ERROR
8030                 goto load_error3;
8031 #else
8032                 bp->panic = 1;
8033                 return -EBUSY;
8034 #endif
8035         }
8036
8037         if (CHIP_IS_E1H(bp))
8038                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8039                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8040                         bp->flags |= MF_FUNC_DIS;
8041                 }
8042
8043         if (bp->state == BNX2X_STATE_OPEN) {
8044 #ifdef BCM_CNIC
8045                 /* Enable Timer scan */
8046                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8047 #endif
8048                 for_each_nondefault_queue(bp, i) {
8049                         rc = bnx2x_setup_multi(bp, i);
8050                         if (rc)
8051 #ifdef BCM_CNIC
8052                                 goto load_error4;
8053 #else
8054                                 goto load_error3;
8055 #endif
8056                 }
8057
8058                 if (CHIP_IS_E1(bp))
8059                         bnx2x_set_eth_mac_addr_e1(bp, 1);
8060                 else
8061                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
8062 #ifdef BCM_CNIC
8063                 /* Set iSCSI L2 MAC */
8064                 mutex_lock(&bp->cnic_mutex);
8065                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8066                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8067                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8068                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8069                                       CNIC_SB_ID(bp));
8070                 }
8071                 mutex_unlock(&bp->cnic_mutex);
8072 #endif
8073         }
8074
8075         if (bp->port.pmf)
8076                 bnx2x_initial_phy_init(bp, load_mode);
8077
8078         /* Start fast path */
8079         switch (load_mode) {
8080         case LOAD_NORMAL:
8081                 if (bp->state == BNX2X_STATE_OPEN) {
8082                         /* Tx queues should only be re-enabled */
8083                         netif_tx_wake_all_queues(bp->dev);
8084                 }
8085                 /* Initialize the receive filter. */
8086                 bnx2x_set_rx_mode(bp->dev);
8087                 break;
8088
8089         case LOAD_OPEN:
8090                 netif_tx_start_all_queues(bp->dev);
8091                 if (bp->state != BNX2X_STATE_OPEN)
8092                         netif_tx_disable(bp->dev);
8093                 /* Initialize the receive filter. */
8094                 bnx2x_set_rx_mode(bp->dev);
8095                 break;
8096
8097         case LOAD_DIAG:
8098                 /* Initialize the receive filter. */
8099                 bnx2x_set_rx_mode(bp->dev);
8100                 bp->state = BNX2X_STATE_DIAG;
8101                 break;
8102
8103         default:
8104                 break;
8105         }
8106
8107         if (!bp->port.pmf)
8108                 bnx2x__link_status_update(bp);
8109
8110         /* start the timer */
8111         mod_timer(&bp->timer, jiffies + bp->current_interval);
8112
8113 #ifdef BCM_CNIC
8114         bnx2x_setup_cnic_irq_info(bp);
8115         if (bp->state == BNX2X_STATE_OPEN)
8116                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8117 #endif
8118         bnx2x_inc_load_cnt(bp);
8119
8120         return 0;
8121
8122 #ifdef BCM_CNIC
8123 load_error4:
8124         /* Disable Timer scan */
8125         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8126 #endif
8127 load_error3:
8128         bnx2x_int_disable_sync(bp, 1);
8129         if (!BP_NOMCP(bp)) {
8130                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8131                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8132         }
8133         bp->port.pmf = 0;
8134         /* Free SKBs, SGEs, TPA pool and driver internals */
8135         bnx2x_free_skbs(bp);
8136         for_each_queue(bp, i)
8137                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8138 load_error2:
8139         /* Release IRQs */
8140         bnx2x_free_irq(bp, false);
8141 load_error1:
8142         bnx2x_napi_disable(bp);
8143         for_each_queue(bp, i)
8144                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8145         bnx2x_free_mem(bp);
8146
8147         return rc;
8148 }
8149
8150 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8151 {
8152         struct bnx2x_fastpath *fp = &bp->fp[index];
8153         int rc;
8154
8155         /* halt the connection */
8156         fp->state = BNX2X_FP_STATE_HALTING;
8157         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8158
8159         /* Wait for completion */
8160         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8161                                &(fp->state), 1);
8162         if (rc) /* timeout */
8163                 return rc;
8164
8165         /* delete cfc entry */
8166         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8167
8168         /* Wait for completion */
8169         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8170                                &(fp->state), 1);
8171         return rc;
8172 }
8173
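/* Tear down the leading connection.  Unlike the multi connections, the
 * PORT_DELETE completion arrives on the default status block, so it is
 * detected by polling dsb_sp_prod rather than the fastpath state.
 */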
8174 static int bnx2x_stop_leading(struct bnx2x *bp)
8175 {
8176         __le16 dsb_sp_prod_idx;
8177         /* if the other port is handling traffic,
8178            this can take a lot of time */
8179         int cnt = 500;
8180         int rc;
8181
8182         might_sleep();
8183
8184         /* Send HALT ramrod */
8185         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8186         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8187
8188         /* Wait for completion */
8189         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8190                                &(bp->fp[0].state), 1);
8191         if (rc) /* timeout */
8192                 return rc;
8193
8194         dsb_sp_prod_idx = *bp->dsb_sp_prod;
8195
8196         /* Send PORT_DELETE ramrod */
8197         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8198
	/* Wait for completion to arrive on the default status block.
	   We are going to reset the chip anyway, so there is not much
	   to do if this times out.
	 */
8203         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8204                 if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del: "
			   "dsb_sp_prod 0x%x == dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
8208 #ifdef BNX2X_STOP_ON_ERROR
8209                         bnx2x_panic();
8210 #endif
8211                         rc = -EBUSY;
8212                         break;
8213                 }
8214                 cnt--;
8215                 msleep(1);
8216                 rmb(); /* Refresh the dsb_sp_prod */
8217         }
8218         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8219         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8220
8221         return rc;
8222 }
8223
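/* Function-level reset: mask the HC leading/trailing edges, stop the
 * CNIC timer scan (when built in) and clear this function's ILT
 * entries.
 */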
8224 static void bnx2x_reset_func(struct bnx2x *bp)
8225 {
8226         int port = BP_PORT(bp);
8227         int func = BP_FUNC(bp);
8228         int base, i;
8229
8230         /* Configure IGU */
8231         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8232         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8233
8234 #ifdef BCM_CNIC
8235         /* Disable Timer scan */
8236         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 seconds for the timer scan to
	 * complete
	 */
8241         for (i = 0; i < 200; i++) {
8242                 msleep(10);
8243                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8244                         break;
8245         }
8246 #endif
8247         /* Clear ILT */
8248         base = FUNC_ILT_BASE(func);
8249         for (i = base; i < base + ILT_PER_FUNC; i++)
8250                 bnx2x_ilt_wr(bp, i, 0);
8251 }
8252
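/* Port-level reset: mask the NIG interrupt, stop traffic into the BRB
 * (except packets destined to the MCP), mask the AEU attentions and
 * verify that the BRB has drained.
 */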
8253 static void bnx2x_reset_port(struct bnx2x *bp)
8254 {
8255         int port = BP_PORT(bp);
8256         u32 val;
8257
8258         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8259
8260         /* Do not rcv packets to BRB */
8261         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8262         /* Do not direct rcv packets that are not for MCP to the BRB */
8263         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8264                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8265
8266         /* Configure AEU */
8267         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8268
8269         msleep(100);
8270         /* Check for BRB port occupancy */
8271         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8272         if (val)
8273                 DP(NETIF_MSG_IFDOWN,
8274                    "BRB1 is not empty  %d blocks are occupied\n", val);
8275
8276         /* TODO: Close Doorbell port? */
8277 }
8278
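/* Reset the scope the MCP has granted us: COMMON resets the port, the
 * function and the common blocks; PORT resets the port and the
 * function; FUNCTION resets the function only.
 */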
8279 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8280 {
8281         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
8282            BP_FUNC(bp), reset_code);
8283
8284         switch (reset_code) {
8285         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8286                 bnx2x_reset_port(bp);
8287                 bnx2x_reset_func(bp);
8288                 bnx2x_reset_common(bp);
8289                 break;
8290
8291         case FW_MSG_CODE_DRV_UNLOAD_PORT:
8292                 bnx2x_reset_port(bp);
8293                 bnx2x_reset_func(bp);
8294                 break;
8295
8296         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8297                 bnx2x_reset_func(bp);
8298                 break;
8299
8300         default:
8301                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8302                 break;
8303         }
8304 }
8305
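/* Quiesce the chip on unload: drain the Tx fastpath queues, remove the
 * MAC/multicast configuration (optionally programming the EMAC WoL
 * match entries instead), close all connections and negotiate the
 * reset scope with the MCP (or derive it from the load counts when
 * there is no MCP) before resetting the chip.
 */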
8306 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8307 {
8308         int port = BP_PORT(bp);
8309         u32 reset_code = 0;
8310         int i, cnt, rc;
8311
8312         /* Wait until tx fastpath tasks complete */
8313         for_each_queue(bp, i) {
8314                 struct bnx2x_fastpath *fp = &bp->fp[i];
8315
8316                 cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {
			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				/* can't return a code - the function is void */
				return;
#else
				break;
#endif
8329                         }
8330                         cnt--;
8331                         msleep(1);
8332                 }
8333         }
8334         /* Give HW time to discard old tx messages */
8335         msleep(1);
8336
8337         if (CHIP_IS_E1(bp)) {
8338                 struct mac_configuration_cmd *config =
8339                                                 bnx2x_sp(bp, mcast_config);
8340
8341                 bnx2x_set_eth_mac_addr_e1(bp, 0);
8342
8343                 for (i = 0; i < config->hdr.length; i++)
8344                         CAM_INVALIDATE(config->config_table[i]);
8345
8346                 config->hdr.length = i;
8347                 if (CHIP_REV_IS_SLOW(bp))
8348                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8349                 else
8350                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8351                 config->hdr.client_id = bp->fp->cl_id;
8352                 config->hdr.reserved1 = 0;
8353
8354                 bp->set_mac_pending++;
8355                 smp_wmb();
8356
8357                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8358                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8359                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8360
8361         } else { /* E1H */
8362                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8363
8364                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8365
8366                 for (i = 0; i < MC_HASH_SIZE; i++)
8367                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8368
8369                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8370         }
8371 #ifdef BCM_CNIC
8372         /* Clear iSCSI L2 MAC */
8373         mutex_lock(&bp->cnic_mutex);
8374         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8375                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8376                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8377         }
8378         mutex_unlock(&bp->cnic_mutex);
8379 #endif
8380
8381         if (unload_mode == UNLOAD_NORMAL)
8382                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8383
8384         else if (bp->flags & NO_WOL_FLAG)
8385                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8386
8387         else if (bp->wol) {
8388                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8389                 u8 *mac_addr = bp->dev->dev_addr;
8390                 u32 val;
8391                 /* The mac address is written to entries 1-4 to
8392                    preserve entry 0 which is used by the PMF */
8393                 u8 entry = (BP_E1HVN(bp) + 1)*8;
8394
8395                 val = (mac_addr[0] << 8) | mac_addr[1];
8396                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8397
8398                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8399                       (mac_addr[4] << 8) | mac_addr[5];
8400                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8401
8402                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8403
8404         } else
8405                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8406
	/* Close multi and leading connections.
	   Completions for ramrods are collected in a synchronous way */
8409         for_each_nondefault_queue(bp, i)
8410                 if (bnx2x_stop_multi(bp, i))
8411                         goto unload_error;
8412
8413         rc = bnx2x_stop_leading(bp);
8414         if (rc) {
8415                 BNX2X_ERR("Stop leading failed!\n");
8416 #ifdef BNX2X_STOP_ON_ERROR
		return;		/* can't return -EBUSY - the function is void */
8418 #else
8419                 goto unload_error;
8420 #endif
8421         }
8422
8423 unload_error:
8424         if (!BP_NOMCP(bp))
8425                 reset_code = bnx2x_fw_command(bp, reset_code);
8426         else {
8427                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
8428                    load_count[0], load_count[1], load_count[2]);
8429                 load_count[0]--;
8430                 load_count[1 + port]--;
8431                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
8432                    load_count[0], load_count[1], load_count[2]);
8433                 if (load_count[0] == 0)
8434                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8435                 else if (load_count[1 + port] == 0)
8436                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8437                 else
8438                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8439         }
8440
8441         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8442             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8443                 bnx2x__link_reset(bp);
8444
8445         /* Reset the chip */
8446         bnx2x_reset_chip(bp, reset_code);
8447
8448         /* Report UNLOAD_DONE to MCP */
8449         if (!BP_NOMCP(bp))
8450                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8451
8452 }
8453
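/* Disable the AEU "close the gates" masks (the PXP/NIG close masks on
 * E1H, bits 8-9 of the per-port attention mask on E1).  Called by the
 * last driver instance when no parity attention or "process kill" is
 * pending.
 */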
8454 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8455 {
8456         u32 val;
8457
8458         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8459
8460         if (CHIP_IS_E1(bp)) {
8461                 int port = BP_PORT(bp);
8462                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8463                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
8464
8465                 val = REG_RD(bp, addr);
8466                 val &= ~(0x300);
8467                 REG_WR(bp, addr, val);
8468         } else if (CHIP_IS_E1H(bp)) {
8469                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8470                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8471                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8472                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8473         }
8474 }
8475
8476 /* must be called with rtnl_lock */
8477 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8478 {
8479         int i;
8480
8481         if (bp->state == BNX2X_STATE_CLOSED) {
8482                 /* Interface has been removed - nothing to recover */
8483                 bp->recovery_state = BNX2X_RECOVERY_DONE;
8484                 bp->is_leader = 0;
8485                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8486                 smp_wmb();
8487
8488                 return -EINVAL;
8489         }
8490
8491 #ifdef BCM_CNIC
8492         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8493 #endif
8494         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8495
8496         /* Set "drop all" */
8497         bp->rx_mode = BNX2X_RX_MODE_NONE;
8498         bnx2x_set_storm_rx_mode(bp);
8499
8500         /* Disable HW interrupts, NAPI and Tx */
8501         bnx2x_netif_stop(bp, 1);
8502
8503         del_timer_sync(&bp->timer);
8504         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8505                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8506         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8507
8508         /* Release IRQs */
8509         bnx2x_free_irq(bp, false);
8510
8511         /* Cleanup the chip if needed */
8512         if (unload_mode != UNLOAD_RECOVERY)
8513                 bnx2x_chip_cleanup(bp, unload_mode);
8514
8515         bp->port.pmf = 0;
8516
8517         /* Free SKBs, SGEs, TPA pool and driver internals */
8518         bnx2x_free_skbs(bp);
8519         for_each_queue(bp, i)
8520                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8521         for_each_queue(bp, i)
8522                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8523         bnx2x_free_mem(bp);
8524
8525         bp->state = BNX2X_STATE_CLOSED;
8526
8527         netif_carrier_off(bp->dev);
8528
	/* The last driver must disable "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
8532         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8533             bnx2x_reset_is_done(bp))
8534                 bnx2x_disable_close_the_gate(bp);
8535
	/* Reset the MCP mailbox sequence if recovery is ongoing */
8537         if (unload_mode == UNLOAD_RECOVERY)
8538                 bp->fw_seq = 0;
8539
8540         return 0;
8541 }
8542
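/* Recovery flow gate control: gate #4 discards host doorbells
 * (PXP_REG_HST_DISCARD_DOORBELLS) and gate #2 discards internal host
 * writes (PXP_REG_HST_DISCARD_INTERNAL_WRITES) - both on "not E1"
 * only - while gate #3 is toggled through the HC config register.
 */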
8543 /* Close gates #2, #3 and #4: */
8544 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8545 {
8546         u32 val, addr;
8547
8548         /* Gates #2 and #4a are closed/opened for "not E1" only */
8549         if (!CHIP_IS_E1(bp)) {
8550                 /* #4 */
8551                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8552                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8553                        close ? (val | 0x1) : (val & (~(u32)1)));
8554                 /* #2 */
8555                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8556                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8557                        close ? (val | 0x1) : (val & (~(u32)1)));
8558         }
8559
8560         /* #3 */
8561         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8562         val = REG_RD(bp, addr);
8563         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8564
8565         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8566                 close ? "closing" : "opening");
8567         mmiowb();
8568 }
8569
8570 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
8571
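/* The `magic' bit in the shared MF configuration CLP mailbox is set
 * before an MCP reset so that the MF configuration is preserved across
 * it; the original bit value is saved here and restored by
 * bnx2x_clp_reset_done() once the MCP is back up.
 */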
8572 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8573 {
	/* Save the current value of the `magic' bit and set it */
8575         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8576         *magic_val = val & SHARED_MF_CLP_MAGIC;
8577         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8578 }
8579
/* Restore the value of the `magic' bit.
 *
 * @param bp Driver handle.
 * @param magic_val Old value of the `magic' bit.
 */
8585 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8586 {
	/* Restore the `magic' bit value */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);

	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8594 }
8595
8596 /* Prepares for MCP reset: takes care of CLP configurations.
8597  *
8598  * @param bp
8599  * @param magic_val Old value of 'magic' bit.
8600  */
8601 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8602 {
8603         u32 shmem;
8604         u32 validity_offset;
8605
8606         DP(NETIF_MSG_HW, "Starting\n");
8607
8608         /* Set `magic' bit in order to save MF config */
8609         if (!CHIP_IS_E1(bp))
8610                 bnx2x_clp_reset_prep(bp, magic_val);
8611
8612         /* Get shmem offset */
8613         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8614         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8615
8616         /* Clear validity map flags */
8617         if (shmem > 0)
8618                 REG_WR(bp, shmem + validity_offset, 0);
8619 }
8620
8621 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
8622 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
8623
8624 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8625  * depending on the HW type.
8626  *
8627  * @param bp
8628  */
8629 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8630 {
8631         /* special handling for emulation and FPGA,
8632            wait 10 times longer */
8633         if (CHIP_REV_IS_SLOW(bp))
8634                 msleep(MCP_ONE_TIMEOUT*10);
8635         else
8636                 msleep(MCP_ONE_TIMEOUT);
8637 }
8638
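/* Wait for the MCP to come back up after a reset: poll the shmem
 * validity map until both the DEV_INFO and MB signatures appear (up to
 * MCP_TIMEOUT, ten times longer on emulation/FPGA), then restore the
 * CLP `magic' bit.
 */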
8639 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8640 {
8641         u32 shmem, cnt, validity_offset, val;
8642         int rc = 0;
8643
8644         msleep(100);
8645
8646         /* Get shmem offset */
8647         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8648         if (shmem == 0) {
		BNX2X_ERR("Shmem base address is 0 - MCP is not up\n");
8650                 rc = -ENOTTY;
8651                 goto exit_lbl;
8652         }
8653
8654         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8655
8656         /* Wait for MCP to come up */
8657         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: it is best to check the validity map of the last
		 * port; currently this checks port 0 only.
		 */
8661                 val = REG_RD(bp, shmem + validity_offset);
8662                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8663                    shmem + validity_offset, val);
8664
8665                 /* check that shared memory is valid. */
8666                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8667                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8668                         break;
8669
8670                 bnx2x_mcp_wait_one(bp);
8671         }
8672
8673         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8674
8675         /* Check that shared memory is valid. This indicates that MCP is up. */
8676         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8677             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8678                 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8679                 rc = -ENOTTY;
8680                 goto exit_lbl;
8681         }
8682
8683 exit_lbl:
8684         /* Restore the `magic' bit value */
8685         if (!CHIP_IS_E1(bp))
8686                 bnx2x_clp_reset_done(bp, magic_val);
8687
8688         return rc;
8689 }
8690
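/* Clear the PXP2 start-init and "done" indications, presumably so the
 * block re-initializes cleanly after the coming reset; no-op on E1.
 */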
8691 static void bnx2x_pxp_prep(struct bnx2x *bp)
8692 {
8693         if (!CHIP_IS_E1(bp)) {
8694                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8695                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8696                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8697                 mmiowb();
8698         }
8699 }
8700
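/* Note on the reset registers: writing a bit to the _CLEAR register
 * puts the corresponding block into reset, while writing it to the
 * _SET register takes it out again (see also bnx2x_undi_unload()
 * below).
 */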
8701 /*
8702  * Reset the whole chip except for:
8703  *      - PCIE core
8704  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8705  *              one reset bit)
8706  *      - IGU
8707  *      - MISC (including AEU)
8708  *      - GRC
8709  *      - RBCN, RBCP
8710  */
8711 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8712 {
8713         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8714
8715         not_reset_mask1 =
8716                 MISC_REGISTERS_RESET_REG_1_RST_HC |
8717                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8718                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8719
8720         not_reset_mask2 =
8721                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8722                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8723                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8724                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8725                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8726                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
8727                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8728                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8729
8730         reset_mask1 = 0xffffffff;
8731
8732         if (CHIP_IS_E1(bp))
8733                 reset_mask2 = 0xffff;
8734         else
8735                 reset_mask2 = 0x1ffff;
8736
8737         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8738                reset_mask1 & (~not_reset_mask1));
8739         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8740                reset_mask2 & (~not_reset_mask2));
8741
8742         barrier();
8743         mmiowb();
8744
8745         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8746         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8747         mmiowb();
8748 }
8749
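/* "Process kill" - the big hammer of the recovery flow: wait for the
 * PXP Tetris buffer to drain, close gates #2-#4, clear the
 * "unprepared" bit, prepare the MCP and PXP for reset, reset the chip,
 * then wait for the MCP to come back and re-open the gates.
 */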
8750 static int bnx2x_process_kill(struct bnx2x *bp)
8751 {
8752         int cnt = 1000;
8753         u32 val = 0;
8754         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8755
8757         /* Empty the Tetris buffer, wait for 1s */
8758         do {
8759                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8760                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8761                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8762                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8763                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8764                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8765                     ((port_is_idle_0 & 0x1) == 0x1) &&
8766                     ((port_is_idle_1 & 0x1) == 0x1) &&
8767                     (pgl_exp_rom2 == 0xffffffff))
8768                         break;
8769                 msleep(1);
8770         } while (cnt-- > 0);
8771
8772         if (cnt <= 0) {
8773                 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8774                           " are still"
8775                           " outstanding read requests after 1s!\n");
8776                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8777                           " port_is_idle_0=0x%08x,"
8778                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8779                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8780                           pgl_exp_rom2);
8781                 return -EAGAIN;
8782         }
8783
8784         barrier();
8785
8786         /* Close gates #2, #3 and #4 */
8787         bnx2x_set_234_gates(bp, true);
8788
8789         /* TBD: Indicate that "process kill" is in progress to MCP */
8790
8791         /* Clear "unprepared" bit */
8792         REG_WR(bp, MISC_REG_UNPREPARED, 0);
8793         barrier();
8794
8795         /* Make sure all is written to the chip before the reset */
8796         mmiowb();
8797
8798         /* Wait for 1ms to empty GLUE and PCI-E core queues,
8799          * PSWHST, GRC and PSWRD Tetris buffer.
8800          */
8801         msleep(1);
8802
	/* Prepare for the chip reset: */
8804         /* MCP */
8805         bnx2x_reset_mcp_prep(bp, &val);
8806
8807         /* PXP */
8808         bnx2x_pxp_prep(bp);
8809         barrier();
8810
8811         /* reset the chip */
8812         bnx2x_process_kill_chip_reset(bp);
8813         barrier();
8814
8815         /* Recover after reset: */
8816         /* MCP */
8817         if (bnx2x_reset_mcp_comp(bp, val))
8818                 return -EAGAIN;
8819
8820         /* PXP */
8821         bnx2x_pxp_prep(bp);
8822
8823         /* Open the gates #2, #3 and #4 */
8824         bnx2x_set_234_gates(bp, false);
8825
	/* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
	 * reset state, re-enable attentions. */
8828
8829         return 0;
8830 }
8831
8832 static int bnx2x_leader_reset(struct bnx2x *bp)
8833 {
8834         int rc = 0;
8835         /* Try to recover after the failure */
8836         if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
8838                        bp->dev->name);
8839                 rc = -EAGAIN;
8840                 goto exit_leader_reset;
8841         }
8842
8843         /* Clear "reset is in progress" bit and update the driver state */
8844         bnx2x_set_reset_done(bp);
8845         bp->recovery_state = BNX2X_RECOVERY_DONE;
8846
8847 exit_leader_reset:
8848         bp->is_leader = 0;
8849         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8850         smp_wmb();
8851         return rc;
8852 }
8853
8854 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8855
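/* Parity recovery state machine: the first function to take the
 * LEADER_LOCK becomes the leader and performs the "process kill" once
 * every other function has unloaded; non-leaders unload themselves and
 * reload only after the leader has signalled that the reset is done.
 */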
/* Assumption: runs under rtnl lock. This, together with the fact
 * that it's called only from bnx2x_reset_task(), ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */
8860 static void bnx2x_parity_recover(struct bnx2x *bp)
8861 {
8862         DP(NETIF_MSG_HW, "Handling parity\n");
8863         while (1) {
8864                 switch (bp->recovery_state) {
8865                 case BNX2X_RECOVERY_INIT:
8866                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8867                         /* Try to get a LEADER_LOCK HW lock */
8868                         if (bnx2x_trylock_hw_lock(bp,
8869                                 HW_LOCK_RESOURCE_RESERVED_08))
8870                                 bp->is_leader = 1;
8871
			/* Stop the driver; if the interface has been
			 * removed - bail out */
8874                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8875                                 return;
8876
8877                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
8878                         /* Ensure "is_leader" and "recovery_state"
8879                          *  update values are seen on other CPUs
8880                          */
8881                         smp_wmb();
8882                         break;
8883
8884                 case BNX2X_RECOVERY_WAIT:
8885                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8886                         if (bp->is_leader) {
8887                                 u32 load_counter = bnx2x_get_load_cnt(bp);
8888                                 if (load_counter) {
8889                                         /* Wait until all other functions get
8890                                          * down.
8891                                          */
8892                                         schedule_delayed_work(&bp->reset_task,
8893                                                                 HZ/10);
8894                                         return;
8895                                 } else {
8896                                         /* If all other functions got down -
8897                                          * try to bring the chip back to
8898                                          * normal. In any case it's an exit
8899                                          * point for a leader.
8900                                          */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR "%s: Recovery "
						       "has failed. Power cycle "
						       "is needed.\n",
						       bp->dev->name);
8906                                                 /* Disconnect this device */
8907                                                 netif_device_detach(bp->dev);
8908                                                 /* Block ifup for all function
8909                                                  * of this ASIC until
8910                                                  * "process kill" or power
8911                                                  * cycle.
8912                                                  */
8913                                                 bnx2x_set_reset_in_progress(bp);
8914                                                 /* Shut down the power */
8915                                                 bnx2x_set_power_state(bp,
8916                                                                 PCI_D3hot);
8917                                                 return;
8918                                         }
8919
8920                                         return;
8921                                 }
8922                         } else { /* non-leader */
8923                                 if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock,
					 * since the former leader may have
					 * been unloaded by the user or may
					 * have released leadership for
					 * another reason.
					 */
8930                                         if (bnx2x_trylock_hw_lock(bp,
8931                                             HW_LOCK_RESOURCE_RESERVED_08)) {
8932                                                 /* I'm a leader now! Restart a
8933                                                  * switch case.
8934                                                  */
8935                                                 bp->is_leader = 1;
8936                                                 break;
8937                                         }
8938
8939                                         schedule_delayed_work(&bp->reset_task,
8940                                                                 HZ/10);
8941                                         return;
8942
8943                                 } else { /* A leader has completed
8944                                           * the "process kill". It's an exit
8945                                           * point for a non-leader.
8946                                           */
8947                                         bnx2x_nic_load(bp, LOAD_NORMAL);
8948                                         bp->recovery_state =
8949                                                 BNX2X_RECOVERY_DONE;
8950                                         smp_wmb();
8951                                         return;
8952                                 }
8953                         }
8954                 default:
8955                         return;
8956                 }
8957         }
8958 }
8959
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a generic workqueue in order to prevent a deadlock.
 */
8963 static void bnx2x_reset_task(struct work_struct *work)
8964 {
8965         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8966
8967 #ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
8971         return;
8972 #endif
8973
8974         rtnl_lock();
8975
8976         if (!netif_running(bp->dev))
8977                 goto reset_task_exit;
8978
8979         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8980                 bnx2x_parity_recover(bp);
8981         else {
8982                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8983                 bnx2x_nic_load(bp, LOAD_NORMAL);
8984         }
8985
8986 reset_task_exit:
8987         rtnl_unlock();
8988 }
8989
8990 /* end of nic load/unload */
8991
8992 /* ethtool_ops */
8993
8994 /*
8995  * Init service functions
8996  */
8997
8998 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8999 {
9000         switch (func) {
9001         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
9002         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9003         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9004         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9005         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9006         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9007         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9008         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9009         default:
9010                 BNX2X_ERR("Unsupported function index: %d\n", func);
9011                 return (u32)(-1);
9012         }
9013 }
9014
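/* Disable interrupts while "pretending" to be function 0: writing a
 * function number to the PGL pretend register makes the chip treat
 * subsequent GRC accesses as if they came from that function (hence
 * the "like-E1" mode below).
 */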
9015 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9016 {
9017         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9018
9019         /* Flush all outstanding writes */
9020         mmiowb();
9021
9022         /* Pretend to be function 0 */
9023         REG_WR(bp, reg, 0);
9024         /* Flush the GRC transaction (in the chip) */
9025         new_val = REG_RD(bp, reg);
9026         if (new_val != 0) {
9027                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9028                           new_val);
9029                 BUG();
9030         }
9031
9032         /* From now we are in the "like-E1" mode */
9033         bnx2x_int_disable(bp);
9034
9035         /* Flush all outstanding writes */
9036         mmiowb();
9037
	/* Restore the original function settings */
9039         REG_WR(bp, reg, orig_func);
9040         new_val = REG_RD(bp, reg);
9041         if (new_val != orig_func) {
9042                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9043                           orig_func, new_val);
9044                 BUG();
9045         }
9046 }
9047
9048 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9049 {
9050         if (CHIP_IS_E1H(bp))
9051                 bnx2x_undi_int_disable_e1h(bp, func);
9052         else
9053                 bnx2x_int_disable(bp);
9054 }
9055
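/* Detect a pre-boot UNDI driver left active by the BIOS (it sets the
 * CID offset for the normal doorbell to 0x7) and, if one is found,
 * unload it and reset the device so the chip is in a known state
 * before we initialize it.
 */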
9056 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9057 {
9058         u32 val;
9059
9060         /* Check if there is any driver already loaded */
9061         val = REG_RD(bp, MISC_REG_UNPREPARED);
9062         if (val == 0x1) {
		/* Check if it is the UNDI driver:
		 * the UNDI driver initializes the CID offset for the
		 * normal doorbell to 0x7
		 */
9066                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9067                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9068                 if (val == 0x7) {
9069                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9070                         /* save our func */
9071                         int func = BP_FUNC(bp);
9072                         u32 swap_en;
9073                         u32 swap_val;
9074
9075                         /* clear the UNDI indication */
9076                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9077
9078                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
9079
			/* try to unload UNDI on port 0 */
9081                         bp->func = 0;
9082                         bp->fw_seq =
9083                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9084                                 DRV_MSG_SEQ_NUMBER_MASK);
9085                         reset_code = bnx2x_fw_command(bp, reset_code);
9086
9087                         /* if UNDI is loaded on the other port */
9088                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9089
9090                                 /* send "DONE" for previous unload */
9091                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9092
9093                                 /* unload UNDI on port 1 */
9094                                 bp->func = 1;
9095                                 bp->fw_seq =
9096                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9097                                         DRV_MSG_SEQ_NUMBER_MASK);
9098                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9099
9100                                 bnx2x_fw_command(bp, reset_code);
9101                         }
9102
9103                         /* now it's safe to release the lock */
9104                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9105
9106                         bnx2x_undi_int_disable(bp, func);
9107
9108                         /* close input traffic and wait for it */
9109                         /* Do not rcv packets to BRB */
9110                         REG_WR(bp,
9111                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9112                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9113                         /* Do not direct rcv packets that are not for MCP to
9114                          * the BRB */
9115                         REG_WR(bp,
9116                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9117                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9118                         /* clear AEU */
9119                         REG_WR(bp,
9120                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9121                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9122                         msleep(10);
9123
9124                         /* save NIG port swap info */
9125                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9126                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9127                         /* reset device */
9128                         REG_WR(bp,
9129                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9130                                0xd3ffffff);
9131                         REG_WR(bp,
9132                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9133                                0x1403);
9134                         /* take the NIG out of reset and restore swap values */
9135                         REG_WR(bp,
9136                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9137                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
9138                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9139                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9140
9141                         /* send unload done to the MCP */
9142                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9143
9144                         /* restore our func and fw_seq */
9145                         bp->func = func;
9146                         bp->fw_seq =
9147                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9148                                 DRV_MSG_SEQ_NUMBER_MASK);
9149
9150                 } else
9151                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9152         }
9153 }
9154
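/* Read the chip identity, flash size, shmem bases and WoL capability
 * from the hardware and shared memory.  The chip id is composed as
 * (num << 16) | (rev << 12) | (metal << 4) | bond_id, per the comment
 * below.
 */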
9155 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9156 {
9157         u32 val, val2, val3, val4, id;
9158         u16 pmc;
9159
9160         /* Get the chip revision id and number. */
9161         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9162         val = REG_RD(bp, MISC_REG_CHIP_NUM);
9163         id = ((val & 0xffff) << 16);
9164         val = REG_RD(bp, MISC_REG_CHIP_REV);
9165         id |= ((val & 0xf) << 12);
9166         val = REG_RD(bp, MISC_REG_CHIP_METAL);
9167         id |= ((val & 0xff) << 4);
9168         val = REG_RD(bp, MISC_REG_BOND_ID);
9169         id |= (val & 0xf);
9170         bp->common.chip_id = id;
9171         bp->link_params.chip_id = bp->common.chip_id;
9172         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9173
9174         val = (REG_RD(bp, 0x2874) & 0x55);
9175         if ((bp->common.chip_id & 0x1) ||
9176             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9177                 bp->flags |= ONE_PORT_FLAG;
9178                 BNX2X_DEV_INFO("single port device\n");
9179         }
9180
9181         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9182         bp->common.flash_size = (NVRAM_1MB_SIZE <<
9183                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
9184         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9185                        bp->common.flash_size, bp->common.flash_size);
9186
9187         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9188         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9189         bp->link_params.shmem_base = bp->common.shmem_base;
9190         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
9191                        bp->common.shmem_base, bp->common.shmem2_base);
9192
9193         if (!bp->common.shmem_base ||
9194             (bp->common.shmem_base < 0xA0000) ||
9195             (bp->common.shmem_base >= 0xC0000)) {
9196                 BNX2X_DEV_INFO("MCP not active\n");
9197                 bp->flags |= NO_MCP_FLAG;
9198                 return;
9199         }
9200
9201         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9202         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9203                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9204                 BNX2X_ERROR("BAD MCP validity signature\n");
9205
9206         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9207         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9208
9209         bp->link_params.hw_led_mode = ((bp->common.hw_config &
9210                                         SHARED_HW_CFG_LED_MODE_MASK) >>
9211                                        SHARED_HW_CFG_LED_MODE_SHIFT);
9212
9213         bp->link_params.feature_config_flags = 0;
9214         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9215         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9216                 bp->link_params.feature_config_flags |=
9217                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9218         else
9219                 bp->link_params.feature_config_flags &=
9220                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9221
9222         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9223         bp->common.bc_ver = val;
9224         BNX2X_DEV_INFO("bc_ver %X\n", val);
9225         if (val < BNX2X_BC_VER) {
		/* for now only warn;
		 * later we might need to enforce this */
9228                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9229                             "please upgrade BC\n", BNX2X_BC_VER, val);
9230         }
9231         bp->link_params.feature_config_flags |=
9232                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9233                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9234
9235         if (BP_E1HVN(bp) == 0) {
9236                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9237                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9238         } else {
9239                 /* no WOL capability for E1HVN != 0 */
9240                 bp->flags |= NO_WOL_FLAG;
9241         }
9242         BNX2X_DEV_INFO("%sWoL capable\n",
9243                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
9244
9245         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9246         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9247         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9248         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9249
9250         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9251                  val, val2, val3, val4);
9252 }
9253
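/* Derive the ethtool "supported" mask of the port from the external
 * PHY type found in NVRAM, then trim it according to the NVRAM
 * speed_cap_mask.
 */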
9254 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9255                                                     u32 switch_cfg)
9256 {
9257         int port = BP_PORT(bp);
9258         u32 ext_phy_type;
9259
9260         switch (switch_cfg) {
9261         case SWITCH_CFG_1G:
9262                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9263
9264                 ext_phy_type =
9265                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9266                 switch (ext_phy_type) {
9267                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9268                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9269                                        ext_phy_type);
9270
9271                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9272                                                SUPPORTED_10baseT_Full |
9273                                                SUPPORTED_100baseT_Half |
9274                                                SUPPORTED_100baseT_Full |
9275                                                SUPPORTED_1000baseT_Full |
9276                                                SUPPORTED_2500baseX_Full |
9277                                                SUPPORTED_TP |
9278                                                SUPPORTED_FIBRE |
9279                                                SUPPORTED_Autoneg |
9280                                                SUPPORTED_Pause |
9281                                                SUPPORTED_Asym_Pause);
9282                         break;
9283
9284                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9285                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9286                                        ext_phy_type);
9287
9288                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9289                                                SUPPORTED_10baseT_Full |
9290                                                SUPPORTED_100baseT_Half |
9291                                                SUPPORTED_100baseT_Full |
9292                                                SUPPORTED_1000baseT_Full |
9293                                                SUPPORTED_TP |
9294                                                SUPPORTED_FIBRE |
9295                                                SUPPORTED_Autoneg |
9296                                                SUPPORTED_Pause |
9297                                                SUPPORTED_Asym_Pause);
9298                         break;
9299
9300                 default:
9301                         BNX2X_ERR("NVRAM config error. "
9302                                   "BAD SerDes ext_phy_config 0x%x\n",
9303                                   bp->link_params.ext_phy_config);
9304                         return;
9305                 }
9306
9307                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9308                                            port*0x10);
9309                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9310                 break;
9311
9312         case SWITCH_CFG_10G:
9313                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9314
9315                 ext_phy_type =
9316                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9317                 switch (ext_phy_type) {
9318                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9319                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9320                                        ext_phy_type);
9321
9322                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9323                                                SUPPORTED_10baseT_Full |
9324                                                SUPPORTED_100baseT_Half |
9325                                                SUPPORTED_100baseT_Full |
9326                                                SUPPORTED_1000baseT_Full |
9327                                                SUPPORTED_2500baseX_Full |
9328                                                SUPPORTED_10000baseT_Full |
9329                                                SUPPORTED_TP |
9330                                                SUPPORTED_FIBRE |
9331                                                SUPPORTED_Autoneg |
9332                                                SUPPORTED_Pause |
9333                                                SUPPORTED_Asym_Pause);
9334                         break;
9335
9336                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9337                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9338                                        ext_phy_type);
9339
9340                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9341                                                SUPPORTED_1000baseT_Full |
9342                                                SUPPORTED_FIBRE |
9343                                                SUPPORTED_Autoneg |
9344                                                SUPPORTED_Pause |
9345                                                SUPPORTED_Asym_Pause);
9346                         break;
9347
9348                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9349                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9350                                        ext_phy_type);
9351
9352                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9353                                                SUPPORTED_2500baseX_Full |
9354                                                SUPPORTED_1000baseT_Full |
9355                                                SUPPORTED_FIBRE |
9356                                                SUPPORTED_Autoneg |
9357                                                SUPPORTED_Pause |
9358                                                SUPPORTED_Asym_Pause);
9359                         break;
9360
9361                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9362                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9363                                        ext_phy_type);
9364
9365                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9366                                                SUPPORTED_FIBRE |
9367                                                SUPPORTED_Pause |
9368                                                SUPPORTED_Asym_Pause);
9369                         break;
9370
9371                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9372                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9373                                        ext_phy_type);
9374
9375                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9376                                                SUPPORTED_1000baseT_Full |
9377                                                SUPPORTED_FIBRE |
9378                                                SUPPORTED_Pause |
9379                                                SUPPORTED_Asym_Pause);
9380                         break;
9381
9382                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9383                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9384                                        ext_phy_type);
9385
9386                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9387                                                SUPPORTED_1000baseT_Full |
9388                                                SUPPORTED_Autoneg |
9389                                                SUPPORTED_FIBRE |
9390                                                SUPPORTED_Pause |
9391                                                SUPPORTED_Asym_Pause);
9392                         break;
9393
9394                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9395                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9396                                        ext_phy_type);
9397
9398                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9399                                                SUPPORTED_1000baseT_Full |
9400                                                SUPPORTED_Autoneg |
9401                                                SUPPORTED_FIBRE |
9402                                                SUPPORTED_Pause |
9403                                                SUPPORTED_Asym_Pause);
9404                         break;
9405
9406                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9407                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9408                                        ext_phy_type);
9409
9410                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9411                                                SUPPORTED_TP |
9412                                                SUPPORTED_Autoneg |
9413                                                SUPPORTED_Pause |
9414                                                SUPPORTED_Asym_Pause);
9415                         break;
9416
9417                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9418                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9419                                        ext_phy_type);
9420
9421                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9422                                                SUPPORTED_10baseT_Full |
9423                                                SUPPORTED_100baseT_Half |
9424                                                SUPPORTED_100baseT_Full |
9425                                                SUPPORTED_1000baseT_Full |
9426                                                SUPPORTED_10000baseT_Full |
9427                                                SUPPORTED_TP |
9428                                                SUPPORTED_Autoneg |
9429                                                SUPPORTED_Pause |
9430                                                SUPPORTED_Asym_Pause);
9431                         break;
9432
9433                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9434                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9435                                   bp->link_params.ext_phy_config);
9436                         break;
9437
9438                 default:
9439                         BNX2X_ERR("NVRAM config error. "
9440                                   "BAD XGXS ext_phy_config 0x%x\n",
9441                                   bp->link_params.ext_phy_config);
9442                         return;
9443                 }
9444
9445                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9446                                            port*0x18);
9447                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9448
9449                 break;
9450
9451         default:
9452                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9453                           bp->port.link_config);
9454                 return;
9455         }
9456         bp->link_params.phy_addr = bp->port.phy_addr;
9457
9458         /* mask what we support according to speed_cap_mask */
9459         if (!(bp->link_params.speed_cap_mask &
9460                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9461                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9462
9463         if (!(bp->link_params.speed_cap_mask &
9464                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9465                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9466
9467         if (!(bp->link_params.speed_cap_mask &
9468                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9469                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9470
9471         if (!(bp->link_params.speed_cap_mask &
9472                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9473                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9474
9475         if (!(bp->link_params.speed_cap_mask &
9476                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9477                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9478                                         SUPPORTED_1000baseT_Full);
9479
9480         if (!(bp->link_params.speed_cap_mask &
9481                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9482                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9483
9484         if (!(bp->link_params.speed_cap_mask &
9485                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9486                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9487
9488         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9489 }
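/* Illustration (not driver code): with a speed_cap_mask that leaves only
 * the 1G and 10G D0 capabilities set, the checks above reduce e.g.
 *
 *	supported = SUPPORTED_10baseT_Full | SUPPORTED_1000baseT_Full |
 *		    SUPPORTED_10000baseT_Full | SUPPORTED_Autoneg;
 *
 * to SUPPORTED_1000baseT_Full | SUPPORTED_10000baseT_Full |
 * SUPPORTED_Autoneg; only the speed bits are masked, the rest stay.
 */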
9490
9491 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9492 {
9493         bp->link_params.req_duplex = DUPLEX_FULL;
9494
9495         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9496         case PORT_FEATURE_LINK_SPEED_AUTO:
9497                 if (bp->port.supported & SUPPORTED_Autoneg) {
9498                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9499                         bp->port.advertising = bp->port.supported;
9500                 } else {
9501                         u32 ext_phy_type =
9502                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9503
9504                         if ((ext_phy_type ==
9505                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9506                             (ext_phy_type ==
9507                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9508                                 /* force 10G, no AN */
9509                                 bp->link_params.req_line_speed = SPEED_10000;
9510                                 bp->port.advertising =
9511                                                 (ADVERTISED_10000baseT_Full |
9512                                                  ADVERTISED_FIBRE);
9513                                 break;
9514                         }
9515                         BNX2X_ERR("NVRAM config error. "
9516                                   "Invalid link_config 0x%x"
9517                                   "  Autoneg not supported\n",
9518                                   bp->port.link_config);
9519                         return;
9520                 }
9521                 break;
9522
9523         case PORT_FEATURE_LINK_SPEED_10M_FULL:
9524                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9525                         bp->link_params.req_line_speed = SPEED_10;
9526                         bp->port.advertising = (ADVERTISED_10baseT_Full |
9527                                                 ADVERTISED_TP);
9528                 } else {
9529                         BNX2X_ERROR("NVRAM config error. "
9530                                     "Invalid link_config 0x%x"
9531                                     "  speed_cap_mask 0x%x\n",
9532                                     bp->port.link_config,
9533                                     bp->link_params.speed_cap_mask);
9534                         return;
9535                 }
9536                 break;
9537
9538         case PORT_FEATURE_LINK_SPEED_10M_HALF:
9539                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9540                         bp->link_params.req_line_speed = SPEED_10;
9541                         bp->link_params.req_duplex = DUPLEX_HALF;
9542                         bp->port.advertising = (ADVERTISED_10baseT_Half |
9543                                                 ADVERTISED_TP);
9544                 } else {
9545                         BNX2X_ERROR("NVRAM config error. "
9546                                     "Invalid link_config 0x%x"
9547                                     "  speed_cap_mask 0x%x\n",
9548                                     bp->port.link_config,
9549                                     bp->link_params.speed_cap_mask);
9550                         return;
9551                 }
9552                 break;
9553
9554         case PORT_FEATURE_LINK_SPEED_100M_FULL:
9555                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9556                         bp->link_params.req_line_speed = SPEED_100;
9557                         bp->port.advertising = (ADVERTISED_100baseT_Full |
9558                                                 ADVERTISED_TP);
9559                 } else {
9560                         BNX2X_ERROR("NVRAM config error. "
9561                                     "Invalid link_config 0x%x"
9562                                     "  speed_cap_mask 0x%x\n",
9563                                     bp->port.link_config,
9564                                     bp->link_params.speed_cap_mask);
9565                         return;
9566                 }
9567                 break;
9568
9569         case PORT_FEATURE_LINK_SPEED_100M_HALF:
9570                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9571                         bp->link_params.req_line_speed = SPEED_100;
9572                         bp->link_params.req_duplex = DUPLEX_HALF;
9573                         bp->port.advertising = (ADVERTISED_100baseT_Half |
9574                                                 ADVERTISED_TP);
9575                 } else {
9576                         BNX2X_ERROR("NVRAM config error. "
9577                                     "Invalid link_config 0x%x"
9578                                     "  speed_cap_mask 0x%x\n",
9579                                     bp->port.link_config,
9580                                     bp->link_params.speed_cap_mask);
9581                         return;
9582                 }
9583                 break;
9584
9585         case PORT_FEATURE_LINK_SPEED_1G:
9586                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9587                         bp->link_params.req_line_speed = SPEED_1000;
9588                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
9589                                                 ADVERTISED_TP);
9590                 } else {
9591                         BNX2X_ERROR("NVRAM config error. "
9592                                     "Invalid link_config 0x%x"
9593                                     "  speed_cap_mask 0x%x\n",
9594                                     bp->port.link_config,
9595                                     bp->link_params.speed_cap_mask);
9596                         return;
9597                 }
9598                 break;
9599
9600         case PORT_FEATURE_LINK_SPEED_2_5G:
9601                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9602                         bp->link_params.req_line_speed = SPEED_2500;
9603                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
9604                                                 ADVERTISED_TP);
9605                 } else {
9606                         BNX2X_ERROR("NVRAM config error. "
9607                                     "Invalid link_config 0x%x"
9608                                     "  speed_cap_mask 0x%x\n",
9609                                     bp->port.link_config,
9610                                     bp->link_params.speed_cap_mask);
9611                         return;
9612                 }
9613                 break;
9614
9615         case PORT_FEATURE_LINK_SPEED_10G_CX4:
9616         case PORT_FEATURE_LINK_SPEED_10G_KX4:
9617         case PORT_FEATURE_LINK_SPEED_10G_KR:
9618                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9619                         bp->link_params.req_line_speed = SPEED_10000;
9620                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
9621                                                 ADVERTISED_FIBRE);
9622                 } else {
9623                         BNX2X_ERROR("NVRAM config error. "
9624                                     "Invalid link_config 0x%x"
9625                                     "  speed_cap_mask 0x%x\n",
9626                                     bp->port.link_config,
9627                                     bp->link_params.speed_cap_mask);
9628                         return;
9629                 }
9630                 break;
9631
9632         default:
9633                 BNX2X_ERROR("NVRAM config error. "
9634                             "BAD link speed link_config 0x%x\n",
9635                             bp->port.link_config);
9636                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9637                 bp->port.advertising = bp->port.supported;
9638                 break;
9639         }
9640
9641         bp->link_params.req_flow_ctrl = (bp->port.link_config &
9642                                          PORT_FEATURE_FLOW_CONTROL_MASK);
9643         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9644             !(bp->port.supported & SUPPORTED_Autoneg))
9645                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9646
9647         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
9648                        "  advertising 0x%x\n",
9649                        bp->link_params.req_line_speed,
9650                        bp->link_params.req_duplex,
9651                        bp->link_params.req_flow_ctrl, bp->port.advertising);
9652 }
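/* Note on the tail of the routine above: flow control comes from the same
 * NVRAM link_config word, and BNX2X_FLOW_CTRL_AUTO only makes sense when
 * the port can autonegotiate, which is why it degrades to
 * BNX2X_FLOW_CTRL_NONE when SUPPORTED_Autoneg is not set.
 */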
9653
9654 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9655 {
9656         mac_hi = cpu_to_be16(mac_hi);
9657         mac_lo = cpu_to_be32(mac_lo);
9658         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9659         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9660 }
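/* Example (illustrative values): mac_hi = 0x0010 and mac_lo = 0x18ab34cd
 * yield the station address 00:10:18:ab:34:cd; the hi word fills bytes
 * 0-1 and the lo dword bytes 2-5, both in big-endian order.
 */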
9661
9662 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9663 {
9664         int port = BP_PORT(bp);
9665         u32 val, val2;
9666         u32 config;
9667         u16 i;
9668         u32 ext_phy_type;
9669
9670         bp->link_params.bp = bp;
9671         bp->link_params.port = port;
9672
9673         bp->link_params.lane_config =
9674                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9675         bp->link_params.ext_phy_config =
9676                 SHMEM_RD(bp,
9677                          dev_info.port_hw_config[port].external_phy_config);
9678         /* BCM8727_NOC => BCM8727 without over-current support */
9679         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9680             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9681                 bp->link_params.ext_phy_config &=
9682                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9683                 bp->link_params.ext_phy_config |=
9684                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9685                 bp->link_params.feature_config_flags |=
9686                         FEATURE_CONFIG_BCM8727_NOC;
9687         }
9688
9689         bp->link_params.speed_cap_mask =
9690                 SHMEM_RD(bp,
9691                          dev_info.port_hw_config[port].speed_capability_mask);
9692
9693         bp->port.link_config =
9694                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9695
9696         /* Get the XGXS RX and TX config for all 4 lanes */
9697         for (i = 0; i < 2; i++) {
9698                 val = SHMEM_RD(bp,
9699                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9700                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9701                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9702
9703                 val = SHMEM_RD(bp,
9704                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9705                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9706                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9707         }
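        /* Each SHMEM dword packs two lanes: the high 16 bits go to the
         * even lane (2i) and the low 16 bits to the odd lane (2i + 1),
         * so two iterations cover lanes 0-3. E.g. (illustrative value)
         * a read of 0xaaaabbbb fills xgxs_config_rx[0] = 0xaaaa and
         * xgxs_config_rx[1] = 0xbbbb.
         */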
9708
9709         /* If the device is capable of WoL, set the default state according
9710          * to the HW
9711          */
9712         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9713         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9714                    (config & PORT_FEATURE_WOL_ENABLED));
9715
9716         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
9717                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
9718                        bp->link_params.lane_config,
9719                        bp->link_params.ext_phy_config,
9720                        bp->link_params.speed_cap_mask, bp->port.link_config);
9721
9722         bp->link_params.switch_cfg |= (bp->port.link_config &
9723                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
9724         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9725
9726         bnx2x_link_settings_requested(bp);
9727
9728         /*
9729          * If connected directly, work with the internal PHY; otherwise,
9730          * work with the external PHY
9731          */
9732         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9733         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9734                 bp->mdio.prtad = bp->link_params.phy_addr;
9735
9736         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9737                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9738                 bp->mdio.prtad =
9739                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9740
9741         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9742         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9743         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9744         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9745         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9746
9747 #ifdef BCM_CNIC
9748         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9749         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9750         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9751 #endif
9752 }
9753
9754 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9755 {
9756         int func = BP_FUNC(bp);
9757         u32 val, val2;
9758         int rc = 0;
9759
9760         bnx2x_get_common_hwinfo(bp);
9761
9762         bp->e1hov = 0;
9763         bp->e1hmf = 0;
9764         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9765                 bp->mf_config =
9766                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9767
9768                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9769                        FUNC_MF_CFG_E1HOV_TAG_MASK);
9770                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9771                         bp->e1hmf = 1;
9772                 BNX2X_DEV_INFO("%s function mode\n",
9773                                IS_E1HMF(bp) ? "multi" : "single");
9774
9775                 if (IS_E1HMF(bp)) {
9776                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9777                                                                 e1hov_tag) &
9778                                FUNC_MF_CFG_E1HOV_TAG_MASK);
9779                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9780                                 bp->e1hov = val;
9781                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9782                                                "(0x%04x)\n",
9783                                                func, bp->e1hov, bp->e1hov);
9784                         } else {
9785                                 BNX2X_ERROR("No valid E1HOV for func %d,"
9786                                             "  aborting\n", func);
9787                                 rc = -EPERM;
9788                         }
9789                 } else {
9790                         if (BP_E1HVN(bp)) {
9791                                 BNX2X_ERROR("VN %d in single function mode,"
9792                                             "  aborting\n", BP_E1HVN(bp));
9793                                 rc = -EPERM;
9794                         }
9795                 }
9796         }
9797
9798         if (!BP_NOMCP(bp)) {
9799                 bnx2x_get_port_hwinfo(bp);
9800
9801                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9802                               DRV_MSG_SEQ_NUMBER_MASK);
9803                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9804         }
9805
9806         if (IS_E1HMF(bp)) {
9807                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9808                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
9809                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9810                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9811                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9812                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9813                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9814                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9815                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
9816                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
9817                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9818                                ETH_ALEN);
9819                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9820                                ETH_ALEN);
9821                 }
9822
9823                 return rc;
9824         }
9825
9826         if (BP_NOMCP(bp)) {
9827                 /* only supposed to happen on emulation/FPGA */
9828                 BNX2X_ERROR("warning: random MAC workaround active\n");
9829                 random_ether_addr(bp->dev->dev_addr);
9830                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9831         }
9832
9833         return rc;
9834 }
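/* MAC unpacking example (illustrative values): mac_upper = 0x0000a1b2 and
 * mac_lower = 0xc3d4e5f6 give dev_addr a1:b2:c3:d4:e5:f6, the same byte
 * order that bnx2x_set_mac_buf() produces for the port MAC above.
 */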
9835
9836 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9837 {
9838         int cnt, i, block_end, rodi;
9839         char vpd_data[BNX2X_VPD_LEN+1];
9840         char str_id_reg[VENDOR_ID_LEN+1];
9841         char str_id_cap[VENDOR_ID_LEN+1];
9842         u8 len;
9843
9844         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9845         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9846
9847         if (cnt < BNX2X_VPD_LEN)
9848                 goto out_not_found;
9849
9850         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9851                              PCI_VPD_LRDT_RO_DATA);
9852         if (i < 0)
9853                 goto out_not_found;
9854
9855
9856         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9857                     pci_vpd_lrdt_size(&vpd_data[i]);
9858
9859         i += PCI_VPD_LRDT_TAG_SIZE;
9860
9861         if (block_end > BNX2X_VPD_LEN)
9862                 goto out_not_found;
9863
9864         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9865                                    PCI_VPD_RO_KEYWORD_MFR_ID);
9866         if (rodi < 0)
9867                 goto out_not_found;
9868
9869         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9870
9871         if (len != VENDOR_ID_LEN)
9872                 goto out_not_found;
9873
9874         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9875
9876         /* vendor-specific info */
9877         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9878         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9879         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9880             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9881
9882                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9883                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
9884                 if (rodi >= 0) {
9885                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9886
9887                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9888
9889                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9890                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9891                                 bp->fw_ver[len] = ' ';
9892                         }
9893                 }
9894                 return;
9895         }
9896 out_not_found:
9897         return;
9898 }
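/* Parse flow of the routine above: locate the VPD read-only LRDT tag,
 * bound it with block_end, find the "MN" (manufacturer ID) keyword and
 * compare it against the Dell vendor ID; only then is the "V0" vendor
 * field copied into bp->fw_ver. Every failure path goes through
 * out_not_found and leaves fw_ver as the zeroed default.
 */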
9899
9900 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9901 {
9902         int func = BP_FUNC(bp);
9903         int timer_interval;
9904         int rc;
9905
9906         /* Disable interrupt handling until HW is initialized */
9907         atomic_set(&bp->intr_sem, 1);
9908         smp_wmb(); /* order the intr_sem update before later init writes */
9909
9910         mutex_init(&bp->port.phy_mutex);
9911         mutex_init(&bp->fw_mb_mutex);
9912 #ifdef BCM_CNIC
9913         mutex_init(&bp->cnic_mutex);
9914 #endif
9915
9916         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9917         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9918
9919         rc = bnx2x_get_hwinfo(bp);
9920
9921         bnx2x_read_fwinfo(bp);
9922         /* need to reset the chip if UNDI was active */
9923         if (!BP_NOMCP(bp))
9924                 bnx2x_undi_unload(bp);
9925
9926         if (CHIP_REV_IS_FPGA(bp))
9927                 dev_err(&bp->pdev->dev, "FPGA detected\n");
9928
9929         if (BP_NOMCP(bp) && (func == 0))
9930                 dev_err(&bp->pdev->dev, "MCP disabled, "
9931                                         "must load devices in order!\n");
9932
9933         /* Set multi queue mode */
9934         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9935             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9936                 dev_err(&bp->pdev->dev, "Multi queue mode disabled: "
9937                                         "requested int_mode is not MSI-X\n");
9938                 multi_mode = ETH_RSS_MODE_DISABLED;
9939         }
9940         bp->multi_mode = multi_mode;
9941
9942
9943         bp->dev->features |= NETIF_F_GRO;
9944
9945         /* Set TPA flags */
9946         if (disable_tpa) {
9947                 bp->flags &= ~TPA_ENABLE_FLAG;
9948                 bp->dev->features &= ~NETIF_F_LRO;
9949         } else {
9950                 bp->flags |= TPA_ENABLE_FLAG;
9951                 bp->dev->features |= NETIF_F_LRO;
9952         }
9953
9954         if (CHIP_IS_E1(bp))
9955                 bp->dropless_fc = 0;
9956         else
9957                 bp->dropless_fc = dropless_fc;
9958
9959         bp->mrrs = mrrs;
9960
9961         bp->tx_ring_size = MAX_TX_AVAIL;
9962         bp->rx_ring_size = MAX_RX_AVAIL;
9963
9964         bp->rx_csum = 1;
9965
9966         /* round the coalescing ticks down to the 4*BNX2X_BTR granularity */
9967         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9968         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
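        /* e.g. assuming BNX2X_BTR is 4, the granularity is 16 usec and
         * the 50/25 usec defaults above round down to 48 and 16. */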
9969
9970         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9971         bp->current_interval = (poll ? poll : timer_interval);
9972
9973         init_timer(&bp->timer);
9974         bp->timer.expires = jiffies + bp->current_interval;
9975         bp->timer.data = (unsigned long) bp;
9976         bp->timer.function = bnx2x_timer;
9977
9978         return rc;
9979 }
9980
9981 /*
9982  * ethtool service functions
9983  */
9984
9985 /* All ethtool functions called with rtnl_lock */
9986
9987 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9988 {
9989         struct bnx2x *bp = netdev_priv(dev);
9990
9991         cmd->supported = bp->port.supported;
9992         cmd->advertising = bp->port.advertising;
9993
9994         if ((bp->state == BNX2X_STATE_OPEN) &&
9995             !(bp->flags & MF_FUNC_DIS) &&
9996             (bp->link_vars.link_up)) {
9997                 cmd->speed = bp->link_vars.line_speed;
9998                 cmd->duplex = bp->link_vars.duplex;
9999                 if (IS_E1HMF(bp)) {
10000                         u16 vn_max_rate;
10001
10002                         vn_max_rate =
10003                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
10004                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10005                         if (vn_max_rate < cmd->speed)
10006                                 cmd->speed = vn_max_rate;
10007                 }
10008         } else {
10009                 cmd->speed = -1;
10010                 cmd->duplex = -1;
10011         }
10012
10013         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10014                 u32 ext_phy_type =
10015                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10016
10017                 switch (ext_phy_type) {
10018                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10019                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10020                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10021                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10022                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10023                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10024                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10025                         cmd->port = PORT_FIBRE;
10026                         break;
10027
10028                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10029                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10030                         cmd->port = PORT_TP;
10031                         break;
10032
10033                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10034                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10035                                   bp->link_params.ext_phy_config);
10036                         break;
10037
10038                 default:
10039                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10040                            bp->link_params.ext_phy_config);
10041                         break;
10042                 }
10043         } else
10044                 cmd->port = PORT_TP;
10045
10046         cmd->phy_address = bp->mdio.prtad;
10047         cmd->transceiver = XCVR_INTERNAL;
10048
10049         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10050                 cmd->autoneg = AUTONEG_ENABLE;
10051         else
10052                 cmd->autoneg = AUTONEG_DISABLE;
10053
10054         cmd->maxtxpkt = 0;
10055         cmd->maxrxpkt = 0;
10056
10057         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10058            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10059            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10060            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10061            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10062            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10063            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10064
10065         return 0;
10066 }
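/* vn_max_rate example (illustrative value): a max_bw field of 25 in
 * mf_config expands to 25 * 100 = 2500 Mbps, so a 10G link is reported
 * as 2500 when this VN is capped below line rate.
 */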
10067
10068 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10069 {
10070         struct bnx2x *bp = netdev_priv(dev);
10071         u32 advertising;
10072
10073         if (IS_E1HMF(bp))
10074                 return 0;
10075
10076         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10077            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10078            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10079            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10080            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10081            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10082            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10083
10084         if (cmd->autoneg == AUTONEG_ENABLE) {
10085                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10086                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10087                         return -EINVAL;
10088                 }
10089
10090                 /* advertise the requested speed and duplex if supported */
10091                 cmd->advertising &= bp->port.supported;
10092
10093                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10094                 bp->link_params.req_duplex = DUPLEX_FULL;
10095                 bp->port.advertising |= (ADVERTISED_Autoneg |
10096                                          cmd->advertising);
10097
10098         } else { /* forced speed */
10099                 /* advertise the requested speed and duplex if supported */
10100                 switch (cmd->speed) {
10101                 case SPEED_10:
10102                         if (cmd->duplex == DUPLEX_FULL) {
10103                                 if (!(bp->port.supported &
10104                                       SUPPORTED_10baseT_Full)) {
10105                                         DP(NETIF_MSG_LINK,
10106                                            "10M full not supported\n");
10107                                         return -EINVAL;
10108                                 }
10109
10110                                 advertising = (ADVERTISED_10baseT_Full |
10111                                                ADVERTISED_TP);
10112                         } else {
10113                                 if (!(bp->port.supported &
10114                                       SUPPORTED_10baseT_Half)) {
10115                                         DP(NETIF_MSG_LINK,
10116                                            "10M half not supported\n");
10117                                         return -EINVAL;
10118                                 }
10119
10120                                 advertising = (ADVERTISED_10baseT_Half |
10121                                                ADVERTISED_TP);
10122                         }
10123                         break;
10124
10125                 case SPEED_100:
10126                         if (cmd->duplex == DUPLEX_FULL) {
10127                                 if (!(bp->port.supported &
10128                                                 SUPPORTED_100baseT_Full)) {
10129                                         DP(NETIF_MSG_LINK,
10130                                            "100M full not supported\n");
10131                                         return -EINVAL;
10132                                 }
10133
10134                                 advertising = (ADVERTISED_100baseT_Full |
10135                                                ADVERTISED_TP);
10136                         } else {
10137                                 if (!(bp->port.supported &
10138                                                 SUPPORTED_100baseT_Half)) {
10139                                         DP(NETIF_MSG_LINK,
10140                                            "100M half not supported\n");
10141                                         return -EINVAL;
10142                                 }
10143
10144                                 advertising = (ADVERTISED_100baseT_Half |
10145                                                ADVERTISED_TP);
10146                         }
10147                         break;
10148
10149                 case SPEED_1000:
10150                         if (cmd->duplex != DUPLEX_FULL) {
10151                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
10152                                 return -EINVAL;
10153                         }
10154
10155                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10156                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
10157                                 return -EINVAL;
10158                         }
10159
10160                         advertising = (ADVERTISED_1000baseT_Full |
10161                                        ADVERTISED_TP);
10162                         break;
10163
10164                 case SPEED_2500:
10165                         if (cmd->duplex != DUPLEX_FULL) {
10166                                 DP(NETIF_MSG_LINK,
10167                                    "2.5G half not supported\n");
10168                                 return -EINVAL;
10169                         }
10170
10171                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10172                                 DP(NETIF_MSG_LINK,
10173                                    "2.5G full not supported\n");
10174                                 return -EINVAL;
10175                         }
10176
10177                         advertising = (ADVERTISED_2500baseX_Full |
10178                                        ADVERTISED_TP);
10179                         break;
10180
10181                 case SPEED_10000:
10182                         if (cmd->duplex != DUPLEX_FULL) {
10183                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
10184                                 return -EINVAL;
10185                         }
10186
10187                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10188                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
10189                                 return -EINVAL;
10190                         }
10191
10192                         advertising = (ADVERTISED_10000baseT_Full |
10193                                        ADVERTISED_FIBRE);
10194                         break;
10195
10196                 default:
10197                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
10198                         return -EINVAL;
10199                 }
10200
10201                 bp->link_params.req_line_speed = cmd->speed;
10202                 bp->link_params.req_duplex = cmd->duplex;
10203                 bp->port.advertising = advertising;
10204         }
10205
10206         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10207            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
10208            bp->link_params.req_line_speed, bp->link_params.req_duplex,
10209            bp->port.advertising);
10210
10211         if (netif_running(dev)) {
10212                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10213                 bnx2x_link_set(bp);
10214         }
10215
10216         return 0;
10217 }
10218
10219 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10220 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10221
10222 static int bnx2x_get_regs_len(struct net_device *dev)
10223 {
10224         struct bnx2x *bp = netdev_priv(dev);
10225         int regdump_len = 0;
10226         int i;
10227
10228         if (CHIP_IS_E1(bp)) {
10229                 for (i = 0; i < REGS_COUNT; i++)
10230                         if (IS_E1_ONLINE(reg_addrs[i].info))
10231                                 regdump_len += reg_addrs[i].size;
10232
10233                 for (i = 0; i < WREGS_COUNT_E1; i++)
10234                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10235                                 regdump_len += wreg_addrs_e1[i].size *
10236                                         (1 + wreg_addrs_e1[i].read_regs_count);
10237
10238         } else { /* E1H */
10239                 for (i = 0; i < REGS_COUNT; i++)
10240                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10241                                 regdump_len += reg_addrs[i].size;
10242
10243                 for (i = 0; i < WREGS_COUNT_E1H; i++)
10244                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10245                                 regdump_len += wreg_addrs_e1h[i].size *
10246                                         (1 + wreg_addrs_e1h[i].read_regs_count);
10247         }
10248         regdump_len *= 4;
10249         regdump_len += sizeof(struct dump_hdr);
10250
10251         return regdump_len;
10252 }
10253
10254 static void bnx2x_get_regs(struct net_device *dev,
10255                            struct ethtool_regs *regs, void *_p)
10256 {
10257         u32 *p = _p, i, j;
10258         struct bnx2x *bp = netdev_priv(dev);
10259         struct dump_hdr dump_hdr = {0};
10260
10261         regs->version = 0;
10262         memset(p, 0, regs->len);
10263
10264         if (!netif_running(bp->dev))
10265                 return;
10266
10267         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10268         dump_hdr.dump_sign = dump_sign_all;
10269         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10270         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10271         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10272         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10273         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10274
10275         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10276         p += dump_hdr.hdr_size + 1;
10277
10278         if (CHIP_IS_E1(bp)) {
10279                 for (i = 0; i < REGS_COUNT; i++)
10280                         if (IS_E1_ONLINE(reg_addrs[i].info))
10281                                 for (j = 0; j < reg_addrs[i].size; j++)
10282                                         *p++ = REG_RD(bp,
10283                                                       reg_addrs[i].addr + j*4);
10284
10285         } else { /* E1H */
10286                 for (i = 0; i < REGS_COUNT; i++)
10287                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10288                                 for (j = 0; j < reg_addrs[i].size; j++)
10289                                         *p++ = REG_RD(bp,
10290                                                       reg_addrs[i].addr + j*4);
10291         }
10292 }
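/* Dump layout: a struct dump_hdr (hdr_size counts its dwords minus one,
 * hence the "+ 1" when advancing p) followed by the raw 32-bit values of
 * every register window that is online for this chip revision.
 */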
10293
10294 #define PHY_FW_VER_LEN                  10
10295
10296 static void bnx2x_get_drvinfo(struct net_device *dev,
10297                               struct ethtool_drvinfo *info)
10298 {
10299         struct bnx2x *bp = netdev_priv(dev);
10300         u8 phy_fw_ver[PHY_FW_VER_LEN];
10301
10302         strcpy(info->driver, DRV_MODULE_NAME);
10303         strcpy(info->version, DRV_MODULE_VERSION);
10304
10305         phy_fw_ver[0] = '\0';
10306         if (bp->port.pmf) {
10307                 bnx2x_acquire_phy_lock(bp);
10308                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10309                                              (bp->state != BNX2X_STATE_CLOSED),
10310                                              phy_fw_ver, PHY_FW_VER_LEN);
10311                 bnx2x_release_phy_lock(bp);
10312         }
10313
10314         strncpy(info->fw_version, bp->fw_ver, 32);
10315         snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10316                  "bc %d.%d.%d%s%s",
10317                  (bp->common.bc_ver & 0xff0000) >> 16,
10318                  (bp->common.bc_ver & 0xff00) >> 8,
10319                  (bp->common.bc_ver & 0xff),
10320                  ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10321         strcpy(info->bus_info, pci_name(bp->pdev));
10322         info->n_stats = BNX2X_NUM_STATS;
10323         info->testinfo_len = BNX2X_NUM_TESTS;
10324         info->eedump_len = bp->common.flash_size;
10325         info->regdump_len = bnx2x_get_regs_len(dev);
10326 }
10327
10328 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10329 {
10330         struct bnx2x *bp = netdev_priv(dev);
10331
10332         if (bp->flags & NO_WOL_FLAG) {
10333                 wol->supported = 0;
10334                 wol->wolopts = 0;
10335         } else {
10336                 wol->supported = WAKE_MAGIC;
10337                 if (bp->wol)
10338                         wol->wolopts = WAKE_MAGIC;
10339                 else
10340                         wol->wolopts = 0;
10341         }
10342         memset(&wol->sopass, 0, sizeof(wol->sopass));
10343 }
10344
10345 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10346 {
10347         struct bnx2x *bp = netdev_priv(dev);
10348
10349         if (wol->wolopts & ~WAKE_MAGIC)
10350                 return -EINVAL;
10351
10352         if (wol->wolopts & WAKE_MAGIC) {
10353                 if (bp->flags & NO_WOL_FLAG)
10354                         return -EINVAL;
10355
10356                 bp->wol = 1;
10357         } else
10358                 bp->wol = 0;
10359
10360         return 0;
10361 }
10362
10363 static u32 bnx2x_get_msglevel(struct net_device *dev)
10364 {
10365         struct bnx2x *bp = netdev_priv(dev);
10366
10367         return bp->msg_enable;
10368 }
10369
10370 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10371 {
10372         struct bnx2x *bp = netdev_priv(dev);
10373
10374         if (capable(CAP_NET_ADMIN))
10375                 bp->msg_enable = level;
10376 }
10377
10378 static int bnx2x_nway_reset(struct net_device *dev)
10379 {
10380         struct bnx2x *bp = netdev_priv(dev);
10381
10382         if (!bp->port.pmf)
10383                 return 0;
10384
10385         if (netif_running(dev)) {
10386                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10387                 bnx2x_link_set(bp);
10388         }
10389
10390         return 0;
10391 }
10392
10393 static u32 bnx2x_get_link(struct net_device *dev)
10394 {
10395         struct bnx2x *bp = netdev_priv(dev);
10396
10397         if (bp->flags & MF_FUNC_DIS)
10398                 return 0;
10399
10400         return bp->link_vars.link_up;
10401 }
10402
10403 static int bnx2x_get_eeprom_len(struct net_device *dev)
10404 {
10405         struct bnx2x *bp = netdev_priv(dev);
10406
10407         return bp->common.flash_size;
10408 }
10409
10410 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10411 {
10412         int port = BP_PORT(bp);
10413         int count, i;
10414         u32 val = 0;
10415
10416         /* adjust timeout for emulation/FPGA */
10417         count = NVRAM_TIMEOUT_COUNT;
10418         if (CHIP_REV_IS_SLOW(bp))
10419                 count *= 100;
10420
10421         /* request access to nvram interface */
10422         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10423                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10424
10425         for (i = 0; i < count*10; i++) {
10426                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10427                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10428                         break;
10429
10430                 udelay(5);
10431         }
10432
10433         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10434                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10435                 return -EBUSY;
10436         }
10437
10438         return 0;
10439 }
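/* The SW arbiter is per port: REQ_SET1/REQ_CLR1 shifted by the port index
 * request and release the grant, and the matching ARB_ARB1 bit reports
 * ownership. The poll above retries count * 10 times with a 5 usec delay
 * between reads before giving up with -EBUSY.
 */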
10440
10441 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10442 {
10443         int port = BP_PORT(bp);
10444         int count, i;
10445         u32 val = 0;
10446
10447         /* adjust timeout for emulation/FPGA */
10448         count = NVRAM_TIMEOUT_COUNT;
10449         if (CHIP_REV_IS_SLOW(bp))
10450                 count *= 100;
10451
10452         /* relinquish nvram interface */
10453         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10454                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10455
10456         for (i = 0; i < count*10; i++) {
10457                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10458                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10459                         break;
10460
10461                 udelay(5);
10462         }
10463
10464         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10465                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10466                 return -EBUSY;
10467         }
10468
10469         return 0;
10470 }
10471
10472 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10473 {
10474         u32 val;
10475
10476         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10477
10478         /* enable both bits, even on read */
10479         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10480                (val | MCPR_NVM_ACCESS_ENABLE_EN |
10481                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
10482 }
10483
10484 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10485 {
10486         u32 val;
10487
10488         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10489
10490         /* disable both bits, even after read */
10491         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10492                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10493                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10494 }
10495
10496 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10497                                   u32 cmd_flags)
10498 {
10499         int count, i, rc;
10500         u32 val;
10501
10502         /* build the command word */
10503         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10504
10505         /* need to clear DONE bit separately */
10506         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10507
10508         /* address of the NVRAM to read from */
10509         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10510                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10511
10512         /* issue a read command */
10513         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10514
10515         /* adjust timeout for emulation/FPGA */
10516         count = NVRAM_TIMEOUT_COUNT;
10517         if (CHIP_REV_IS_SLOW(bp))
10518                 count *= 100;
10519
10520         /* wait for completion */
10521         *ret_val = 0;
10522         rc = -EBUSY;
10523         for (i = 0; i < count; i++) {
10524                 udelay(5);
10525                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10526
10527                 if (val & MCPR_NVM_COMMAND_DONE) {
10528                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10529                         /* we read nvram data in cpu order
10530                          * but ethtool sees it as an array of bytes;
10531                          * converting to big-endian does the job */
10532                         *ret_val = cpu_to_be32(val);
10533                         rc = 0;
10534                         break;
10535                 }
10536         }
10537
10538         return rc;
10539 }
10540
10541 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10542                             int buf_size)
10543 {
10544         int rc;
10545         u32 cmd_flags;
10546         __be32 val;
10547
10548         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10549                 DP(BNX2X_MSG_NVM,
10550                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10551                    offset, buf_size);
10552                 return -EINVAL;
10553         }
10554
10555         if (offset + buf_size > bp->common.flash_size) {
10556                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10557                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10558                    offset, buf_size, bp->common.flash_size);
10559                 return -EINVAL;
10560         }
10561
10562         /* request access to nvram interface */
10563         rc = bnx2x_acquire_nvram_lock(bp);
10564         if (rc)
10565                 return rc;
10566
10567         /* enable access to nvram interface */
10568         bnx2x_enable_nvram_access(bp);
10569
10570         /* read the first word(s) */
10571         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10572         while ((buf_size > sizeof(u32)) && (rc == 0)) {
10573                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10574                 memcpy(ret_buf, &val, 4);
10575
10576                 /* advance to the next dword */
10577                 offset += sizeof(u32);
10578                 ret_buf += sizeof(u32);
10579                 buf_size -= sizeof(u32);
10580                 cmd_flags = 0;
10581         }
10582
10583         if (rc == 0) {
10584                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10585                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10586                 memcpy(ret_buf, &val, 4);
10587         }
10588
10589         /* disable access to nvram interface */
10590         bnx2x_disable_nvram_access(bp);
10591         bnx2x_release_nvram_lock(bp);
10592
10593         return rc;
10594 }
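/* Hypothetical caller sketch (illustration only, not part of the driver):
 *
 *	u8 buf[64];
 *
 *	if (bnx2x_nvram_read(bp, 0, buf, sizeof(buf)) == 0)
 *		... use buf ...
 *
 * Both offset and buf_size must be dword-aligned and non-zero, and the
 * range must fit within bp->common.flash_size, per the checks above.
 */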
10595
10596 static int bnx2x_get_eeprom(struct net_device *dev,
10597                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10598 {
10599         struct bnx2x *bp = netdev_priv(dev);
10600         int rc;
10601
10602         if (!netif_running(dev))
10603                 return -EAGAIN;
10604
10605         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10606            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10607            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10608            eeprom->len, eeprom->len);
10609
10610         /* parameters already validated in ethtool_get_eeprom */
10611
10612         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10613
10614         return rc;
10615 }
10616
10617 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10618                                    u32 cmd_flags)
10619 {
10620         int count, i, rc;
10621
10622         /* build the command word */
10623         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10624
10625         /* need to clear DONE bit separately */
10626         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10627
10628         /* write the data */
10629         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10630
10631         /* address of the NVRAM to write to */
10632         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10633                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10634
10635         /* issue the write command */
10636         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10637
10638         /* adjust timeout for emulation/FPGA */
10639         count = NVRAM_TIMEOUT_COUNT;
10640         if (CHIP_REV_IS_SLOW(bp))
10641                 count *= 100;
10642
10643         /* wait for completion */
10644         rc = -EBUSY;
10645         for (i = 0; i < count; i++) {
10646                 udelay(5);
10647                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10648                 if (val & MCPR_NVM_COMMAND_DONE) {
10649                         rc = 0;
10650                         break;
10651                 }
10652         }
10653
10654         return rc;
10655 }
10656
10657 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
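/* e.g. BYTE_OFFSET(0x13) == 8 * (0x13 & 3) == 24: byte 3 of the enclosing
 * dword sits 24 bits up in the value read back. */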
10658
10659 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10660                               int buf_size)
10661 {
10662         int rc;
10663         u32 cmd_flags;
10664         u32 align_offset;
10665         __be32 val;
10666
10667         if (offset + buf_size > bp->common.flash_size) {
10668                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10669                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10670                    offset, buf_size, bp->common.flash_size);
10671                 return -EINVAL;
10672         }
10673
10674         /* request access to nvram interface */
10675         rc = bnx2x_acquire_nvram_lock(bp);
10676         if (rc)
10677                 return rc;
10678
10679         /* enable access to nvram interface */
10680         bnx2x_enable_nvram_access(bp);
10681
10682         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10683         align_offset = (offset & ~0x03);
10684         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10685
10686         if (rc == 0) {
10687                 val &= ~(0xff << BYTE_OFFSET(offset));
10688                 val |= (*data_buf << BYTE_OFFSET(offset));
10689
10690                 /* nvram data is returned as an array of bytes;
10691                  * convert it back to cpu order */
10692                 val = be32_to_cpu(val);
10693
10694                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10695                                              cmd_flags);
10696         }
10697
10698         /* disable access to nvram interface */
10699         bnx2x_disable_nvram_access(bp);
10700         bnx2x_release_nvram_lock(bp);
10701
10702         return rc;
10703 }
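/* Read-modify-write example (illustrative values): writing 0xab at offset
 * 0x102 reads the dword at the aligned offset 0x100, clears bits 23:16
 * (BYTE_OFFSET(0x102) == 16), ORs in 0xab << 16, converts the result back
 * to CPU order and writes it as a single FIRST|LAST transaction.
 */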
10704
10705 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10706                              int buf_size)
10707 {
10708         int rc;
10709         u32 cmd_flags;
10710         u32 val;
10711         u32 written_so_far;
10712
10713         if (buf_size == 1)      /* ethtool */
10714                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10715
10716         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10717                 DP(BNX2X_MSG_NVM,
10718                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10719                    offset, buf_size);
10720                 return -EINVAL;
10721         }
10722
10723         if (offset + buf_size > bp->common.flash_size) {
10724                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10725                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10726                    offset, buf_size, bp->common.flash_size);
10727                 return -EINVAL;
10728         }
10729
10730         /* request access to nvram interface */
10731         rc = bnx2x_acquire_nvram_lock(bp);
10732         if (rc)
10733                 return rc;
10734
10735         /* enable access to nvram interface */
10736         bnx2x_enable_nvram_access(bp);
10737
10738         written_so_far = 0;
10739         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10740         while ((written_so_far < buf_size) && (rc == 0)) {
10741                 if (written_so_far == (buf_size - sizeof(u32)))
10742                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10743                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10744                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10745                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10746                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
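                /* Net effect: the final dword of the buffer and the last
                 * dword of each NVRAM page get LAST, and the first dword
                 * of each page gets FIRST, so crossing a page boundary
                 * closes one burst and opens the next. */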
10747
10748                 memcpy(&val, data_buf, 4);
10749
10750                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10751
10752                 /* advance to the next dword */
10753                 offset += sizeof(u32);
10754                 data_buf += sizeof(u32);
10755                 written_so_far += sizeof(u32);
10756                 cmd_flags = 0;
10757         }
10758
10759         /* disable access to nvram interface */
10760         bnx2x_disable_nvram_access(bp);
10761         bnx2x_release_nvram_lock(bp);
10762
10763         return rc;
10764 }
10765
10766 static int bnx2x_set_eeprom(struct net_device *dev,
10767                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10768 {
10769         struct bnx2x *bp = netdev_priv(dev);
10770         int port = BP_PORT(bp);
10771         int rc = 0;
10772
10773         if (!netif_running(dev))
10774                 return -EAGAIN;
10775
10776         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10777            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10778            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10779            eeprom->len, eeprom->len);
10780
10781         /* parameters already validated in ethtool_set_eeprom */
10782
10783         /* PHY eeprom can be accessed only by the PMF */
10784         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10785             !bp->port.pmf)
10786                 return -EINVAL;
10787
10788         if (eeprom->magic == 0x50485950) {
10789                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10790                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10791
10792                 bnx2x_acquire_phy_lock(bp);
10793                 rc |= bnx2x_link_reset(&bp->link_params,
10794                                        &bp->link_vars, 0);
10795                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10796                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10797                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10798                                        MISC_REGISTERS_GPIO_HIGH, port);
10799                 bnx2x_release_phy_lock(bp);
10800                 bnx2x_link_report(bp);
10801
10802         } else if (eeprom->magic == 0x50485952) {
10803                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10804                 if (bp->state == BNX2X_STATE_OPEN) {
10805                         bnx2x_acquire_phy_lock(bp);
10806                         rc |= bnx2x_link_reset(&bp->link_params,
10807                                                &bp->link_vars, 1);
10808
10809                         rc |= bnx2x_phy_init(&bp->link_params,
10810                                              &bp->link_vars);
10811                         bnx2x_release_phy_lock(bp);
10812                         bnx2x_calc_fc_adv(bp);
10813                 }
10814         } else if (eeprom->magic == 0x50485943) {
10815                 /* 'PHYC' (0x50485943): PHY FW upgrade completed */
10816                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10817                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10818                         u8 ext_phy_addr =
10819                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10820
10821                         /* DSP Remove Download Mode */
10822                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10823                                        MISC_REGISTERS_GPIO_LOW, port);
10824
10825                         bnx2x_acquire_phy_lock(bp);
10826
10827                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10828
10829                         /* wait 0.5 sec to allow it to run */
10830                         msleep(500);
10831                         bnx2x_ext_phy_hw_reset(bp, port);
10832                         msleep(500);
10833                         bnx2x_release_phy_lock(bp);
10834                 }
10835         } else
10836                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10837
10838         return rc;
10839 }
10840
10841 static int bnx2x_get_coalesce(struct net_device *dev,
10842                               struct ethtool_coalesce *coal)
10843 {
10844         struct bnx2x *bp = netdev_priv(dev);
10845
10846         memset(coal, 0, sizeof(struct ethtool_coalesce));
10847
10848         coal->rx_coalesce_usecs = bp->rx_ticks;
10849         coal->tx_coalesce_usecs = bp->tx_ticks;
10850
10851         return 0;
10852 }
10853
10854 static int bnx2x_set_coalesce(struct net_device *dev,
10855                               struct ethtool_coalesce *coal)
10856 {
10857         struct bnx2x *bp = netdev_priv(dev);
10858
10859         bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10860         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10861                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10862
10863         bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10864         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10865                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10866
10867         if (netif_running(dev))
10868                 bnx2x_update_coalesce(bp);
10869
10870         return 0;
10871 }
10872
10873 static void bnx2x_get_ringparam(struct net_device *dev,
10874                                 struct ethtool_ringparam *ering)
10875 {
10876         struct bnx2x *bp = netdev_priv(dev);
10877
10878         ering->rx_max_pending = MAX_RX_AVAIL;
10879         ering->rx_mini_max_pending = 0;
10880         ering->rx_jumbo_max_pending = 0;
10881
10882         ering->rx_pending = bp->rx_ring_size;
10883         ering->rx_mini_pending = 0;
10884         ering->rx_jumbo_pending = 0;
10885
10886         ering->tx_max_pending = MAX_TX_AVAIL;
10887         ering->tx_pending = bp->tx_ring_size;
10888 }
10889
10890 static int bnx2x_set_ringparam(struct net_device *dev,
10891                                struct ethtool_ringparam *ering)
10892 {
10893         struct bnx2x *bp = netdev_priv(dev);
10894         int rc = 0;
10895
10896         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10897                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10898                 return -EAGAIN;
10899         }
10900
10901         if ((ering->rx_pending > MAX_RX_AVAIL) ||
10902             (ering->tx_pending > MAX_TX_AVAIL) ||
10903             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10904                 return -EINVAL;
10905
10906         bp->rx_ring_size = ering->rx_pending;
10907         bp->tx_ring_size = ering->tx_pending;
10908
10909         if (netif_running(dev)) {
10910                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10911                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10912         }
10913
10914         return rc;
10915 }
10916
10917 static void bnx2x_get_pauseparam(struct net_device *dev,
10918                                  struct ethtool_pauseparam *epause)
10919 {
10920         struct bnx2x *bp = netdev_priv(dev);
10921
10922         epause->autoneg = (bp->link_params.req_flow_ctrl ==
10923                            BNX2X_FLOW_CTRL_AUTO) &&
10924                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10925
10926         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10927                             BNX2X_FLOW_CTRL_RX);
10928         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10929                             BNX2X_FLOW_CTRL_TX);
10930
10931         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10932            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10933            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10934 }
10935
10936 static int bnx2x_set_pauseparam(struct net_device *dev,
10937                                 struct ethtool_pauseparam *epause)
10938 {
10939         struct bnx2x *bp = netdev_priv(dev);
10940
10941         if (IS_E1HMF(bp))
10942                 return 0;
10943
10944         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10945            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10946            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10947
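        /* Start from AUTO and fold in the explicitly requested directions;
         * if neither rx nor tx pause was requested this collapses to NONE
         * below, while a valid autoneg request re-selects AUTO.
         */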
10948         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10949
10950         if (epause->rx_pause)
10951                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10952
10953         if (epause->tx_pause)
10954                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10955
10956         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10957                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10958
10959         if (epause->autoneg) {
10960                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10961                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
10962                         return -EINVAL;
10963                 }
10964
10965                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10966                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10967         }
10968
10969         DP(NETIF_MSG_LINK,
10970            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10971
10972         if (netif_running(dev)) {
10973                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10974                 bnx2x_link_set(bp);
10975         }
10976
10977         return 0;
10978 }
10979
10980 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10981 {
10982         struct bnx2x *bp = netdev_priv(dev);
10983         int changed = 0;
10984         int rc = 0;
10985
10986         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10987                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10988                 return -EAGAIN;
10989         }
10990
10991         /* TPA requires Rx CSUM offloading */
10992         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10993                 if (!disable_tpa) {
10994                         if (!(dev->features & NETIF_F_LRO)) {
10995                                 dev->features |= NETIF_F_LRO;
10996                                 bp->flags |= TPA_ENABLE_FLAG;
10997                                 changed = 1;
10998                         }
10999                 } else
11000                         rc = -EINVAL;
11001         } else if (dev->features & NETIF_F_LRO) {
11002                 dev->features &= ~NETIF_F_LRO;
11003                 bp->flags &= ~TPA_ENABLE_FLAG;
11004                 changed = 1;
11005         }
11006
11007         if (data & ETH_FLAG_RXHASH)
11008                 dev->features |= NETIF_F_RXHASH;
11009         else
11010                 dev->features &= ~NETIF_F_RXHASH;
11011
11012         if (changed && netif_running(dev)) {
11013                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11014                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11015         }
11016
11017         return rc;
11018 }
11019
11020 static u32 bnx2x_get_rx_csum(struct net_device *dev)
11021 {
11022         struct bnx2x *bp = netdev_priv(dev);
11023
11024         return bp->rx_csum;
11025 }
11026
11027 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11028 {
11029         struct bnx2x *bp = netdev_priv(dev);
11030         int rc = 0;
11031
11032         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11033                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11034                 return -EAGAIN;
11035         }
11036
11037         bp->rx_csum = data;
11038
11039         /* Disable TPA when Rx CSUM is disabled; otherwise all
11040            TPA'ed packets will be discarded due to a wrong TCP CSUM */
11041         if (!data) {
11042                 u32 flags = ethtool_op_get_flags(dev);
11043
11044                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11045         }
11046
11047         return rc;
11048 }
11049
11050 static int bnx2x_set_tso(struct net_device *dev, u32 data)
11051 {
11052         if (data) {
11053                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11054                 dev->features |= NETIF_F_TSO6;
11055         } else {
11056                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11057                 dev->features &= ~NETIF_F_TSO6;
11058         }
11059
11060         return 0;
11061 }
11062
11063 static const struct {
11064         char string[ETH_GSTRING_LEN];
11065 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11066         { "register_test (offline)" },
11067         { "memory_test (offline)" },
11068         { "loopback_test (offline)" },
11069         { "nvram_test (online)" },
11070         { "interrupt_test (online)" },
11071         { "link_test (online)" },
11072         { "idle check (online)" }
11073 };
11074
11075 static int bnx2x_test_registers(struct bnx2x *bp)
11076 {
11077         int idx, i, rc = -ENODEV;
11078         u32 wr_val = 0;
11079         int port = BP_PORT(bp);
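        /* offset1 is the per-port register stride: the port-1 copy of each
         * register lives offset1 bytes above the port-0 copy, so the tested
         * address is offset0 + port * offset1; mask covers the implemented
         * bits of the register.
         */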
11080         static const struct {
11081                 u32 offset0;
11082                 u32 offset1;
11083                 u32 mask;
11084         } reg_tbl[] = {
11085 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
11086                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
11087                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
11088                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
11089                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
11090                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
11091                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
11092                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
11093                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
11094                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
11095 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
11096                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
11097                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
11098                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
11099                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
11100                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11101                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
11102                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
11103                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
11104                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
11105 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
11106                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
11107                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
11108                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
11109                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
11110                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
11111                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
11112                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
11113                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
11114                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
11115 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
11116                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
11117                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
11118                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11119                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
11120                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11121                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
11122
11123                 { 0xffffffff, 0, 0x00000000 }
11124         };
11125
11126         if (!netif_running(bp->dev))
11127                 return rc;
11128
11129         /* Repeat the test twice:
11130            First by writing 0x00000000, second by writing 0xffffffff */
11131         for (idx = 0; idx < 2; idx++) {
11132
11133                 switch (idx) {
11134                 case 0:
11135                         wr_val = 0;
11136                         break;
11137                 case 1:
11138                         wr_val = 0xffffffff;
11139                         break;
11140                 }
11141
11142                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11143                         u32 offset, mask, save_val, val;
11144
11145                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11146                         mask = reg_tbl[i].mask;
11147
11148                         save_val = REG_RD(bp, offset);
11149
11150                         REG_WR(bp, offset, (wr_val & mask));
11151                         val = REG_RD(bp, offset);
11152
11153                         /* Restore the original register's value */
11154                         REG_WR(bp, offset, save_val);
11155
11156                         /* verify value is as expected */
11157                         if ((val & mask) != (wr_val & mask)) {
11158                                 DP(NETIF_MSG_PROBE,
11159                                    "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11160                                    offset, val, wr_val, mask);
11161                                 goto test_reg_exit;
11162                         }
11163                 }
11164         }
11165
11166         rc = 0;
11167
11168 test_reg_exit:
11169         return rc;
11170 }
11171
11172 static int bnx2x_test_memory(struct bnx2x *bp)
11173 {
11174         int i, j, rc = -ENODEV;
11175         u32 val;
11176         static const struct {
11177                 u32 offset;
11178                 int size;
11179         } mem_tbl[] = {
11180                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
11181                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11182                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
11183                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
11184                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
11185                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
11186                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
11187
11188                 { 0xffffffff, 0 }
11189         };
11190         static const struct {
11191                 char *name;
11192                 u32 offset;
11193                 u32 e1_mask;
11194                 u32 e1h_mask;
11195         } prty_tbl[] = {
11196                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
11197                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
11198                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
11199                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
11200                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
11201                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
11202
11203                 { NULL, 0xffffffff, 0, 0 }
11204         };
11205
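        /* The walk below reads every word of each memory so that any latent
         * parity error gets latched, then the per-block parity status
         * registers are checked; the e1/e1h masks hide status bits that are
         * apparently expected to be set on that chip revision.
         */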
11206         if (!netif_running(bp->dev))
11207                 return rc;
11208
11209         /* Go through all the memories */
11210         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11211                 for (j = 0; j < mem_tbl[i].size; j++)
11212                         REG_RD(bp, mem_tbl[i].offset + j*4);
11213
11214         /* Check the parity status */
11215         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11216                 val = REG_RD(bp, prty_tbl[i].offset);
11217                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11218                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11219                         DP(NETIF_MSG_HW,
11220                            "%s is 0x%x\n", prty_tbl[i].name, val);
11221                         goto test_mem_exit;
11222                 }
11223         }
11224
11225         rc = 0;
11226
11227 test_mem_exit:
11228         return rc;
11229 }
11230
11231 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11232 {
11233         int cnt = 1000;
11234
11235         if (link_up)
11236                 while (bnx2x_link_test(bp) && cnt--)
11237                         msleep(10);
11238 }
11239
11240 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11241 {
11242         unsigned int pkt_size, num_pkts, i;
11243         struct sk_buff *skb;
11244         unsigned char *packet;
11245         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11246         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11247         u16 tx_start_idx, tx_idx;
11248         u16 rx_start_idx, rx_idx;
11249         u16 pkt_prod, bd_prod;
11250         struct sw_tx_bd *tx_buf;
11251         struct eth_tx_start_bd *tx_start_bd;
11252         struct eth_tx_parse_bd *pbd = NULL;
11253         dma_addr_t mapping;
11254         union eth_rx_cqe *cqe;
11255         u8 cqe_fp_flags;
11256         struct sw_rx_bd *rx_buf;
11257         u16 len;
11258         int rc = -ENODEV;
11259
11260         /* check the loopback mode */
11261         switch (loopback_mode) {
11262         case BNX2X_PHY_LOOPBACK:
11263                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11264                         return -EINVAL;
11265                 break;
11266         case BNX2X_MAC_LOOPBACK:
11267                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11268                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11269                 break;
11270         default:
11271                 return -EINVAL;
11272         }
11273
11274         /* prepare the loopback packet */
11275         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11276                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11277         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11278         if (!skb) {
11279                 rc = -ENOMEM;
11280                 goto test_loopback_exit;
11281         }
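        /* Build a self-addressed frame: our own MAC as destination
         * (presumably so the looped-back frame passes address filtering),
         * a zeroed source, 0x77 fill for the rest of the header and an
         * incrementing byte pattern in the payload, verified on RX below.
         */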
11282         packet = skb_put(skb, pkt_size);
11283         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11284         memset(packet + ETH_ALEN, 0, ETH_ALEN);
11285         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11286         for (i = ETH_HLEN; i < pkt_size; i++)
11287                 packet[i] = (unsigned char) (i & 0xff);
11288
11289         /* send the loopback packet */
11290         num_pkts = 0;
11291         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11292         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11293
11294         pkt_prod = fp_tx->tx_pkt_prod++;
11295         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11296         tx_buf->first_bd = fp_tx->tx_bd_prod;
11297         tx_buf->skb = skb;
11298         tx_buf->flags = 0;
11299
11300         bd_prod = TX_BD(fp_tx->tx_bd_prod);
11301         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11302         mapping = dma_map_single(&bp->pdev->dev, skb->data,
11303                                  skb_headlen(skb), DMA_TO_DEVICE);
11304         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11305         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11306         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11307         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11308         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11309         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11310         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11311                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11312
11313         /* turn on parsing and get a BD */
11314         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11315         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11316
11317         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11318
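        /* Ordering is critical here: wmb() makes the BD writes visible
         * before the producer update, barrier() keeps the compiler from
         * reordering around the doorbell data, and mmiowb() orders the
         * doorbell MMIO write on architectures that need it.
         */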
11319         wmb();
11320
11321         fp_tx->tx_db.data.prod += 2;
11322         barrier();
11323         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11324
11325         mmiowb();
11326
11327         num_pkts++;
11328         fp_tx->tx_bd_prod += 2; /* start + pbd */
11329
11330         udelay(100);
11331
11332         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11333         if (tx_idx != tx_start_idx + num_pkts)
11334                 goto test_loopback_exit;
11335
11336         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11337         if (rx_idx != rx_start_idx + num_pkts)
11338                 goto test_loopback_exit;
11339
11340         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11341         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11342         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11343                 goto test_loopback_rx_exit;
11344
11345         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11346         if (len != pkt_size)
11347                 goto test_loopback_rx_exit;
11348
11349         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11350         skb = rx_buf->skb;
11351         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11352         for (i = ETH_HLEN; i < pkt_size; i++)
11353                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11354                         goto test_loopback_rx_exit;
11355
11356         rc = 0;
11357
11358 test_loopback_rx_exit:
11359
11360         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11361         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11362         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11363         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11364
11365         /* Update producers */
11366         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11367                              fp_rx->rx_sge_prod);
11368
11369 test_loopback_exit:
11370         bp->link_params.loopback_mode = LOOPBACK_NONE;
11371
11372         return rc;
11373 }
11374
11375 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11376 {
11377         int rc = 0, res;
11378
11379         if (BP_NOMCP(bp))
11380                 return rc;
11381
11382         if (!netif_running(bp->dev))
11383                 return BNX2X_LOOPBACK_FAILED;
11384
11385         bnx2x_netif_stop(bp, 1);
11386         bnx2x_acquire_phy_lock(bp);
11387
11388         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11389         if (res) {
11390                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
11391                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11392         }
11393
11394         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11395         if (res) {
11396                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
11397                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11398         }
11399
11400         bnx2x_release_phy_lock(bp);
11401         bnx2x_netif_start(bp);
11402
11403         return rc;
11404 }
11405
11406 #define CRC32_RESIDUAL                  0xdebb20e3
11407
11408 static int bnx2x_test_nvram(struct bnx2x *bp)
11409 {
11410         static const struct {
11411                 int offset;
11412                 int size;
11413         } nvram_tbl[] = {
11414                 {     0,  0x14 }, /* bootstrap */
11415                 {  0x14,  0xec }, /* dir */
11416                 { 0x100, 0x350 }, /* manuf_info */
11417                 { 0x450,  0xf0 }, /* feature_info */
11418                 { 0x640,  0x64 }, /* upgrade_key_info */
11419                 { 0x6a4,  0x64 },
11420                 { 0x708,  0x70 }, /* manuf_key_info */
11421                 { 0x778,  0x70 },
11422                 {     0,     0 }
11423         };
11424         __be32 buf[0x350 / 4];
11425         u8 *data = (u8 *)buf;
11426         int i, rc;
11427         u32 magic, crc;
11428
11429         if (BP_NOMCP(bp))
11430                 return 0;
11431
11432         rc = bnx2x_nvram_read(bp, 0, data, 4);
11433         if (rc) {
11434                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11435                 goto test_nvram_exit;
11436         }
11437
11438         magic = be32_to_cpu(buf[0]);
11439         if (magic != 0x669955aa) {
11440                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11441                 rc = -ENODEV;
11442                 goto test_nvram_exit;
11443         }
11444
11445         for (i = 0; nvram_tbl[i].size; i++) {
11446
11447                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11448                                       nvram_tbl[i].size);
11449                 if (rc) {
11450                         DP(NETIF_MSG_PROBE,
11451                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11452                         goto test_nvram_exit;
11453                 }
11454
11455                 crc = ether_crc_le(nvram_tbl[i].size, data);
11456                 if (crc != CRC32_RESIDUAL) {
11457                         DP(NETIF_MSG_PROBE,
11458                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11459                         rc = -ENODEV;
11460                         goto test_nvram_exit;
11461                 }
11462         }
11463
11464 test_nvram_exit:
11465         return rc;
11466 }
11467
11468 static int bnx2x_test_intr(struct bnx2x *bp)
11469 {
11470         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11471         int i, rc;
11472
11473         if (!netif_running(bp->dev))
11474                 return -ENODEV;
11475
11476         config->hdr.length = 0;
11477         if (CHIP_IS_E1(bp))
11478                 /* use last unicast entries */
11479                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11480         else
11481                 config->hdr.offset = BP_FUNC(bp);
11482         config->hdr.client_id = bp->fp->cl_id;
11483         config->hdr.reserved1 = 0;
11484
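        /* Post a harmless SET_MAC ramrod and wait for its completion; the
         * completion arrives via the slowpath interrupt, so if
         * set_mac_pending does not clear within ~100ms the interrupt path
         * is considered broken.
         */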
11485         bp->set_mac_pending++;
11486         smp_wmb();
11487         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11488                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11489                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11490         if (rc == 0) {
11491                 for (i = 0; i < 10; i++) {
11492                         if (!bp->set_mac_pending)
11493                                 break;
11494                         smp_rmb();
11495                         msleep_interruptible(10);
11496                 }
11497                 if (i == 10)
11498                         rc = -ENODEV;
11499         }
11500
11501         return rc;
11502 }
11503
11504 static void bnx2x_self_test(struct net_device *dev,
11505                             struct ethtool_test *etest, u64 *buf)
11506 {
11507         struct bnx2x *bp = netdev_priv(dev);
11508
11509         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11510                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11511                 etest->flags |= ETH_TEST_FL_FAILED;
11512                 return;
11513         }
11514
11515         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11516
11517         if (!netif_running(dev))
11518                 return;
11519
11520         /* offline tests are not supported in MF mode */
11521         if (IS_E1HMF(bp))
11522                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11523
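        /* Offline tests are disruptive: the NIC is unloaded and reloaded in
         * diagnostic mode, with the UMP input to the TX port IF disabled
         * first (presumably to keep management traffic from interfering
         * with the loopback frames); normal mode is restored afterwards.
         */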
11524         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11525                 int port = BP_PORT(bp);
11526                 u32 val;
11527                 u8 link_up;
11528
11529                 /* save current value of input enable for TX port IF */
11530                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11531                 /* disable input for TX port IF */
11532                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11533
11534                 link_up = (bnx2x_link_test(bp) == 0);
11535                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11536                 bnx2x_nic_load(bp, LOAD_DIAG);
11537                 /* wait until link state is restored */
11538                 bnx2x_wait_for_link(bp, link_up);
11539
11540                 if (bnx2x_test_registers(bp) != 0) {
11541                         buf[0] = 1;
11542                         etest->flags |= ETH_TEST_FL_FAILED;
11543                 }
11544                 if (bnx2x_test_memory(bp) != 0) {
11545                         buf[1] = 1;
11546                         etest->flags |= ETH_TEST_FL_FAILED;
11547                 }
11548                 buf[2] = bnx2x_test_loopback(bp, link_up);
11549                 if (buf[2] != 0)
11550                         etest->flags |= ETH_TEST_FL_FAILED;
11551
11552                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11553
11554                 /* restore input for TX port IF */
11555                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11556
11557                 bnx2x_nic_load(bp, LOAD_NORMAL);
11558                 /* wait until link state is restored */
11559                 bnx2x_wait_for_link(bp, link_up);
11560         }
11561         if (bnx2x_test_nvram(bp) != 0) {
11562                 buf[3] = 1;
11563                 etest->flags |= ETH_TEST_FL_FAILED;
11564         }
11565         if (bnx2x_test_intr(bp) != 0) {
11566                 buf[4] = 1;
11567                 etest->flags |= ETH_TEST_FL_FAILED;
11568         }
11569         if (bp->port.pmf)
11570                 if (bnx2x_link_test(bp) != 0) {
11571                         buf[5] = 1;
11572                         etest->flags |= ETH_TEST_FL_FAILED;
11573                 }
11574
11575 #ifdef BNX2X_EXTRA_DEBUG
11576         bnx2x_panic_dump(bp);
11577 #endif
11578 }
11579
11580 static const struct {
11581         long offset;
11582         int size;
11583         u8 string[ETH_GSTRING_LEN];
11584 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11585 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11586         { Q_STATS_OFFSET32(error_bytes_received_hi),
11587                                                 8, "[%d]: rx_error_bytes" },
11588         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11589                                                 8, "[%d]: rx_ucast_packets" },
11590         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11591                                                 8, "[%d]: rx_mcast_packets" },
11592         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11593                                                 8, "[%d]: rx_bcast_packets" },
11594         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11595         { Q_STATS_OFFSET32(rx_err_discard_pkt),
11596                                          4, "[%d]: rx_phy_ip_err_discards"},
11597         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11598                                          4, "[%d]: rx_skb_alloc_discard" },
11599         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11600
11601 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11602         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11603                                                 8, "[%d]: tx_ucast_packets" },
11604         { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11605                                                 8, "[%d]: tx_mcast_packets" },
11606         { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11607                                                 8, "[%d]: tx_bcast_packets" }
11608 };
11609
11610 static const struct {
11611         long offset;
11612         int size;
11613         u32 flags;
11614 #define STATS_FLAGS_PORT                1
11615 #define STATS_FLAGS_FUNC                2
11616 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11617         u8 string[ETH_GSTRING_LEN];
11618 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11619 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11620                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
11621         { STATS_OFFSET32(error_bytes_received_hi),
11622                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11623         { STATS_OFFSET32(total_unicast_packets_received_hi),
11624                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11625         { STATS_OFFSET32(total_multicast_packets_received_hi),
11626                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11627         { STATS_OFFSET32(total_broadcast_packets_received_hi),
11628                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11629         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11630                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11631         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11632                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
11633         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11634                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11635         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11636                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11637 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11638                                 8, STATS_FLAGS_PORT, "rx_fragments" },
11639         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11640                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
11641         { STATS_OFFSET32(no_buff_discard_hi),
11642                                 8, STATS_FLAGS_BOTH, "rx_discards" },
11643         { STATS_OFFSET32(mac_filter_discard),
11644                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11645         { STATS_OFFSET32(xxoverflow_discard),
11646                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11647         { STATS_OFFSET32(brb_drop_hi),
11648                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11649         { STATS_OFFSET32(brb_truncate_hi),
11650                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11651         { STATS_OFFSET32(pause_frames_received_hi),
11652                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11653         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11654                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11655         { STATS_OFFSET32(nig_timer_max),
11656                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11657 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11658                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11659         { STATS_OFFSET32(rx_skb_alloc_failed),
11660                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11661         { STATS_OFFSET32(hw_csum_err),
11662                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11663
11664         { STATS_OFFSET32(total_bytes_transmitted_hi),
11665                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
11666         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11667                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11668         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11669                                 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11670         { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11671                                 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11672         { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11673                                 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11674         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11675                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11676         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11677                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11678 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11679                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11680         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11681                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11682         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11683                                 8, STATS_FLAGS_PORT, "tx_deferred" },
11684         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11685                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11686         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11687                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11688         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11689                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11690         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11691                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11692         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11693                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11694         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11695                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11696         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11697                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11698 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11699                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11700         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11701                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11702         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11703                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11704         { STATS_OFFSET32(pause_frames_sent_hi),
11705                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11706 };
11707
11708 #define IS_PORT_STAT(i) \
11709         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11710 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11711 #define IS_E1HMF_MODE_STAT(bp) \
11712                         (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11713
11714 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11715 {
11716         struct bnx2x *bp = netdev_priv(dev);
11717         int i, num_stats;
11718
11719         switch (stringset) {
11720         case ETH_SS_STATS:
11721                 if (is_multi(bp)) {
11722                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11723                         if (!IS_E1HMF_MODE_STAT(bp))
11724                                 num_stats += BNX2X_NUM_STATS;
11725                 } else {
11726                         if (IS_E1HMF_MODE_STAT(bp)) {
11727                                 num_stats = 0;
11728                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
11729                                         if (IS_FUNC_STAT(i))
11730                                                 num_stats++;
11731                         } else
11732                                 num_stats = BNX2X_NUM_STATS;
11733                 }
11734                 return num_stats;
11735
11736         case ETH_SS_TEST:
11737                 return BNX2X_NUM_TESTS;
11738
11739         default:
11740                 return -EINVAL;
11741         }
11742 }
11743
11744 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11745 {
11746         struct bnx2x *bp = netdev_priv(dev);
11747         int i, j, k;
11748
11749         switch (stringset) {
11750         case ETH_SS_STATS:
11751                 if (is_multi(bp)) {
11752                         k = 0;
11753                         for_each_queue(bp, i) {
11754                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11755                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11756                                                 bnx2x_q_stats_arr[j].string, i);
11757                                 k += BNX2X_NUM_Q_STATS;
11758                         }
11759                         if (IS_E1HMF_MODE_STAT(bp))
11760                                 break;
11761                         for (j = 0; j < BNX2X_NUM_STATS; j++)
11762                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11763                                        bnx2x_stats_arr[j].string);
11764                 } else {
11765                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11766                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11767                                         continue;
11768                                 strcpy(buf + j*ETH_GSTRING_LEN,
11769                                        bnx2x_stats_arr[i].string);
11770                                 j++;
11771                         }
11772                 }
11773                 break;
11774
11775         case ETH_SS_TEST:
11776                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11777                 break;
11778         }
11779 }
11780
11781 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11782                                     struct ethtool_stats *stats, u64 *buf)
11783 {
11784         struct bnx2x *bp = netdev_priv(dev);
11785         u32 *hw_stats, *offset;
11786         int i, j, k;
11787
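        /* Stats live in arrays of 32-bit words: size == 8 entries are
         * 64-bit counters stored as a hi/lo pair and reassembled with
         * HILO_U64, size == 4 entries are plain 32-bit counters, and
         * size == 0 entries are placeholders reported as zero.
         */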
11788         if (is_multi(bp)) {
11789                 k = 0;
11790                 for_each_queue(bp, i) {
11791                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11792                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11793                                 if (bnx2x_q_stats_arr[j].size == 0) {
11794                                         /* skip this counter */
11795                                         buf[k + j] = 0;
11796                                         continue;
11797                                 }
11798                                 offset = (hw_stats +
11799                                           bnx2x_q_stats_arr[j].offset);
11800                                 if (bnx2x_q_stats_arr[j].size == 4) {
11801                                         /* 4-byte counter */
11802                                         buf[k + j] = (u64) *offset;
11803                                         continue;
11804                                 }
11805                                 /* 8-byte counter */
11806                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11807                         }
11808                         k += BNX2X_NUM_Q_STATS;
11809                 }
11810                 if (IS_E1HMF_MODE_STAT(bp))
11811                         return;
11812                 hw_stats = (u32 *)&bp->eth_stats;
11813                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11814                         if (bnx2x_stats_arr[j].size == 0) {
11815                                 /* skip this counter */
11816                                 buf[k + j] = 0;
11817                                 continue;
11818                         }
11819                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
11820                         if (bnx2x_stats_arr[j].size == 4) {
11821                                 /* 4-byte counter */
11822                                 buf[k + j] = (u64) *offset;
11823                                 continue;
11824                         }
11825                         /* 8-byte counter */
11826                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
11827                 }
11828         } else {
11829                 hw_stats = (u32 *)&bp->eth_stats;
11830                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11831                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11832                                 continue;
11833                         if (bnx2x_stats_arr[i].size == 0) {
11834                                 /* skip this counter */
11835                                 buf[j] = 0;
11836                                 j++;
11837                                 continue;
11838                         }
11839                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
11840                         if (bnx2x_stats_arr[i].size == 4) {
11841                                 /* 4-byte counter */
11842                                 buf[j] = (u64) *offset;
11843                                 j++;
11844                                 continue;
11845                         }
11846                         /* 8-byte counter */
11847                         buf[j] = HILO_U64(*offset, *(offset + 1));
11848                         j++;
11849                 }
11850         }
11851 }
11852
11853 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11854 {
11855         struct bnx2x *bp = netdev_priv(dev);
11856         int i;
11857
11858         if (!netif_running(dev))
11859                 return 0;
11860
11861         if (!bp->port.pmf)
11862                 return 0;
11863
11864         if (data == 0)
11865                 data = 2;
11866
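        /* Blink for 'data' seconds (two half-second steps per second):
         * even steps force the LED on in operational 1G mode, odd steps
         * turn it off; afterwards the LED is restored to reflect the
         * actual link state.
         */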
11867         for (i = 0; i < (data * 2); i++) {
11868                 if ((i % 2) == 0)
11869                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11870                                       SPEED_1000);
11871                 else
11872                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11873
11874                 msleep_interruptible(500);
11875                 if (signal_pending(current))
11876                         break;
11877         }
11878
11879         if (bp->link_vars.link_up)
11880                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11881                               bp->link_vars.line_speed);
11882
11883         return 0;
11884 }
11885
11886 static const struct ethtool_ops bnx2x_ethtool_ops = {
11887         .get_settings           = bnx2x_get_settings,
11888         .set_settings           = bnx2x_set_settings,
11889         .get_drvinfo            = bnx2x_get_drvinfo,
11890         .get_regs_len           = bnx2x_get_regs_len,
11891         .get_regs               = bnx2x_get_regs,
11892         .get_wol                = bnx2x_get_wol,
11893         .set_wol                = bnx2x_set_wol,
11894         .get_msglevel           = bnx2x_get_msglevel,
11895         .set_msglevel           = bnx2x_set_msglevel,
11896         .nway_reset             = bnx2x_nway_reset,
11897         .get_link               = bnx2x_get_link,
11898         .get_eeprom_len         = bnx2x_get_eeprom_len,
11899         .get_eeprom             = bnx2x_get_eeprom,
11900         .set_eeprom             = bnx2x_set_eeprom,
11901         .get_coalesce           = bnx2x_get_coalesce,
11902         .set_coalesce           = bnx2x_set_coalesce,
11903         .get_ringparam          = bnx2x_get_ringparam,
11904         .set_ringparam          = bnx2x_set_ringparam,
11905         .get_pauseparam         = bnx2x_get_pauseparam,
11906         .set_pauseparam         = bnx2x_set_pauseparam,
11907         .get_rx_csum            = bnx2x_get_rx_csum,
11908         .set_rx_csum            = bnx2x_set_rx_csum,
11909         .get_tx_csum            = ethtool_op_get_tx_csum,
11910         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
11911         .set_flags              = bnx2x_set_flags,
11912         .get_flags              = ethtool_op_get_flags,
11913         .get_sg                 = ethtool_op_get_sg,
11914         .set_sg                 = ethtool_op_set_sg,
11915         .get_tso                = ethtool_op_get_tso,
11916         .set_tso                = bnx2x_set_tso,
11917         .self_test              = bnx2x_self_test,
11918         .get_sset_count         = bnx2x_get_sset_count,
11919         .get_strings            = bnx2x_get_strings,
11920         .phys_id                = bnx2x_phys_id,
11921         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
11922 };
11923
11924 /* end of ethtool_ops */
11925
11926 /****************************************************************************
11927 * General service functions
11928 ****************************************************************************/
11929
11930 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11931 {
11932         u16 pmcsr;
11933
11934         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11935
11936         switch (state) {
11937         case PCI_D0:
11938                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11939                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11940                                        PCI_PM_CTRL_PME_STATUS));
11941
11942                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11943                         /* delay required during transition out of D3hot */
11944                         msleep(20);
11945                 break;
11946
11947         case PCI_D3hot:
11948                 /* If there are other clients above, don't
11949                    shut down the power */
11950                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11951                         return 0;
11952                 /* Don't shut down the power for emulation and FPGA */
11953                 if (CHIP_REV_IS_SLOW(bp))
11954                         return 0;
11955
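                /* Program the power state field to 3 (D3hot) and, when WoL
                 * is enabled, also enable PME generation so the card can
                 * wake the system.
                 */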
11956                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11957                 pmcsr |= 3;
11958
11959                 if (bp->wol)
11960                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11961
11962                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11963                                       pmcsr);
11964
11965                 /* No more memory access after this point until
11966                  * device is brought back to D0.
11967                  */
11968                 break;
11969
11970         default:
11971                 return -EINVAL;
11972         }
11973         return 0;
11974 }
11975
11976 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11977 {
11978         u16 rx_cons_sb;
11979
11980         /* Tell compiler that status block fields can change */
11981         barrier();
11982         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
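        /* The last entry of each RCQ page is a "next page" pointer rather
         * than a real completion, so skip over it when the consumer index
         * lands there.
         */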
11983         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11984                 rx_cons_sb++;
11985         return (fp->rx_comp_cons != rx_cons_sb);
11986 }
11987
11988 /*
11989  * net_device service functions
11990  */
11991
11992 static int bnx2x_poll(struct napi_struct *napi, int budget)
11993 {
11994         int work_done = 0;
11995         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11996                                                  napi);
11997         struct bnx2x *bp = fp->bp;
11998
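        /* Standard NAPI loop: service TX completions (not budgeted) and RX
         * work against the budget; leave either because the budget is
         * exhausted (stay scheduled) or because no work remains, in which
         * case interrupts are re-enabled via napi_complete() and the
         * status block acks below.
         */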
11999         while (1) {
12000 #ifdef BNX2X_STOP_ON_ERROR
12001                 if (unlikely(bp->panic)) {
12002                         napi_complete(napi);
12003                         return 0;
12004                 }
12005 #endif
12006
12007                 if (bnx2x_has_tx_work(fp))
12008                         bnx2x_tx_int(fp);
12009
12010                 if (bnx2x_has_rx_work(fp)) {
12011                         work_done += bnx2x_rx_int(fp, budget - work_done);
12012
12013                         /* must not complete if we consumed full budget */
12014                         if (work_done >= budget)
12015                                 break;
12016                 }
12017
12018                 /* Fall out from the NAPI loop if needed */
12019                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12020                         bnx2x_update_fpsb_idx(fp);
12021                 /* bnx2x_has_rx_work() reads the status block, thus we need
12022                  * to ensure that status block indices have been actually read
12023                  * (bnx2x_update_fpsb_idx) prior to this check
12024                  * (bnx2x_has_rx_work) so that we won't write the "newer"
12025                  * value of the status block to IGU (if there was a DMA right
12026                  * after bnx2x_has_rx_work and if there is no rmb, the memory
12027                  * reading (bnx2x_update_fpsb_idx) may be postponed to right
12028                  * before bnx2x_ack_sb). In this case there will never be
12029                  * another interrupt until there is another update of the
12030                  * status block, while there is still unhandled work.
12031                  */
12032                         rmb();
12033
12034                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12035                                 napi_complete(napi);
12036                                 /* Re-enable interrupts */
12037                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12038                                              le16_to_cpu(fp->fp_c_idx),
12039                                              IGU_INT_NOP, 1);
12040                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12041                                              le16_to_cpu(fp->fp_u_idx),
12042                                              IGU_INT_ENABLE, 1);
12043                                 break;
12044                         }
12045                 }
12046         }
12047
12048         return work_done;
12049 }
12050
12051
12052 /* We split the first BD into a headers BD and a data BD
12053  * to ease the pain of our fellow microcode engineers;
12054  * we use one mapping for both BDs.
12055  * So far this has only been observed to happen
12056  * in Other Operating Systems(TM).
12057  */
12058 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12059                                    struct bnx2x_fastpath *fp,
12060                                    struct sw_tx_bd *tx_buf,
12061                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
12062                                    u16 bd_prod, int nbd)
12063 {
12064         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12065         struct eth_tx_bd *d_tx_bd;
12066         dma_addr_t mapping;
12067         int old_len = le16_to_cpu(h_tx_bd->nbytes);
12068
12069         /* first, fix the first BD */
12070         h_tx_bd->nbd = cpu_to_le16(nbd);
12071         h_tx_bd->nbytes = cpu_to_le16(hlen);
12072
12073         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
12074            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
12075            h_tx_bd->addr_lo, h_tx_bd->nbd);
12076
12077         /* now get a new data BD
12078          * (after the pbd) and fill it */
12079         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12080         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12081
12082         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12083                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12084
12085         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12086         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12087         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12088
12089         /* this marks the BD as one that has no individual mapping */
12090         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12091
12092         DP(NETIF_MSG_TX_QUEUED,
12093            "TSO split data size is %d (%x:%x)\n",
12094            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
12095
12096         /* update tx_bd */
12097         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12098
12099         return bd_prod;
12100 }
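/* Worked example (editor's sketch): suppose the first BD maps 200 bytes at
 * DMA address A and the TSO headers occupy hlen = 66 bytes. After
 * bnx2x_tx_split() the chain holds two BDs sharing the original mapping:
 *
 *   header BD: addr = A,       nbytes = 66
 *   data BD:   addr = A + 66,  nbytes = 200 - 66 = 134
 *
 * Only the original mapping is unmapped on completion; BNX2X_TSO_SPLIT_BD
 * marks the extra data BD as having no individual mapping of its own.
 */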
12101
12102 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12103 {
12104         if (fix > 0)
12105                 csum = (u16) ~csum_fold(csum_sub(csum,
12106                                 csum_partial(t_header - fix, fix, 0)));
12107
12108         else if (fix < 0)
12109                 csum = (u16) ~csum_fold(csum_add(csum,
12110                                 csum_partial(t_header, -fix, 0)));
12111
12112         return swab16(csum);
12113 }
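/* Worked example (editor's sketch): with fix > 0 the stack computed skb->csum
 * starting "fix" bytes before the transport header, so the partial sum over
 * those extra bytes is removed with csum_sub()/csum_partial() before folding;
 * with fix < 0 the missing leading bytes are added back with csum_add(). The
 * final swab16() converts the folded checksum to the byte order the parsing
 * BD expects.
 */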
12114
12115 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12116 {
12117         u32 rc;
12118
12119         if (skb->ip_summed != CHECKSUM_PARTIAL)
12120                 rc = XMIT_PLAIN;
12121
12122         else {
12123                 if (skb->protocol == htons(ETH_P_IPV6)) {
12124                         rc = XMIT_CSUM_V6;
12125                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12126                                 rc |= XMIT_CSUM_TCP;
12127
12128                 } else {
12129                         rc = XMIT_CSUM_V4;
12130                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12131                                 rc |= XMIT_CSUM_TCP;
12132                 }
12133         }
12134
12135         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12136                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12137
12138         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12139                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12140
12141         return rc;
12142 }
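/* Example classifications (editor's sketch):
 *   no HW checksum requested          -> XMIT_PLAIN
 *   IPv4 TCP with CHECKSUM_PARTIAL    -> XMIT_CSUM_V4 | XMIT_CSUM_TCP
 *   IPv4 TSO (SKB_GSO_TCPV4)          -> the above plus XMIT_GSO_V4
 *   IPv6 TCP TSO (SKB_GSO_TCPV6)      -> XMIT_CSUM_V6 | XMIT_CSUM_TCP |
 *                                        XMIT_GSO_V6
 */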
12143
12144 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12145 /* check whether the packet requires linearization (i.e. is too
12146    fragmented); no need to check fragmentation if the page size is > 8K
12147    (FW restrictions cannot be violated in that case) */
12148 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12149                              u32 xmit_type)
12150 {
12151         int to_copy = 0;
12152         int hlen = 0;
12153         int first_bd_sz = 0;
12154
12155         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12156         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12157
12158                 if (xmit_type & XMIT_GSO) {
12159                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12160                         /* Check if LSO packet needs to be copied:
12161                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12162                         int wnd_size = MAX_FETCH_BD - 3;
12163                         /* Number of windows to check */
12164                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12165                         int wnd_idx = 0;
12166                         int frag_idx = 0;
12167                         u32 wnd_sum = 0;
12168
12169                         /* Headers length */
12170                         hlen = (int)(skb_transport_header(skb) - skb->data) +
12171                                 tcp_hdrlen(skb);
12172
12173                         /* Amount of data (w/o headers) in the linear part of the SKB */
12174                         first_bd_sz = skb_headlen(skb) - hlen;
12175
12176                         wnd_sum  = first_bd_sz;
12177
12178                         /* Calculate the first sum - it's special */
12179                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12180                                 wnd_sum +=
12181                                         skb_shinfo(skb)->frags[frag_idx].size;
12182
12183                         /* If there was data on linear skb data - check it */
12184                         if (first_bd_sz > 0) {
12185                                 if (unlikely(wnd_sum < lso_mss)) {
12186                                         to_copy = 1;
12187                                         goto exit_lbl;
12188                                 }
12189
12190                                 wnd_sum -= first_bd_sz;
12191                         }
12192
12193                         /* Others are easier: run through the frag list and
12194                            check all windows */
12195                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12196                                 wnd_sum +=
12197                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12198
12199                                 if (unlikely(wnd_sum < lso_mss)) {
12200                                         to_copy = 1;
12201                                         break;
12202                                 }
12203                                 wnd_sum -=
12204                                         skb_shinfo(skb)->frags[wnd_idx].size;
12205                         }
12206                 } else {
12207                         /* a non-LSO packet that is too fragmented
12208                            must always be linearized */
12209                         to_copy = 1;
12210                 }
12211         }
12212
12213 exit_lbl:
12214         if (unlikely(to_copy))
12215                 DP(NETIF_MSG_TX_QUEUED,
12216                    "Linearization IS REQUIRED for %s packet. "
12217                    "num_frags %d  hlen %d  first_bd_sz %d\n",
12218                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12219                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12220
12221         return to_copy;
12222 }
12223 #endif
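/* Worked example (editor's sketch, assuming MAX_FETCH_BD is 13 so the window
 * size is 13 - 3 = 10): an LSO skb with nr_frags = 12 is checked in
 * num_wnds + 1 = (12 - 10) + 1 = 3 sliding windows of 10 BDs each. If any
 * window sums to less than gso_size (one MSS), the FW could be asked to
 * fetch more than MAX_FETCH_BD BDs for a single packet, so the skb is
 * linearized instead.
 */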
12224
12225 /* called with netif_tx_lock
12226  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12227  * netif_wake_queue()
12228  */
12229 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12230 {
12231         struct bnx2x *bp = netdev_priv(dev);
12232         struct bnx2x_fastpath *fp;
12233         struct netdev_queue *txq;
12234         struct sw_tx_bd *tx_buf;
12235         struct eth_tx_start_bd *tx_start_bd;
12236         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12237         struct eth_tx_parse_bd *pbd = NULL;
12238         u16 pkt_prod, bd_prod;
12239         int nbd, fp_index;
12240         dma_addr_t mapping;
12241         u32 xmit_type = bnx2x_xmit_type(bp, skb);
12242         int i;
12243         u8 hlen = 0;
12244         __le16 pkt_size = 0;
12245         struct ethhdr *eth;
12246         u8 mac_type = UNICAST_ADDRESS;
12247
12248 #ifdef BNX2X_STOP_ON_ERROR
12249         if (unlikely(bp->panic))
12250                 return NETDEV_TX_BUSY;
12251 #endif
12252
12253         fp_index = skb_get_queue_mapping(skb);
12254         txq = netdev_get_tx_queue(dev, fp_index);
12255
12256         fp = &bp->fp[fp_index];
12257
12258         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12259                 fp->eth_q_stats.driver_xoff++;
12260                 netif_tx_stop_queue(txq);
12261                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12262                 return NETDEV_TX_BUSY;
12263         }
12264
12265         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
12266            "  gso type %x  xmit_type %x\n",
12267            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12268            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12269
12270         eth = (struct ethhdr *)skb->data;
12271
12272         /* set flag according to packet type (UNICAST_ADDRESS is default) */
12273         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12274                 if (is_broadcast_ether_addr(eth->h_dest))
12275                         mac_type = BROADCAST_ADDRESS;
12276                 else
12277                         mac_type = MULTICAST_ADDRESS;
12278         }
12279
12280 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
12281         /* First, check if we need to linearize the skb (due to FW
12282            restrictions); no need to check fragmentation if the page size
12283            is > 8K (FW restrictions cannot be violated in that case) */
12284         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12285                 /* Statistics of linearization */
12286                 bp->lin_cnt++;
12287                 if (skb_linearize(skb) != 0) {
12288                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12289                            "silently dropping this SKB\n");
12290                         dev_kfree_skb_any(skb);
12291                         return NETDEV_TX_OK;
12292                 }
12293         }
12294 #endif
12295
12296         /*
12297         Please read carefully. First we use one BD which we mark as the
12298         start BD, then we have a parsing-info BD (used for TSO or xsum),
12299         and only then the rest of the TSO BDs.
12300         (Don't forget to mark the last one as last,
12301         and to unmap only AFTER you write to the BD ...)
12302         And above all: all PBD sizes are in words - NOT DWORDS!
12303         */
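        /* Resulting BD chain for a typical TSO packet (editor's sketch):
         *
         *   start BD (headers, FLAGS_START_BD)
         *     -> parsing BD (pbd: hlen, mss, pseudo checksum)
         *     -> split data BD (rest of linear data, if headlen > hlen)
         *     -> one data BD per frag (the last one ends the packet)
         */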
12304
12305         pkt_prod = fp->tx_pkt_prod++;
12306         bd_prod = TX_BD(fp->tx_bd_prod);
12307
12308         /* get a tx_buf and first BD */
12309         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12310         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12311
12312         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12313         tx_start_bd->general_data =  (mac_type <<
12314                                         ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12315         /* header nbd */
12316         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12317
12318         /* remember the first BD of the packet */
12319         tx_buf->first_bd = fp->tx_bd_prod;
12320         tx_buf->skb = skb;
12321         tx_buf->flags = 0;
12322
12323         DP(NETIF_MSG_TX_QUEUED,
12324            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
12325            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12326
12327 #ifdef BCM_VLAN
12328         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12329             (bp->flags & HW_VLAN_TX_FLAG)) {
12330                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12331                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12332         } else
12333 #endif
12334                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12335
12336         /* turn on parsing and get a BD */
12337         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12338         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12339
12340         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12341
12342         if (xmit_type & XMIT_CSUM) {
12343                 hlen = (skb_network_header(skb) - skb->data) / 2;
12344
12345                 /* for now NS flag is not used in Linux */
12346                 pbd->global_data =
12347                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12348                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12349
12350                 pbd->ip_hlen = (skb_transport_header(skb) -
12351                                 skb_network_header(skb)) / 2;
12352
12353                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12354
12355                 pbd->total_hlen = cpu_to_le16(hlen);
12356                 hlen = hlen*2;
12357
12358                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12359
12360                 if (xmit_type & XMIT_CSUM_V4)
12361                         tx_start_bd->bd_flags.as_bitfield |=
12362                                                 ETH_TX_BD_FLAGS_IP_CSUM;
12363                 else
12364                         tx_start_bd->bd_flags.as_bitfield |=
12365                                                 ETH_TX_BD_FLAGS_IPV6;
12366
12367                 if (xmit_type & XMIT_CSUM_TCP) {
12368                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12369
12370                 } else {
12371                         s8 fix = SKB_CS_OFF(skb); /* signed! */
12372
12373                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12374
12375                         DP(NETIF_MSG_TX_QUEUED,
12376                            "hlen %d  fix %d  csum before fix %x\n",
12377                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12378
12379                         /* HW bug: fixup the CSUM */
12380                         pbd->tcp_pseudo_csum =
12381                                 bnx2x_csum_fix(skb_transport_header(skb),
12382                                                SKB_CS(skb), fix);
12383
12384                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12385                            pbd->tcp_pseudo_csum);
12386                 }
12387         }
12388
12389         mapping = dma_map_single(&bp->pdev->dev, skb->data,
12390                                  skb_headlen(skb), DMA_TO_DEVICE);
12391
12392         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12393         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12394         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12395         tx_start_bd->nbd = cpu_to_le16(nbd);
12396         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12397         pkt_size = tx_start_bd->nbytes;
12398
12399         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
12400            "  nbytes %d  flags %x  vlan %x\n",
12401            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12402            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12403            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12404
12405         if (xmit_type & XMIT_GSO) {
12406
12407                 DP(NETIF_MSG_TX_QUEUED,
12408                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
12409                    skb->len, hlen, skb_headlen(skb),
12410                    skb_shinfo(skb)->gso_size);
12411
12412                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12413
12414                 if (unlikely(skb_headlen(skb) > hlen))
12415                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12416                                                  hlen, bd_prod, ++nbd);
12417
12418                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12419                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12420                 pbd->tcp_flags = pbd_tcp_flags(skb);
12421
12422                 if (xmit_type & XMIT_GSO_V4) {
12423                         pbd->ip_id = swab16(ip_hdr(skb)->id);
12424                         pbd->tcp_pseudo_csum =
12425                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12426                                                           ip_hdr(skb)->daddr,
12427                                                           0, IPPROTO_TCP, 0));
12428
12429                 } else
12430                         pbd->tcp_pseudo_csum =
12431                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12432                                                         &ipv6_hdr(skb)->daddr,
12433                                                         0, IPPROTO_TCP, 0));
12434
12435                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12436         }
12437         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12438
12439         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12440                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12441
12442                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12443                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12444                 if (total_pkt_bd == NULL)
12445                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12446
12447                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12448                                        frag->page_offset,
12449                                        frag->size, DMA_TO_DEVICE);
12450
12451                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12452                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12453                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12454                 le16_add_cpu(&pkt_size, frag->size);
12455
12456                 DP(NETIF_MSG_TX_QUEUED,
12457                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
12458                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12459                    le16_to_cpu(tx_data_bd->nbytes));
12460         }
12461
12462         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12463
12464         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12465
12466         /* now send a tx doorbell, counting the next-page BD
12467          * if the packet's BD chain contains or ends with it
12468          */
12469         if (TX_BD_POFF(bd_prod) < nbd)
12470                 nbd++;
12471
12472         if (total_pkt_bd != NULL)
12473                 total_pkt_bd->total_pkt_bytes = pkt_size;
12474
12475         if (pbd)
12476                 DP(NETIF_MSG_TX_QUEUED,
12477                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
12478                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
12479                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12480                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12481                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12482
12483         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
12484
12485         /*
12486          * Make sure that the BD data is updated before updating the producer
12487          * since FW might read the BD right after the producer is updated.
12488          * This is only applicable for weak-ordered memory model archs such
12489          * as IA-64. The following barrier is also mandatory since the FW
12490          * assumes packets always have BDs.
12491          */
12492         wmb();
12493
12494         fp->tx_db.data.prod += nbd;
12495         barrier();
12496         DOORBELL(bp, fp->index, fp->tx_db.raw);
12497
12498         mmiowb();
12499
12500         fp->tx_bd_prod += nbd;
12501
12502         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12503                 netif_tx_stop_queue(txq);
12504
12505                 /* the paired memory barrier is in bnx2x_tx_int(); we must
12506                  * keep the ordering of the set_bit() in netif_tx_stop_queue()
12507                  * and the read of fp->tx_bd_cons */
12508                 smp_mb();
12509
12510                 fp->eth_q_stats.driver_xoff++;
12511                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12512                         netif_tx_wake_queue(txq);
12513         }
12514         fp->tx_pkt++;
12515
12516         return NETDEV_TX_OK;
12517 }
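/* BD accounting example (editor's sketch): for an skb with 2 frags, nbd
 * starts at 2 + 2 = 4 (start BD + parse BD + 2 frag BDs). A TSO header
 * split adds one more BD, and if the chain wraps past a ring page boundary
 * (TX_BD_POFF(bd_prod) < nbd) the next-page pointer BD is counted as well
 * before the doorbell is rung.
 */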
12518
12519 /* called with rtnl_lock */
12520 static int bnx2x_open(struct net_device *dev)
12521 {
12522         struct bnx2x *bp = netdev_priv(dev);
12523
12524         netif_carrier_off(dev);
12525
12526         bnx2x_set_power_state(bp, PCI_D0);
12527
12528         if (!bnx2x_reset_is_done(bp)) {
12529                 do {
12530                         /* Reset the MCP mailbox sequence if there is
12531                          * ongoing recovery
12532                          */
12533                         bp->fw_seq = 0;
12534
12535                         /* If this is the first function to load and
12536                          * "reset done" is still not set, recovery may be
12537                          * required. We don't check the attention state here
12538                          * because a "common" reset may already have cleared
12539                          * it, but we shall proceed with "process kill" anyway.
12540                          */
12541                         if ((bnx2x_get_load_cnt(bp) == 0) &&
12542                                 bnx2x_trylock_hw_lock(bp,
12543                                 HW_LOCK_RESOURCE_RESERVED_08) &&
12544                                 (!bnx2x_leader_reset(bp))) {
12545                                 DP(NETIF_MSG_HW, "Recovered in open\n");
12546                                 break;
12547                         }
12548
12549                         bnx2x_set_power_state(bp, PCI_D3hot);
12550
12551                         printk(KERN_ERR "%s: Recovery flow hasn't been properly"
12552                         " completed yet. Try again later. If you still see this"
12553                         " message after a few retries then a power cycle is"
12554                         " required.\n", bp->dev->name);
12555
12556                         return -EAGAIN;
12557                 } while (0);
12558         }
12559
12560         bp->recovery_state = BNX2X_RECOVERY_DONE;
12561
12562         return bnx2x_nic_load(bp, LOAD_OPEN);
12563 }
12564
12565 /* called with rtnl_lock */
12566 static int bnx2x_close(struct net_device *dev)
12567 {
12568         struct bnx2x *bp = netdev_priv(dev);
12569
12570         /* Unload the driver, release IRQs */
12571         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12572         bnx2x_set_power_state(bp, PCI_D3hot);
12573
12574         return 0;
12575 }
12576
12577 /* called with netif_tx_lock from dev_mcast.c */
12578 static void bnx2x_set_rx_mode(struct net_device *dev)
12579 {
12580         struct bnx2x *bp = netdev_priv(dev);
12581         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12582         int port = BP_PORT(bp);
12583
12584         if (bp->state != BNX2X_STATE_OPEN) {
12585                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12586                 return;
12587         }
12588
12589         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12590
12591         if (dev->flags & IFF_PROMISC)
12592                 rx_mode = BNX2X_RX_MODE_PROMISC;
12593
12594         else if ((dev->flags & IFF_ALLMULTI) ||
12595                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12596                   CHIP_IS_E1(bp)))
12597                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12598
12599         else { /* some multicasts */
12600                 if (CHIP_IS_E1(bp)) {
12601                         int i, old, offset;
12602                         struct netdev_hw_addr *ha;
12603                         struct mac_configuration_cmd *config =
12604                                                 bnx2x_sp(bp, mcast_config);
12605
12606                         i = 0;
12607                         netdev_for_each_mc_addr(ha, dev) {
12608                                 config->config_table[i].
12609                                         cam_entry.msb_mac_addr =
12610                                         swab16(*(u16 *)&ha->addr[0]);
12611                                 config->config_table[i].
12612                                         cam_entry.middle_mac_addr =
12613                                         swab16(*(u16 *)&ha->addr[2]);
12614                                 config->config_table[i].
12615                                         cam_entry.lsb_mac_addr =
12616                                         swab16(*(u16 *)&ha->addr[4]);
12617                                 config->config_table[i].cam_entry.flags =
12618                                                         cpu_to_le16(port);
12619                                 config->config_table[i].
12620                                         target_table_entry.flags = 0;
12621                                 config->config_table[i].target_table_entry.
12622                                         clients_bit_vector =
12623                                                 cpu_to_le32(1 << BP_L_ID(bp));
12624                                 config->config_table[i].
12625                                         target_table_entry.vlan_id = 0;
12626
12627                                 DP(NETIF_MSG_IFUP,
12628                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12629                                    config->config_table[i].
12630                                                 cam_entry.msb_mac_addr,
12631                                    config->config_table[i].
12632                                                 cam_entry.middle_mac_addr,
12633                                    config->config_table[i].
12634                                                 cam_entry.lsb_mac_addr);
12635                                 i++;
12636                         }
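                        /* Example CAM packing (editor's sketch, assuming a
                         * little-endian host): for MAC 00:11:22:33:44:55 the
                         * three swab16() loads above yield msb 0x0011,
                         * middle 0x2233 and lsb 0x4455, i.e. the address in
                         * big-endian 16-bit chunks.
                         */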
12637                         old = config->hdr.length;
12638                         if (old > i) {
12639                                 for (; i < old; i++) {
12640                                         if (CAM_IS_INVALID(config->
12641                                                            config_table[i])) {
12642                                                 /* already invalidated */
12643                                                 break;
12644                                         }
12645                                         /* invalidate */
12646                                         CAM_INVALIDATE(config->
12647                                                        config_table[i]);
12648                                 }
12649                         }
12650
12651                         if (CHIP_REV_IS_SLOW(bp))
12652                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12653                         else
12654                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
12655
12656                         config->hdr.length = i;
12657                         config->hdr.offset = offset;
12658                         config->hdr.client_id = bp->fp->cl_id;
12659                         config->hdr.reserved1 = 0;
12660
12661                         bp->set_mac_pending++;
12662                         smp_wmb();
12663
12664                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12665                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12666                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12667                                       0);
12668                 } else { /* E1H */
12669                         /* Accept one or more multicasts */
12670                         struct netdev_hw_addr *ha;
12671                         u32 mc_filter[MC_HASH_SIZE];
12672                         u32 crc, bit, regidx;
12673                         int i;
12674
12675                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12676
12677                         netdev_for_each_mc_addr(ha, dev) {
12678                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12679                                    ha->addr);
12680
12681                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12682                                 bit = (crc >> 24) & 0xff;
12683                                 regidx = bit >> 5;
12684                                 bit &= 0x1f;
12685                                 mc_filter[regidx] |= (1 << bit);
12686                         }
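                        /* Hash example (editor's sketch): if crc32c_le() of
                         * an address were 0xDEADBEEF, then bit = 0xDE = 222,
                         * regidx = 222 >> 5 = 6 and bit & 0x1f = 30, so bit
                         * 30 of mc_filter[6] is set.
                         */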
12687
12688                         for (i = 0; i < MC_HASH_SIZE; i++)
12689                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12690                                        mc_filter[i]);
12691                 }
12692         }
12693
12694         bp->rx_mode = rx_mode;
12695         bnx2x_set_storm_rx_mode(bp);
12696 }
12697
12698 /* called with rtnl_lock */
12699 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12700 {
12701         struct sockaddr *addr = p;
12702         struct bnx2x *bp = netdev_priv(dev);
12703
12704         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12705                 return -EINVAL;
12706
12707         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12708         if (netif_running(dev)) {
12709                 if (CHIP_IS_E1(bp))
12710                         bnx2x_set_eth_mac_addr_e1(bp, 1);
12711                 else
12712                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
12713         }
12714
12715         return 0;
12716 }
12717
12718 /* called with rtnl_lock */
12719 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12720                            int devad, u16 addr)
12721 {
12722         struct bnx2x *bp = netdev_priv(netdev);
12723         u16 value;
12724         int rc;
12725         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12726
12727         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12728            prtad, devad, addr);
12729
12730         if (prtad != bp->mdio.prtad) {
12731                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12732                    prtad, bp->mdio.prtad);
12733                 return -EINVAL;
12734         }
12735
12736         /* The HW expects different devad if CL22 is used */
12737         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12738
12739         bnx2x_acquire_phy_lock(bp);
12740         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12741                              devad, addr, &value);
12742         bnx2x_release_phy_lock(bp);
12743         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12744
12745         if (!rc)
12746                 rc = value;
12747         return rc;
12748 }
12749
12750 /* called with rtnl_lock */
12751 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12752                             u16 addr, u16 value)
12753 {
12754         struct bnx2x *bp = netdev_priv(netdev);
12755         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12756         int rc;
12757
12758         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12759                            " value 0x%x\n", prtad, devad, addr, value);
12760
12761         if (prtad != bp->mdio.prtad) {
12762                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12763                    prtad, bp->mdio.prtad);
12764                 return -EINVAL;
12765         }
12766
12767         /* The HW expects different devad if CL22 is used */
12768         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12769
12770         bnx2x_acquire_phy_lock(bp);
12771         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12772                               devad, addr, value);
12773         bnx2x_release_phy_lock(bp);
12774         return rc;
12775 }
12776
12777 /* called with rtnl_lock */
12778 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12779 {
12780         struct bnx2x *bp = netdev_priv(dev);
12781         struct mii_ioctl_data *mdio = if_mii(ifr);
12782
12783         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12784            mdio->phy_id, mdio->reg_num, mdio->val_in);
12785
12786         if (!netif_running(dev))
12787                 return -EAGAIN;
12788
12789         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12790 }
12791
12792 /* called with rtnl_lock */
12793 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12794 {
12795         struct bnx2x *bp = netdev_priv(dev);
12796         int rc = 0;
12797
12798         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12799                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12800                 return -EAGAIN;
12801         }
12802
12803         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12804             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12805                 return -EINVAL;
12806
12807         /* This does not race with packet allocation
12808          * because the actual alloc size is
12809          * only updated as part of load
12810          */
12811         dev->mtu = new_mtu;
12812
12813         if (netif_running(dev)) {
12814                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12815                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12816         }
12817
12818         return rc;
12819 }
12820
12821 static void bnx2x_tx_timeout(struct net_device *dev)
12822 {
12823         struct bnx2x *bp = netdev_priv(dev);
12824
12825 #ifdef BNX2X_STOP_ON_ERROR
12826         if (!bp->panic)
12827                 bnx2x_panic();
12828 #endif
12829         /* This allows the netif to be shut down gracefully before resetting */
12830         schedule_delayed_work(&bp->reset_task, 0);
12831 }
12832
12833 #ifdef BCM_VLAN
12834 /* called with rtnl_lock */
12835 static void bnx2x_vlan_rx_register(struct net_device *dev,
12836                                    struct vlan_group *vlgrp)
12837 {
12838         struct bnx2x *bp = netdev_priv(dev);
12839
12840         bp->vlgrp = vlgrp;
12841
12842         /* Set flags according to the required capabilities */
12843         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12844
12845         if (dev->features & NETIF_F_HW_VLAN_TX)
12846                 bp->flags |= HW_VLAN_TX_FLAG;
12847
12848         if (dev->features & NETIF_F_HW_VLAN_RX)
12849                 bp->flags |= HW_VLAN_RX_FLAG;
12850
12851         if (netif_running(dev))
12852                 bnx2x_set_client_config(bp);
12853 }
12854
12855 #endif
12856
12857 #ifdef CONFIG_NET_POLL_CONTROLLER
12858 static void poll_bnx2x(struct net_device *dev)
12859 {
12860         struct bnx2x *bp = netdev_priv(dev);
12861
12862         disable_irq(bp->pdev->irq);
12863         bnx2x_interrupt(bp->pdev->irq, dev);
12864         enable_irq(bp->pdev->irq);
12865 }
12866 #endif
12867
12868 static const struct net_device_ops bnx2x_netdev_ops = {
12869         .ndo_open               = bnx2x_open,
12870         .ndo_stop               = bnx2x_close,
12871         .ndo_start_xmit         = bnx2x_start_xmit,
12872         .ndo_set_multicast_list = bnx2x_set_rx_mode,
12873         .ndo_set_mac_address    = bnx2x_change_mac_addr,
12874         .ndo_validate_addr      = eth_validate_addr,
12875         .ndo_do_ioctl           = bnx2x_ioctl,
12876         .ndo_change_mtu         = bnx2x_change_mtu,
12877         .ndo_tx_timeout         = bnx2x_tx_timeout,
12878 #ifdef BCM_VLAN
12879         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
12880 #endif
12881 #ifdef CONFIG_NET_POLL_CONTROLLER
12882         .ndo_poll_controller    = poll_bnx2x,
12883 #endif
12884 };
12885
12886 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12887                                     struct net_device *dev)
12888 {
12889         struct bnx2x *bp;
12890         int rc;
12891
12892         SET_NETDEV_DEV(dev, &pdev->dev);
12893         bp = netdev_priv(dev);
12894
12895         bp->dev = dev;
12896         bp->pdev = pdev;
12897         bp->flags = 0;
12898         bp->func = PCI_FUNC(pdev->devfn);
12899
12900         rc = pci_enable_device(pdev);
12901         if (rc) {
12902                 dev_err(&bp->pdev->dev,
12903                         "Cannot enable PCI device, aborting\n");
12904                 goto err_out;
12905         }
12906
12907         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12908                 dev_err(&bp->pdev->dev,
12909                         "Cannot find PCI device base address, aborting\n");
12910                 rc = -ENODEV;
12911                 goto err_out_disable;
12912         }
12913
12914         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12915                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12916                        " base address, aborting\n");
12917                 rc = -ENODEV;
12918                 goto err_out_disable;
12919         }
12920
12921         if (atomic_read(&pdev->enable_cnt) == 1) {
12922                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12923                 if (rc) {
12924                         dev_err(&bp->pdev->dev,
12925                                 "Cannot obtain PCI resources, aborting\n");
12926                         goto err_out_disable;
12927                 }
12928
12929                 pci_set_master(pdev);
12930                 pci_save_state(pdev);
12931         }
12932
12933         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12934         if (bp->pm_cap == 0) {
12935                 dev_err(&bp->pdev->dev,
12936                         "Cannot find power management capability, aborting\n");
12937                 rc = -EIO;
12938                 goto err_out_release;
12939         }
12940
12941         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12942         if (bp->pcie_cap == 0) {
12943                 dev_err(&bp->pdev->dev,
12944                         "Cannot find PCI Express capability, aborting\n");
12945                 rc = -EIO;
12946                 goto err_out_release;
12947         }
12948
12949         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12950                 bp->flags |= USING_DAC_FLAG;
12951                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12952                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12953                                " failed, aborting\n");
12954                         rc = -EIO;
12955                         goto err_out_release;
12956                 }
12957
12958         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12959                 dev_err(&bp->pdev->dev,
12960                         "System does not support DMA, aborting\n");
12961                 rc = -EIO;
12962                 goto err_out_release;
12963         }
12964
12965         dev->mem_start = pci_resource_start(pdev, 0);
12966         dev->base_addr = dev->mem_start;
12967         dev->mem_end = pci_resource_end(pdev, 0);
12968
12969         dev->irq = pdev->irq;
12970
12971         bp->regview = pci_ioremap_bar(pdev, 0);
12972         if (!bp->regview) {
12973                 dev_err(&bp->pdev->dev,
12974                         "Cannot map register space, aborting\n");
12975                 rc = -ENOMEM;
12976                 goto err_out_release;
12977         }
12978
12979         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12980                                         min_t(u64, BNX2X_DB_SIZE,
12981                                               pci_resource_len(pdev, 2)));
12982         if (!bp->doorbells) {
12983                 dev_err(&bp->pdev->dev,
12984                         "Cannot map doorbell space, aborting\n");
12985                 rc = -ENOMEM;
12986                 goto err_out_unmap;
12987         }
12988
12989         bnx2x_set_power_state(bp, PCI_D0);
12990
12991         /* clean indirect addresses */
12992         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12993                                PCICFG_VENDOR_ID_OFFSET);
12994         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12995         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12996         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12997         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
12998
12999         /* Reset the load counter */
13000         bnx2x_clear_load_cnt(bp);
13001
13002         dev->watchdog_timeo = TX_TIMEOUT;
13003
13004         dev->netdev_ops = &bnx2x_netdev_ops;
13005         dev->ethtool_ops = &bnx2x_ethtool_ops;
13006         dev->features |= NETIF_F_SG;
13007         dev->features |= NETIF_F_HW_CSUM;
13008         if (bp->flags & USING_DAC_FLAG)
13009                 dev->features |= NETIF_F_HIGHDMA;
13010         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13011         dev->features |= NETIF_F_TSO6;
13012 #ifdef BCM_VLAN
13013         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
13014         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
13015
13016         dev->vlan_features |= NETIF_F_SG;
13017         dev->vlan_features |= NETIF_F_HW_CSUM;
13018         if (bp->flags & USING_DAC_FLAG)
13019                 dev->vlan_features |= NETIF_F_HIGHDMA;
13020         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13021         dev->vlan_features |= NETIF_F_TSO6;
13022 #endif
13023
13024         /* get_port_hwinfo() will set prtad and mmds properly */
13025         bp->mdio.prtad = MDIO_PRTAD_NONE;
13026         bp->mdio.mmds = 0;
13027         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13028         bp->mdio.dev = dev;
13029         bp->mdio.mdio_read = bnx2x_mdio_read;
13030         bp->mdio.mdio_write = bnx2x_mdio_write;
13031
13032         return 0;
13033
13034 err_out_unmap:
13035         if (bp->regview) {
13036                 iounmap(bp->regview);
13037                 bp->regview = NULL;
13038         }
13039         if (bp->doorbells) {
13040                 iounmap(bp->doorbells);
13041                 bp->doorbells = NULL;
13042         }
13043
13044 err_out_release:
13045         if (atomic_read(&pdev->enable_cnt) == 1)
13046                 pci_release_regions(pdev);
13047
13048 err_out_disable:
13049         pci_disable_device(pdev);
13050         pci_set_drvdata(pdev, NULL);
13051
13052 err_out:
13053         return rc;
13054 }
13055
13056 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13057                                                  int *width, int *speed)
13058 {
13059         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13060
13061         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13062
13063         /* returned speed value: 1 = 2.5GHz, 2 = 5GHz */
13064         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13065 }
13066
13067 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13068 {
13069         const struct firmware *firmware = bp->firmware;
13070         struct bnx2x_fw_file_hdr *fw_hdr;
13071         struct bnx2x_fw_file_section *sections;
13072         u32 offset, len, num_ops;
13073         u16 *ops_offsets;
13074         int i;
13075         const u8 *fw_ver;
13076
13077         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13078                 return -EINVAL;
13079
13080         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13081         sections = (struct bnx2x_fw_file_section *)fw_hdr;
13082
13083         /* Make sure none of the offsets and sizes make us read beyond
13084          * the end of the firmware data */
13085         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13086                 offset = be32_to_cpu(sections[i].offset);
13087                 len = be32_to_cpu(sections[i].len);
13088                 if (offset + len > firmware->size) {
13089                         dev_err(&bp->pdev->dev,
13090                                 "Section %d length is out of bounds\n", i);
13091                         return -EINVAL;
13092                 }
13093         }
13094
13095         /* Likewise for the init_ops offsets */
13096         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13097         ops_offsets = (u16 *)(firmware->data + offset);
13098         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13099
13100         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13101                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13102                         dev_err(&bp->pdev->dev,
13103                                 "Section offset %d is out of bounds\n", i);
13104                         return -EINVAL;
13105                 }
13106         }
13107
13108         /* Check FW version */
13109         offset = be32_to_cpu(fw_hdr->fw_version.offset);
13110         fw_ver = firmware->data + offset;
13111         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13112             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13113             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13114             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13115                 dev_err(&bp->pdev->dev,
13116                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13117                        fw_ver[0], fw_ver[1], fw_ver[2],
13118                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13119                        BCM_5710_FW_MINOR_VERSION,
13120                        BCM_5710_FW_REVISION_VERSION,
13121                        BCM_5710_FW_ENGINEERING_VERSION);
13122                 return -EINVAL;
13123         }
13124
13125         return 0;
13126 }
13127
13128 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13129 {
13130         const __be32 *source = (const __be32 *)_source;
13131         u32 *target = (u32 *)_target;
13132         u32 i;
13133
13134         for (i = 0; i < n/4; i++)
13135                 target[i] = be32_to_cpu(source[i]);
13136 }
13137
13138 /*
13139    Ops array is stored in the following format:
13140    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13141  */
13142 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13143 {
13144         const __be32 *source = (const __be32 *)_source;
13145         struct raw_op *target = (struct raw_op *)_target;
13146         u32 i, j, tmp;
13147
13148         for (i = 0, j = 0; i < n/8; i++, j += 2) {
13149                 tmp = be32_to_cpu(source[j]);
13150                 target[i].op = (tmp >> 24) & 0xff;
13151                 target[i].offset = tmp & 0xffffff;
13152                 target[i].raw_data = be32_to_cpu(source[j + 1]);
13153         }
13154 }
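/* Decoding example (editor's sketch): the 8-byte big-endian record
 * 0x02 0x00 0x10 0x00  0x00 0x00 0x00 0x01 unpacks to
 * op = 0x02, offset = 0x001000, raw_data = 0x00000001.
 */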
13155
13156 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13157 {
13158         const __be16 *source = (const __be16 *)_source;
13159         u16 *target = (u16 *)_target;
13160         u32 i;
13161
13162         for (i = 0; i < n/2; i++)
13163                 target[i] = be16_to_cpu(source[i]);
13164 }
13165
13166 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
13167 do {                                                                    \
13168         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
13169         bp->arr = kmalloc(len, GFP_KERNEL);                             \
13170         if (!bp->arr) {                                                 \
13171                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13172                 goto lbl;                                               \
13173         }                                                               \
13174         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
13175              (u8 *)bp->arr, len);                                       \
13176 } while (0)
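/* Usage sketch (editor's example): BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) kmallocs bp->init_data to the
 * section length taken from fw_hdr and byte-swaps the firmware blob into
 * it, jumping to request_firmware_exit on allocation failure.
 */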
13177
13178 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13179 {
13180         const char *fw_file_name;
13181         struct bnx2x_fw_file_hdr *fw_hdr;
13182         int rc;
13183
13184         if (CHIP_IS_E1(bp))
13185                 fw_file_name = FW_FILE_NAME_E1;
13186         else if (CHIP_IS_E1H(bp))
13187                 fw_file_name = FW_FILE_NAME_E1H;
13188         else {
13189                 dev_err(dev, "Unsupported chip revision\n");
13190                 return -EINVAL;
13191         }
13192
13193         dev_info(dev, "Loading %s\n", fw_file_name);
13194
13195         rc = request_firmware(&bp->firmware, fw_file_name, dev);
13196         if (rc) {
13197                 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
13198                 goto request_firmware_exit;
13199         }
13200
13201         rc = bnx2x_check_firmware(bp);
13202         if (rc) {
13203                 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
13204                 goto request_firmware_exit;
13205         }
13206
13207         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13208
13209         /* Initialize the pointers to the init arrays */
13210         /* Blob */
13211         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13212
13213         /* Opcodes */
13214         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13215
13216         /* Offsets */
13217         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13218                             be16_to_cpu_n);
13219
13220         /* STORMs firmware */
13221         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13222                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13223         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
13224                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13225         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13226                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13227         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
13228                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
13229         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13230                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13231         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
13232                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13233         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13234                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13235         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
13236                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
13237
13238         return 0;
13239
13240 init_offsets_alloc_err:
13241         kfree(bp->init_ops);
13242 init_ops_alloc_err:
13243         kfree(bp->init_data);
13244 request_firmware_exit:
13245         release_firmware(bp->firmware);
13246
13247         return rc;
13248 }
13249
13250
13251 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13252                                     const struct pci_device_id *ent)
13253 {
13254         struct net_device *dev = NULL;
13255         struct bnx2x *bp;
13256         int pcie_width, pcie_speed;
13257         int rc;
13258
13259         /* dev zeroed in init_etherdev */
13260         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13261         if (!dev) {
13262                 dev_err(&pdev->dev, "Cannot allocate net device\n");
13263                 return -ENOMEM;
13264         }
13265
13266         bp = netdev_priv(dev);
13267         bp->msg_enable = debug;
13268
13269         pci_set_drvdata(pdev, dev);
13270
13271         rc = bnx2x_init_dev(pdev, dev);
13272         if (rc < 0) {
13273                 free_netdev(dev);
13274                 return rc;
13275         }
13276
13277         rc = bnx2x_init_bp(bp);
13278         if (rc)
13279                 goto init_one_exit;
13280
13281         /* Set init arrays */
13282         rc = bnx2x_init_firmware(bp, &pdev->dev);
13283         if (rc) {
13284                 dev_err(&pdev->dev, "Error loading firmware\n");
13285                 goto init_one_exit;
13286         }
13287
13288         rc = register_netdev(dev);
13289         if (rc) {
13290                 dev_err(&pdev->dev, "Cannot register net device\n");
13291                 goto init_one_exit;
13292         }
13293
13294         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13295         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13296                " IRQ %d, ", board_info[ent->driver_data].name,
13297                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13298                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13299                dev->base_addr, bp->pdev->irq);
13300         pr_cont("node addr %pM\n", dev->dev_addr);
13301
13302         return 0;
13303
13304 init_one_exit:
13305         if (bp->regview)
13306                 iounmap(bp->regview);
13307
13308         if (bp->doorbells)
13309                 iounmap(bp->doorbells);
13310
13311         free_netdev(dev);
13312
13313         if (atomic_read(&pdev->enable_cnt) == 1)
13314                 pci_release_regions(pdev);
13315
13316         pci_disable_device(pdev);
13317         pci_set_drvdata(pdev, NULL);
13318
13319         return rc;
13320 }
13321
13322 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13323 {
13324         struct net_device *dev = pci_get_drvdata(pdev);
13325         struct bnx2x *bp;
13326
13327         if (!dev) {
13328                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13329                 return;
13330         }
13331         bp = netdev_priv(dev);
13332
13333         unregister_netdev(dev);
13334
13335         /* Make sure RESET task is not scheduled before continuing */
13336         cancel_delayed_work_sync(&bp->reset_task);
13337
13338         kfree(bp->init_ops_offsets);
13339         kfree(bp->init_ops);
13340         kfree(bp->init_data);
13341         release_firmware(bp->firmware);
13342
13343         if (bp->regview)
13344                 iounmap(bp->regview);
13345
13346         if (bp->doorbells)
13347                 iounmap(bp->doorbells);
13348
13349         free_netdev(dev);
13350
13351         if (atomic_read(&pdev->enable_cnt) == 1)
13352                 pci_release_regions(pdev);
13353
13354         pci_disable_device(pdev);
13355         pci_set_drvdata(pdev, NULL);
13356 }
13357
13358 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13359 {
13360         struct net_device *dev = pci_get_drvdata(pdev);
13361         struct bnx2x *bp;
13362
13363         if (!dev) {
13364                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13365                 return -ENODEV;
13366         }
13367         bp = netdev_priv(dev);
13368
13369         rtnl_lock();
13370
13371         pci_save_state(pdev);
13372
13373         if (!netif_running(dev)) {
13374                 rtnl_unlock();
13375                 return 0;
13376         }
13377
13378         netif_device_detach(dev);
13379
13380         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13381
13382         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13383
13384         rtnl_unlock();
13385
13386         return 0;
13387 }
13388
13389 static int bnx2x_resume(struct pci_dev *pdev)
13390 {
13391         struct net_device *dev = pci_get_drvdata(pdev);
13392         struct bnx2x *bp;
13393         int rc;
13394
13395         if (!dev) {
13396                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13397                 return -ENODEV;
13398         }
13399         bp = netdev_priv(dev);
13400
13401         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13402                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13403                 return -EAGAIN;
13404         }
13405
13406         rtnl_lock();
13407
13408         pci_restore_state(pdev);
13409
13410         if (!netif_running(dev)) {
13411                 rtnl_unlock();
13412                 return 0;
13413         }
13414
13415         bnx2x_set_power_state(bp, PCI_D0);
13416         netif_device_attach(dev);
13417
13418         rc = bnx2x_nic_load(bp, LOAD_OPEN);
13419
13420         rtnl_unlock();
13421
13422         return rc;
13423 }
13424
13425 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13426 {
13427         int i;
13428
13429         bp->state = BNX2X_STATE_ERROR;
13430
13431         bp->rx_mode = BNX2X_RX_MODE_NONE;
13432
13433         bnx2x_netif_stop(bp, 0);
13434
13435         del_timer_sync(&bp->timer);
13436         bp->stats_state = STATS_STATE_DISABLED;
13437         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13438
13439         /* Release IRQs */
13440         bnx2x_free_irq(bp, false);
13441
13442         if (CHIP_IS_E1(bp)) {
13443                 struct mac_configuration_cmd *config =
13444                                                 bnx2x_sp(bp, mcast_config);
13445
13446                 for (i = 0; i < config->hdr.length; i++)
13447                         CAM_INVALIDATE(config->config_table[i]);
13448         }
13449
13450         /* Free SKBs, SGEs, TPA pool and driver internals */
13451         bnx2x_free_skbs(bp);
13452         for_each_queue(bp, i)
13453                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13454         for_each_queue(bp, i)
13455                 netif_napi_del(&bnx2x_fp(bp, i, napi));
13456         bnx2x_free_mem(bp);
13457
13458         bp->state = BNX2X_STATE_CLOSED;
13459
13460         netif_carrier_off(bp->dev);
13461
13462         return 0;
13463 }
13464
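/*
 * Re-establish the management firmware (MCP) handshake after a slot
 * reset: re-read the shared memory base, validate the MCP signatures
 * and resync the driver/firmware mailbox sequence number.
 */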
13465 static void bnx2x_eeh_recover(struct bnx2x *bp)
13466 {
13467         u32 val;
13468
13469         mutex_init(&bp->port.phy_mutex);
13470
13471         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13472         bp->link_params.shmem_base = bp->common.shmem_base;
13473         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13474
13475         if (!bp->common.shmem_base ||
13476             (bp->common.shmem_base < 0xA0000) ||
13477             (bp->common.shmem_base >= 0xC0000)) {
13478                 BNX2X_DEV_INFO("MCP not active\n");
13479                 bp->flags |= NO_MCP_FLAG;
13480                 return;
13481         }
13482
13483         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13484         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13485                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13486                 BNX2X_ERR("BAD MCP validity signature\n");
13487
13488         if (!BP_NOMCP(bp)) {
13489                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13490                               & DRV_MSG_SEQ_NUMBER_MASK);
13491                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13492         }
13493 }
13494
13495 /**
13496  * bnx2x_io_error_detected - called when PCI error is detected
13497  * @pdev: Pointer to PCI device
13498  * @state: The current PCI connection state
13499  *
13500  * This function is called after a PCI bus error affecting
13501  * this device has been detected.
13502  */
13503 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13504                                                 pci_channel_state_t state)
13505 {
13506         struct net_device *dev = pci_get_drvdata(pdev);
13507         struct bnx2x *bp = netdev_priv(dev);
13508
13509         rtnl_lock();
13510
13511         netif_device_detach(dev);
13512
13513         if (state == pci_channel_io_perm_failure) {
13514                 rtnl_unlock();
13515                 return PCI_ERS_RESULT_DISCONNECT;
13516         }
13517
13518         if (netif_running(dev))
13519                 bnx2x_eeh_nic_unload(bp);
13520
13521         pci_disable_device(pdev);
13522
13523         rtnl_unlock();
13524
13525         /* Request a slot reset */
13526         return PCI_ERS_RESULT_NEED_RESET;
13527 }
13528
13529 /**
13530  * bnx2x_io_slot_reset - called after the PCI bus has been reset
13531  * @pdev: Pointer to PCI device
13532  *
13533  * Restart the card from scratch, as if from a cold boot.
13534  */
13535 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13536 {
13537         struct net_device *dev = pci_get_drvdata(pdev);
13538         struct bnx2x *bp = netdev_priv(dev);
13539
13540         rtnl_lock();
13541
13542         if (pci_enable_device(pdev)) {
13543                 dev_err(&pdev->dev,
13544                         "Cannot re-enable PCI device after reset\n");
13545                 rtnl_unlock();
13546                 return PCI_ERS_RESULT_DISCONNECT;
13547         }
13548
13549         pci_set_master(pdev);
13550         pci_restore_state(pdev);
13551
13552         if (netif_running(dev))
13553                 bnx2x_set_power_state(bp, PCI_D0);
13554
13555         rtnl_unlock();
13556
13557         return PCI_ERS_RESULT_RECOVERED;
13558 }
13559
13560 /**
13561  * bnx2x_io_resume - called when traffic can start flowing again
13562  * @pdev: Pointer to PCI device
13563  *
13564  * This callback is called when the error recovery driver tells us that
13565  * it's OK to resume normal operation.
13566  */
13567 static void bnx2x_io_resume(struct pci_dev *pdev)
13568 {
13569         struct net_device *dev = pci_get_drvdata(pdev);
13570         struct bnx2x *bp = netdev_priv(dev);
13571
13572         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13573 		dev_err(&pdev->dev, "Handling parity error recovery. Try again later\n");
13574                 return;
13575         }
13576
13577         rtnl_lock();
13578
13579         bnx2x_eeh_recover(bp);
13580
13581         if (netif_running(dev))
13582                 bnx2x_nic_load(bp, LOAD_NORMAL);
13583
13584         netif_device_attach(dev);
13585
13586         rtnl_unlock();
13587 }
13588
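/*
 * PCI error recovery (EEH/AER) glue: ties the three-stage recovery flow
 * implemented above (error_detected, slot_reset, resume) into the PCI
 * core.
 */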
13589 static struct pci_error_handlers bnx2x_err_handler = {
13590         .error_detected = bnx2x_io_error_detected,
13591         .slot_reset     = bnx2x_io_slot_reset,
13592         .resume         = bnx2x_io_resume,
13593 };
13594
13595 static struct pci_driver bnx2x_pci_driver = {
13596         .name        = DRV_MODULE_NAME,
13597         .id_table    = bnx2x_pci_tbl,
13598         .probe       = bnx2x_init_one,
13599         .remove      = __devexit_p(bnx2x_remove_one),
13600         .suspend     = bnx2x_suspend,
13601         .resume      = bnx2x_resume,
13602         .err_handler = &bnx2x_err_handler,
13603 };
13604
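/*
 * Module init: create the driver workqueue before registering with the
 * PCI core, since probe() may schedule slow path or reset work on it
 * right away; tear it down again if registration fails.
 */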
13605 static int __init bnx2x_init(void)
13606 {
13607         int ret;
13608
13609         pr_info("%s", version);
13610
13611         bnx2x_wq = create_singlethread_workqueue("bnx2x");
13612         if (bnx2x_wq == NULL) {
13613                 pr_err("Cannot create workqueue\n");
13614                 return -ENOMEM;
13615         }
13616
13617         ret = pci_register_driver(&bnx2x_pci_driver);
13618         if (ret) {
13619                 pr_err("Cannot register driver\n");
13620                 destroy_workqueue(bnx2x_wq);
13621         }
13622         return ret;
13623 }
13624
13625 static void __exit bnx2x_cleanup(void)
13626 {
13627         pci_unregister_driver(&bnx2x_pci_driver);
13628
13629         destroy_workqueue(bnx2x_wq);
13630 }
13631
13632 module_init(bnx2x_init);
13633 module_exit(bnx2x_cleanup);
13634
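/*
 * Everything from here to the end of the file is the glue towards the
 * CNIC driver, which implements protocol offload (e.g. iSCSI) on top of
 * this L2 driver; it is compiled in only when BCM_CNIC is configured.
 */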
13635 #ifdef BCM_CNIC
13636
13637 /* count denotes the number of new completions we have seen */
13638 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13639 {
13640         struct eth_spe *spe;
13641
13642 #ifdef BNX2X_STOP_ON_ERROR
13643         if (unlikely(bp->panic))
13644                 return;
13645 #endif
13646
13647         spin_lock_bh(&bp->spq_lock);
13648         bp->cnic_spq_pending -= count;
13649
13650         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13651              bp->cnic_spq_pending++) {
13652
13653                 if (!bp->cnic_kwq_pending)
13654                         break;
13655
13656                 spe = bnx2x_sp_get_next(bp);
13657                 *spe = *bp->cnic_kwq_cons;
13658
13659                 bp->cnic_kwq_pending--;
13660
13661                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13662                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13663
13664                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13665                         bp->cnic_kwq_cons = bp->cnic_kwq;
13666                 else
13667                         bp->cnic_kwq_cons++;
13668         }
13669         bnx2x_sp_prod_update(bp);
13670         spin_unlock_bh(&bp->spq_lock);
13671 }
13672
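/*
 * CNIC-facing submission path for 16-byte kernel work queue entries
 * (kwqes): entries are copied into the local kwq ring under spq_lock
 * and drained to the slow path queue as credit frees up; returns the
 * number of entries accepted, which may be fewer than @count.
 */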
13673 static int bnx2x_cnic_sp_queue(struct net_device *dev,
13674                                struct kwqe_16 *kwqes[], u32 count)
13675 {
13676         struct bnx2x *bp = netdev_priv(dev);
13677         int i;
13678
13679 #ifdef BNX2X_STOP_ON_ERROR
13680         if (unlikely(bp->panic))
13681                 return -EIO;
13682 #endif
13683
13684         spin_lock_bh(&bp->spq_lock);
13685
13686         for (i = 0; i < count; i++) {
13687                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13688
13689                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13690                         break;
13691
13692                 *bp->cnic_kwq_prod = *spe;
13693
13694                 bp->cnic_kwq_pending++;
13695
13696                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13697                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
13698                    spe->data.mac_config_addr.hi,
13699                    spe->data.mac_config_addr.lo,
13700                    bp->cnic_kwq_pending);
13701
13702                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13703                         bp->cnic_kwq_prod = bp->cnic_kwq;
13704                 else
13705                         bp->cnic_kwq_prod++;
13706         }
13707
13708         spin_unlock_bh(&bp->spq_lock);
13709
13710         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13711                 bnx2x_cnic_sp_post(bp, 0);
13712
13713         return i;
13714 }
13715
13716 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13717 {
13718         struct cnic_ops *c_ops;
13719         int rc = 0;
13720
13721         mutex_lock(&bp->cnic_mutex);
13722         c_ops = bp->cnic_ops;
13723         if (c_ops)
13724                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13725         mutex_unlock(&bp->cnic_mutex);
13726
13727         return rc;
13728 }
13729
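/*
 * Like bnx2x_cnic_ctl_send() but safe in bottom-half context: the ops
 * pointer is sampled under rcu_read_lock() instead of cnic_mutex.
 */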
13730 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13731 {
13732         struct cnic_ops *c_ops;
13733         int rc = 0;
13734
13735         rcu_read_lock();
13736         c_ops = rcu_dereference(bp->cnic_ops);
13737         if (c_ops)
13738                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13739         rcu_read_unlock();
13740
13741         return rc;
13742 }
13743
13744 /*
13745  * for commands that have no data
13746  */
13747 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13748 {
13749         struct cnic_ctl_info ctl = {0};
13750
13751         ctl.cmd = cmd;
13752
13753         return bnx2x_cnic_ctl_send(bp, &ctl);
13754 }
13755
13756 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13757 {
13758         struct cnic_ctl_info ctl;
13759
13760         /* first we tell CNIC and only then we count this as a completion */
13761         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13762         ctl.data.comp.cid = cid;
13763
13764         bnx2x_cnic_ctl_send_bh(bp, &ctl);
13765         bnx2x_cnic_sp_post(bp, 1);
13766 }
13767
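/*
 * Control channel from the CNIC driver into this driver: write ILT
 * context table entries, account slow path completions and start/stop
 * the RX mode for a CNIC L2 client.
 */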
13768 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13769 {
13770         struct bnx2x *bp = netdev_priv(dev);
13771         int rc = 0;
13772
13773         switch (ctl->cmd) {
13774         case DRV_CTL_CTXTBL_WR_CMD: {
13775                 u32 index = ctl->data.io.offset;
13776                 dma_addr_t addr = ctl->data.io.dma_addr;
13777
13778                 bnx2x_ilt_wr(bp, index, addr);
13779                 break;
13780         }
13781
13782         case DRV_CTL_COMPLETION_CMD: {
13783                 int count = ctl->data.comp.comp_count;
13784
13785                 bnx2x_cnic_sp_post(bp, count);
13786                 break;
13787         }
13788
13789         /* rtnl_lock is held. */
13790         case DRV_CTL_START_L2_CMD: {
13791                 u32 cli = ctl->data.ring.client_id;
13792
13793                 bp->rx_mode_cl_mask |= (1 << cli);
13794                 bnx2x_set_storm_rx_mode(bp);
13795                 break;
13796         }
13797
13798         /* rtnl_lock is held. */
13799         case DRV_CTL_STOP_L2_CMD: {
13800                 u32 cli = ctl->data.ring.client_id;
13801
13802                 bp->rx_mode_cl_mask &= ~(1 << cli);
13803                 bnx2x_set_storm_rx_mode(bp);
13804                 break;
13805         }
13806
13807         default:
13808                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13809                 rc = -EINVAL;
13810         }
13811
13812         return rc;
13813 }
13814
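/*
 * Tell CNIC which interrupt resources it may use: under MSI-X it gets
 * its own vector (entry 1 of the MSI-X table), otherwise it shares the
 * single line interrupt; in both cases it is handed the CNIC and
 * default status blocks.
 */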
13815 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13816 {
13817         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13818
13819         if (bp->flags & USING_MSIX_FLAG) {
13820                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13821                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13822                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13823         } else {
13824                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13825                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13826         }
13827         cp->irq_arr[0].status_blk = bp->cnic_sb;
13828         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13829         cp->irq_arr[1].status_blk = bp->def_status_blk;
13830         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13831
13832         cp->num_irq = 2;
13833 }
13834
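/*
 * Attach a CNIC instance: allocate one page for the kwqe ring,
 * initialize the CNIC status block, set the iSCSI MAC address and
 * publish the ops with rcu_assign_pointer() so the softirq paths see
 * a consistent pointer.
 */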
13835 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13836                                void *data)
13837 {
13838         struct bnx2x *bp = netdev_priv(dev);
13839         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13840
13841         if (ops == NULL)
13842                 return -EINVAL;
13843
13844         if (atomic_read(&bp->intr_sem) != 0)
13845                 return -EBUSY;
13846
13847         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13848         if (!bp->cnic_kwq)
13849                 return -ENOMEM;
13850
13851         bp->cnic_kwq_cons = bp->cnic_kwq;
13852         bp->cnic_kwq_prod = bp->cnic_kwq;
13853         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13854
13855         bp->cnic_spq_pending = 0;
13856         bp->cnic_kwq_pending = 0;
13857
13858         bp->cnic_data = data;
13859
13860         cp->num_irq = 0;
13861         cp->drv_state = CNIC_DRV_STATE_REGD;
13862
13863         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
13864
13865         bnx2x_setup_cnic_irq_info(bp);
13866         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
13867         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
13868         rcu_assign_pointer(bp->cnic_ops, ops);
13869
13870         return 0;
13871 }
13872
13873 static int bnx2x_unregister_cnic(struct net_device *dev)
13874 {
13875         struct bnx2x *bp = netdev_priv(dev);
13876         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13877
13878         mutex_lock(&bp->cnic_mutex);
13879         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
13880                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
13881                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
13882         }
13883         cp->drv_state = 0;
13884         rcu_assign_pointer(bp->cnic_ops, NULL);
13885         mutex_unlock(&bp->cnic_mutex);
13886         synchronize_rcu();
13887         kfree(bp->cnic_kwq);
13888         bp->cnic_kwq = NULL;
13889
13890         return 0;
13891 }
13892
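/*
 * Hand the CNIC driver a descriptor of this device's offload resources:
 * register and doorbell BARs, context table geometry, starting CID and
 * the callback table used for kwqe submission and control operations.
 */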
13893 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13894 {
13895         struct bnx2x *bp = netdev_priv(dev);
13896         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13897
13898         cp->drv_owner = THIS_MODULE;
13899         cp->chip_id = CHIP_ID(bp);
13900         cp->pdev = bp->pdev;
13901         cp->io_base = bp->regview;
13902         cp->io_base2 = bp->doorbells;
13903         cp->max_kwqe_pending = 8;
13904         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13905         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13906         cp->ctx_tbl_len = CNIC_ILT_LINES;
13907         cp->starting_cid = BCM_CNIC_CID_START;
13908         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13909         cp->drv_ctl = bnx2x_drv_ctl;
13910         cp->drv_register_cnic = bnx2x_register_cnic;
13911         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13912
13913         return cp;
13914 }
13915 EXPORT_SYMBOL(bnx2x_cnic_probe);
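/*
 * bnx2x_cnic_probe() is the single symbol the CNIC driver needs from
 * this module. A minimal sketch of the consumer side, assuming the
 * symbol_get()-based lookup the CNIC driver uses; netdev, my_cnic_ops
 * and my_data are illustrative names only:
 *
 *	struct cnic_eth_dev *(*probe)(struct net_device *);
 *	struct cnic_eth_dev *ethdev = NULL;
 *
 *	probe = symbol_get(bnx2x_cnic_probe);
 *	if (probe) {
 *		ethdev = (*probe)(netdev);
 *		symbol_put(bnx2x_cnic_probe);
 *	}
 *	if (ethdev)
 *		ethdev->drv_register_cnic(netdev, my_cnic_ops, my_data);
 */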
13916
13917 #endif /* BCM_CNIC */
13918