bnx2x: Report the maximal available BW as link speed
drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1"
#define DRV_MODULE_RELDATE      "2009/08/12"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1       "bnx2x-e1-"
#define FW_FILE_PREFIX_E1H      "bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
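
/* The two accessors above reach internal (GRC) registers indirectly through
 * a window in PCI config space: the target GRC address is written to
 * PCICFG_GRC_ADDRESS, the data moves through PCICFG_GRC_DATA, and the window
 * is then parked back at PCICFG_VENDOR_ID_OFFSET so that unrelated config
 * cycles do not land on device internals. As the comment above notes, this
 * path is only used at init time, when the MCP serializes access.
 */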

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
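
/* bnx2x_write_dmae() above shows the driver's DMAE handshake: build a
 * struct dmae_command (source = host bus address, destination = GRC address
 * in dwords), post it to a per-function command slot via bnx2x_post_dmae(),
 * then poll the write-back completion word until the engine stores
 * DMAE_COMP_VAL there (up to ~200 iterations before declaring a timeout).
 * A minimal caller sketch, assuming "vaddr"/"mapping" name a DMA-coherent
 * buffer the caller owns (illustrative only, not from this file):
 *
 *	u32 *vaddr = ...;           // CPU view of a coherent buffer
 *	dma_addr_t mapping = ...;   // its bus address
 *	vaddr[0] = val;
 *	bnx2x_write_dmae(bp, mapping, grc_addr, 1);  // copy 1 dword to GRC
 */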

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int offset = 0;

        while (len > DMAE_LEN32_WR_MAX) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, DMAE_LEN32_WR_MAX);
                offset += DMAE_LEN32_WR_MAX * 4;
                len -= DMAE_LEN32_WR_MAX;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
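
/* A single DMAE command can move at most DMAE_LEN32_WR_MAX dwords, so
 * bnx2x_write_dmae_phys_len() above splits larger buffers into maximal
 * chunks. Note the asymmetry in the bookkeeping: "len" counts dwords while
 * "offset" advances in bytes, hence the "* 4" when stepping the offset.
 */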

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
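
/* bnx2x_mc_assert() walks the assert list of each of the four storm
 * processors (X/T/C/U) in internal memory. Each list entry is four
 * consecutive dwords; scanning stops at the first entry whose opcode word
 * still holds COMMON_ASM_INVALID_ASSERT_OPCODE, and the return value is the
 * number of asserts found. (STROM_ASSERT_ARRAY_SIZE is the historical
 * spelling of the array-size constant in bnx2x.h.)
 */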

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        printk(KERN_ERR PFX);
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}
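
/* The HC config bits written above select the interrupt delivery mode:
 * MSI-X clears single-ISR and the INTA line enable, plain MSI keeps
 * single-ISR, and the INTx fallback first enables every source (including
 * MSI/MSI-X) and then clears the MSI/MSI-X enable with a second write. On
 * E1H the leading/trailing edge masks are also programmed so that, in
 * multi-function mode, only this function's VN bit (plus NIG/GPIO3
 * attentions on the PMF) can generate attentions.
 */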

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS is the number of "next-page" entries;
           it is used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
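
/* Availability math above, spelled out: the BD ring holds tx_ring_size
 * usable descriptors plus NUM_TX_RINGS "next-page" entries that the
 * producer skips over. Counting those entries as permanently "used" (the
 * "+ NUM_TX_RINGS" term) keeps the computation a simple 16-bit wrap-safe
 * difference. For example, with prod == cons (an empty ring) the function
 * returns tx_ring_size - NUM_TX_RINGS: the "next-page" BDs are never
 * reported as available.
 */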

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);
        }
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}
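
/* SGE mask bookkeeping above: fp->sge_mask is a bitmap with one bit per SGE
 * ring entry; a set bit means the entry is still outstanding, and
 * bnx2x_update_sge_prod() clears the bits of pages the FW has consumed. The
 * producer then advances over every fully-cleared 64-bit mask element
 * starting at the current producer, re-arming each element it passes.
 * bnx2x_clear_sge_mask_next_elems() keeps the two "next-page" indices at
 * the end of each ring page permanently cleared so they are never treated
 * as completable entries.
 */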

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we're going to pass it to the stack */
1345                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1346                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1347
1348                 /* Add one frag and update the appropriate fields in the skb */
1349                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1350
1351                 skb->data_len += frag_len;
1352                 skb->truesize += frag_len;
1353                 skb->len += frag_len;
1354
1355                 frag_size -= frag_len;
1356         }
1357
1358         return 0;
1359 }
1360
1361 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1362                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1363                            u16 cqe_idx)
1364 {
1365         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1366         struct sk_buff *skb = rx_buf->skb;
1367         /* alloc new skb */
1368         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1369
1370         /* Unmap skb in the pool anyway, as we are going to change
1371            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1372            fails. */
1373         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1374                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1375
1376         if (likely(new_skb)) {
1377                 /* fix ip xsum and give it to the stack */
1378                 /* (no need to map the new skb) */
1379 #ifdef BCM_VLAN
1380                 int is_vlan_cqe =
1381                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1382                          PARSING_FLAGS_VLAN);
1383                 int is_not_hwaccel_vlan_cqe =
1384                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1385 #endif
1386
1387                 prefetch(skb);
1388                 prefetch(((char *)(skb)) + 128);
1389
1390 #ifdef BNX2X_STOP_ON_ERROR
1391                 if (pad + len > bp->rx_buf_size) {
1392                         BNX2X_ERR("skb_put is about to fail...  "
1393                                   "pad %d  len %d  rx_buf_size %d\n",
1394                                   pad, len, bp->rx_buf_size);
1395                         bnx2x_panic();
1396                         return;
1397                 }
1398 #endif
1399
1400                 skb_reserve(skb, pad);
1401                 skb_put(skb, len);
1402
1403                 skb->protocol = eth_type_trans(skb, bp->dev);
1404                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1405
1406                 {
1407                         struct iphdr *iph;
1408
1409                         iph = (struct iphdr *)skb->data;
1410 #ifdef BCM_VLAN
1411                         /* If there is no Rx VLAN offloading -
1412                            take VLAN tag into an account */
1413                         if (unlikely(is_not_hwaccel_vlan_cqe))
1414                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1415 #endif
1416                         iph->check = 0;
1417                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1418                 }
1419
1420                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1421                                          &cqe->fast_path_cqe, cqe_idx)) {
1422 #ifdef BCM_VLAN
1423                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1424                             (!is_not_hwaccel_vlan_cqe))
1425                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1426                                                 le16_to_cpu(cqe->fast_path_cqe.
1427                                                             vlan_tag));
1428                         else
1429 #endif
1430                                 netif_receive_skb(skb);
1431                 } else {
1432                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1433                            " - dropping packet!\n");
1434                         dev_kfree_skb(skb);
1435                 }
1436
1437
1438                 /* put new skb in bin */
1439                 fp->tpa_pool[queue].skb = new_skb;
1440
1441         } else {
1442                 /* else drop the packet and keep the buffer in the bin */
1443                 DP(NETIF_MSG_RX_STATUS,
1444                    "Failed to allocate new skb - dropping packet!\n");
1445                 fp->eth_q_stats.rx_skb_alloc_failed++;
1446         }
1447
1448         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1449 }
1450
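/* Publish the Rx BD, CQE and SGE producer values to the FW by writing
   them into USTORM internal memory for this queue's client id */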
1451 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1452                                         struct bnx2x_fastpath *fp,
1453                                         u16 bd_prod, u16 rx_comp_prod,
1454                                         u16 rx_sge_prod)
1455 {
1456         struct ustorm_eth_rx_producers rx_prods = {0};
1457         int i;
1458
1459         /* Update producers */
1460         rx_prods.bd_prod = bd_prod;
1461         rx_prods.cqe_prod = rx_comp_prod;
1462         rx_prods.sge_prod = rx_sge_prod;
1463
1464         /*
1465          * Make sure that the BD and SGE data is updated before updating the
1466          * producers since FW might read the BD/SGE right after the producer
1467          * is updated.
1468          * This is only applicable for weak-ordered memory model archs such
1469          * as IA-64. The following barrier is also mandatory since the FW
1470          * assumes that BDs always have buffers.
1471          */
1472         wmb();
1473
1474         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1475                 REG_WR(bp, BAR_USTRORM_INTMEM +
1476                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1477                        ((u32 *)&rx_prods)[i]);
1478
1479         mmiowb(); /* keep prod updates ordered */
1480
1481         DP(NETIF_MSG_RX_STATUS,
1482            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1483            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1484 }
1485
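/* Rx fastpath poll: walk the Rx completion queue, dispatching slowpath
   events, TPA start/stop indications and regular packets until either
   the queue is drained or 'budget' packets have been processed.
   Returns the number of packets handled. */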
1486 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1487 {
1488         struct bnx2x *bp = fp->bp;
1489         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1490         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1491         int rx_pkt = 0;
1492
1493 #ifdef BNX2X_STOP_ON_ERROR
1494         if (unlikely(bp->panic))
1495                 return 0;
1496 #endif
1497
1498         /* The CQ "next element" is the same size as a regular element,
1499            which is why it is safe to skip over it here */
1500         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1501         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1502                 hw_comp_cons++;
1503
1504         bd_cons = fp->rx_bd_cons;
1505         bd_prod = fp->rx_bd_prod;
1506         bd_prod_fw = bd_prod;
1507         sw_comp_cons = fp->rx_comp_cons;
1508         sw_comp_prod = fp->rx_comp_prod;
1509
1510         /* Memory barrier necessary as speculative reads of the rx
1511          * buffer can be ahead of the index in the status block
1512          */
1513         rmb();
1514
1515         DP(NETIF_MSG_RX_STATUS,
1516            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1517            fp->index, hw_comp_cons, sw_comp_cons);
1518
1519         while (sw_comp_cons != hw_comp_cons) {
1520                 struct sw_rx_bd *rx_buf = NULL;
1521                 struct sk_buff *skb;
1522                 union eth_rx_cqe *cqe;
1523                 u8 cqe_fp_flags;
1524                 u16 len, pad;
1525
1526                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1527                 bd_prod = RX_BD(bd_prod);
1528                 bd_cons = RX_BD(bd_cons);
1529
1530                 /* Prefetch the page containing the BD descriptor
1531                    at the producer's index; it will be needed when a new
1532                    skb is allocated */
1533                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1534                                              (&fp->rx_desc_ring[bd_prod])) -
1535                                   PAGE_SIZE + 1));
1536
1537                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1538                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1539
1540                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1541                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1542                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1543                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1544                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1545                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1546
1547                 /* is this a slowpath msg? */
1548                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1549                         bnx2x_sp_event(fp, cqe);
1550                         goto next_cqe;
1551
1552                 /* this is an rx packet */
1553                 } else {
1554                         rx_buf = &fp->rx_buf_ring[bd_cons];
1555                         skb = rx_buf->skb;
1556                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1557                         pad = cqe->fast_path_cqe.placement_offset;
1558
1559                         /* If CQE is marked both TPA_START and TPA_END
1560                            it is a non-TPA CQE */
1561                         if ((!fp->disable_tpa) &&
1562                             (TPA_TYPE(cqe_fp_flags) !=
1563                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1564                                 u16 queue = cqe->fast_path_cqe.queue_index;
1565
1566                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1567                                         DP(NETIF_MSG_RX_STATUS,
1568                                            "calling tpa_start on queue %d\n",
1569                                            queue);
1570
1571                                         bnx2x_tpa_start(fp, queue, skb,
1572                                                         bd_cons, bd_prod);
1573                                         goto next_rx;
1574                                 }
1575
1576                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1577                                         DP(NETIF_MSG_RX_STATUS,
1578                                            "calling tpa_stop on queue %d\n",
1579                                            queue);
1580
1581                                         if (!BNX2X_RX_SUM_FIX(cqe))
1582                                                 BNX2X_ERR("STOP on non-TCP "
1583                                                           "data\n");
1584
1585                                         /* This is the size of the linear
1586                                            data on this skb */
1587                                         len = le16_to_cpu(cqe->fast_path_cqe.
1588                                                                 len_on_bd);
1589                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1590                                                     len, cqe, comp_ring_cons);
1591 #ifdef BNX2X_STOP_ON_ERROR
1592                                         if (bp->panic)
1593                                                 return 0;
1594 #endif
1595
1596                                         bnx2x_update_sge_prod(fp,
1597                                                         &cqe->fast_path_cqe);
1598                                         goto next_cqe;
1599                                 }
1600                         }
1601
1602                         pci_dma_sync_single_for_device(bp->pdev,
1603                                         pci_unmap_addr(rx_buf, mapping),
1604                                                        pad + RX_COPY_THRESH,
1605                                                        PCI_DMA_FROMDEVICE);
1606                         prefetch(skb);
1607                         prefetch(((char *)(skb)) + 128);
1608
1609                         /* is this an error packet? */
1610                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1611                                 DP(NETIF_MSG_RX_ERR,
1612                                    "ERROR  flags %x  rx packet %u\n",
1613                                    cqe_fp_flags, sw_comp_cons);
1614                                 fp->eth_q_stats.rx_err_discard_pkt++;
1615                                 goto reuse_rx;
1616                         }
1617
1618                         /* Since we don't have a jumbo ring,
1619                          * copy small packets if the MTU is above 1500
1620                          */
1621                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1622                             (len <= RX_COPY_THRESH)) {
1623                                 struct sk_buff *new_skb;
1624
1625                                 new_skb = netdev_alloc_skb(bp->dev,
1626                                                            len + pad);
1627                                 if (new_skb == NULL) {
1628                                         DP(NETIF_MSG_RX_ERR,
1629                                            "ERROR  packet dropped "
1630                                            "because of alloc failure\n");
1631                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1632                                         goto reuse_rx;
1633                                 }
1634
1635                                 /* aligned copy */
1636                                 skb_copy_from_linear_data_offset(skb, pad,
1637                                                     new_skb->data + pad, len);
1638                                 skb_reserve(new_skb, pad);
1639                                 skb_put(new_skb, len);
1640
1641                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1642
1643                                 skb = new_skb;
1644
1645                         } else
1646                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1647                                 pci_unmap_single(bp->pdev,
1648                                         pci_unmap_addr(rx_buf, mapping),
1649                                                  bp->rx_buf_size,
1650                                                  PCI_DMA_FROMDEVICE);
1651                                 skb_reserve(skb, pad);
1652                                 skb_put(skb, len);
1653
1654                         } else {
1655                                 DP(NETIF_MSG_RX_ERR,
1656                                    "ERROR  packet dropped because "
1657                                    "of alloc failure\n");
1658                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1659 reuse_rx:
1660                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1661                                 goto next_rx;
1662                         }
1663
1664                         skb->protocol = eth_type_trans(skb, bp->dev);
1665
1666                         skb->ip_summed = CHECKSUM_NONE;
1667                         if (bp->rx_csum) {
1668                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1669                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1670                                 else
1671                                         fp->eth_q_stats.hw_csum_err++;
1672                         }
1673                 }
1674
1675                 skb_record_rx_queue(skb, fp->index);
1676
1677 #ifdef BCM_VLAN
1678                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1679                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1680                      PARSING_FLAGS_VLAN))
1681                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1682                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1683                 else
1684 #endif
1685                         netif_receive_skb(skb);
1686
1687
1688 next_rx:
1689                 rx_buf->skb = NULL;
1690
1691                 bd_cons = NEXT_RX_IDX(bd_cons);
1692                 bd_prod = NEXT_RX_IDX(bd_prod);
1693                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1694                 rx_pkt++;
1695 next_cqe:
1696                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1697                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1698
1699                 if (rx_pkt == budget)
1700                         break;
1701         } /* while */
1702
1703         fp->rx_bd_cons = bd_cons;
1704         fp->rx_bd_prod = bd_prod_fw;
1705         fp->rx_comp_cons = sw_comp_cons;
1706         fp->rx_comp_prod = sw_comp_prod;
1707
1708         /* Update producers */
1709         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1710                              fp->rx_sge_prod);
1711
1712         fp->rx_pkt += rx_pkt;
1713         fp->rx_calls++;
1714
1715         return rx_pkt;
1716 }
1717
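/* MSI-X fastpath interrupt handler: for an Rx vector just schedule
   NAPI; for a Tx vector run the Tx completion inline and re-enable
   the status block interrupt */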
1718 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1719 {
1720         struct bnx2x_fastpath *fp = fp_cookie;
1721         struct bnx2x *bp = fp->bp;
1722
1723         /* Return here if interrupt is disabled */
1724         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1725                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1726                 return IRQ_HANDLED;
1727         }
1728
1729         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1730            fp->index, fp->sb_id);
1731         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1732
1733 #ifdef BNX2X_STOP_ON_ERROR
1734         if (unlikely(bp->panic))
1735                 return IRQ_HANDLED;
1736 #endif
1737         /* Handle Rx or Tx according to MSI-X vector */
1738         if (fp->is_rx_queue) {
1739                 prefetch(fp->rx_cons_sb);
1740                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1741
1742                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1743
1744         } else {
1745                 prefetch(fp->tx_cons_sb);
1746                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1747
1748                 bnx2x_update_fpsb_idx(fp);
1749                 rmb();
1750                 bnx2x_tx_int(fp);
1751
1752                 /* Re-enable interrupts */
1753                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1754                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1755                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1756                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1757         }
1758
1759         return IRQ_HANDLED;
1760 }
1761
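/* INTA/MSI interrupt handler: ack the IGU and demultiplex the status
   bits to the fastpath queues, the CNIC handler and the slowpath task */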
1762 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1763 {
1764         struct bnx2x *bp = netdev_priv(dev_instance);
1765         u16 status = bnx2x_ack_int(bp);
1766         u16 mask;
1767         int i;
1768
1769         /* Return here if interrupt is shared and it's not for us */
1770         if (unlikely(status == 0)) {
1771                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1772                 return IRQ_NONE;
1773         }
1774         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1775
1776         /* Return here if interrupt is disabled */
1777         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1778                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1779                 return IRQ_HANDLED;
1780         }
1781
1782 #ifdef BNX2X_STOP_ON_ERROR
1783         if (unlikely(bp->panic))
1784                 return IRQ_HANDLED;
1785 #endif
1786
1787         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1788                 struct bnx2x_fastpath *fp = &bp->fp[i];
1789
1790                 mask = 0x2 << fp->sb_id;
1791                 if (status & mask) {
1792                         /* Handle Rx or Tx according to SB id */
1793                         if (fp->is_rx_queue) {
1794                                 prefetch(fp->rx_cons_sb);
1795                                 prefetch(&fp->status_blk->u_status_block.
1796                                                         status_block_index);
1797
1798                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1799
1800                         } else {
1801                                 prefetch(fp->tx_cons_sb);
1802                                 prefetch(&fp->status_blk->c_status_block.
1803                                                         status_block_index);
1804
1805                                 bnx2x_update_fpsb_idx(fp);
1806                                 rmb();
1807                                 bnx2x_tx_int(fp);
1808
1809                                 /* Re-enable interrupts */
1810                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1811                                              le16_to_cpu(fp->fp_u_idx),
1812                                              IGU_INT_NOP, 1);
1813                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1814                                              le16_to_cpu(fp->fp_c_idx),
1815                                              IGU_INT_ENABLE, 1);
1816                         }
1817                         status &= ~mask;
1818                 }
1819         }
1820
1821 #ifdef BCM_CNIC
1822         mask = 0x2 << CNIC_SB_ID(bp);
1823         if (status & (mask | 0x1)) {
1824                 struct cnic_ops *c_ops = NULL;
1825
1826                 rcu_read_lock();
1827                 c_ops = rcu_dereference(bp->cnic_ops);
1828                 if (c_ops)
1829                         c_ops->cnic_handler(bp->cnic_data, NULL);
1830                 rcu_read_unlock();
1831
1832                 status &= ~mask;
1833         }
1834 #endif
1835
1836         if (unlikely(status & 0x1)) {
1837                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1838
1839                 status &= ~0x1;
1840                 if (!status)
1841                         return IRQ_HANDLED;
1842         }
1843
1844         if (status)
1845                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1846                    status);
1847
1848         return IRQ_HANDLED;
1849 }
1850
1851 /* end of fast path */
1852
1853 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1854
1855 /* Link */
1856
1857 /*
1858  * General service functions
1859  */
1860
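/* Acquire one of the HW resource locks shared between the driver
   functions: writing the resource bit to the "set" register (control
   register + 4) attempts the lock and reading the control register
   back tells whether it was obtained. Polls for up to 5 seconds. */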
1861 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1862 {
1863         u32 lock_status;
1864         u32 resource_bit = (1 << resource);
1865         int func = BP_FUNC(bp);
1866         u32 hw_lock_control_reg;
1867         int cnt;
1868
1869         /* Validating that the resource is within range */
1870         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1871                 DP(NETIF_MSG_HW,
1872                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1873                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1874                 return -EINVAL;
1875         }
1876
1877         if (func <= 5) {
1878                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1879         } else {
1880                 hw_lock_control_reg =
1881                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1882         }
1883
1884         /* Validating that the resource is not already taken */
1885         lock_status = REG_RD(bp, hw_lock_control_reg);
1886         if (lock_status & resource_bit) {
1887                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1888                    lock_status, resource_bit);
1889                 return -EEXIST;
1890         }
1891
1892         /* Try for 5 seconds, polling every 5ms */
1893         for (cnt = 0; cnt < 1000; cnt++) {
1894                 /* Try to acquire the lock */
1895                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1896                 lock_status = REG_RD(bp, hw_lock_control_reg);
1897                 if (lock_status & resource_bit)
1898                         return 0;
1899
1900                 msleep(5);
1901         }
1902         DP(NETIF_MSG_HW, "Timeout\n");
1903         return -EAGAIN;
1904 }
1905
1906 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1907 {
1908         u32 lock_status;
1909         u32 resource_bit = (1 << resource);
1910         int func = BP_FUNC(bp);
1911         u32 hw_lock_control_reg;
1912
1913         /* Validating that the resource is within range */
1914         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1915                 DP(NETIF_MSG_HW,
1916                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1917                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1918                 return -EINVAL;
1919         }
1920
1921         if (func <= 5) {
1922                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1923         } else {
1924                 hw_lock_control_reg =
1925                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1926         }
1927
1928         /* Validating that the resource is currently taken */
1929         lock_status = REG_RD(bp, hw_lock_control_reg);
1930         if (!(lock_status & resource_bit)) {
1931                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1932                    lock_status, resource_bit);
1933                 return -EFAULT;
1934         }
1935
1936         REG_WR(bp, hw_lock_control_reg, resource_bit);
1937         return 0;
1938 }
1939
1940 /* HW Lock for shared dual port PHYs */
1941 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1942 {
1943         mutex_lock(&bp->port.phy_mutex);
1944
1945         if (bp->port.need_hw_lock)
1946                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1947 }
1948
1949 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1950 {
1951         if (bp->port.need_hw_lock)
1952                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1953
1954         mutex_unlock(&bp->port.phy_mutex);
1955 }
1956
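/* Read the value of a single GPIO pin. The pin is taken from the other
   port if the port-swap strap is set and active. */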
1957 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1958 {
1959         /* The GPIO should be swapped if swap register is set and active */
1960         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1961                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1962         int gpio_shift = gpio_num +
1963                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1964         u32 gpio_mask = (1 << gpio_shift);
1965         u32 gpio_reg;
1966         int value;
1967
1968         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1969                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1970                 return -EINVAL;
1971         }
1972
1973         /* read GPIO value */
1974         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1975
1976         /* get the requested pin value */
1977         if ((gpio_reg & gpio_mask) == gpio_mask)
1978                 value = 1;
1979         else
1980                 value = 0;
1981
1982         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1983
1984         return value;
1985 }
1986
1987 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1988 {
1989         /* The GPIO should be swapped if swap register is set and active */
1990         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1991                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1992         int gpio_shift = gpio_num +
1993                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1994         u32 gpio_mask = (1 << gpio_shift);
1995         u32 gpio_reg;
1996
1997         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1998                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1999                 return -EINVAL;
2000         }
2001
2002         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2003         /* read GPIO and mask except the float bits */
2004         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2005
2006         switch (mode) {
2007         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2008                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2009                    gpio_num, gpio_shift);
2010                 /* clear FLOAT and set CLR */
2011                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2012                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2013                 break;
2014
2015         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2016                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2017                    gpio_num, gpio_shift);
2018                 /* clear FLOAT and set SET */
2019                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2020                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2021                 break;
2022
2023         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2024                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2025                    gpio_num, gpio_shift);
2026                 /* set FLOAT */
2027                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2028                 break;
2029
2030         default:
2031                 break;
2032         }
2033
2034         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2035         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2036
2037         return 0;
2038 }
2039
2040 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2041 {
2042         /* The GPIO should be swapped if swap register is set and active */
2043         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2044                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2045         int gpio_shift = gpio_num +
2046                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2047         u32 gpio_mask = (1 << gpio_shift);
2048         u32 gpio_reg;
2049
2050         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2051                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2052                 return -EINVAL;
2053         }
2054
2055         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2056         /* read GPIO int */
2057         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2058
2059         switch (mode) {
2060         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2061                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2062                                    "output low\n", gpio_num, gpio_shift);
2063                 /* clear SET and set CLR */
2064                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2065                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2066                 break;
2067
2068         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2069                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2070                                    "output high\n", gpio_num, gpio_shift);
2071                 /* clear CLR and set SET */
2072                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2073                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2074                 break;
2075
2076         default:
2077                 break;
2078         }
2079
2080         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2081         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2082
2083         return 0;
2084 }
2085
2086 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2087 {
2088         u32 spio_mask = (1 << spio_num);
2089         u32 spio_reg;
2090
2091         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2092             (spio_num > MISC_REGISTERS_SPIO_7)) {
2093                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2094                 return -EINVAL;
2095         }
2096
2097         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2098         /* read SPIO and mask except the float bits */
2099         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2100
2101         switch (mode) {
2102         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2103                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2104                 /* clear FLOAT and set CLR */
2105                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2106                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2107                 break;
2108
2109         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2110                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2111                 /* clear FLOAT and set SET */
2112                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2113                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2114                 break;
2115
2116         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2117                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2118                 /* set FLOAT */
2119                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2120                 break;
2121
2122         default:
2123                 break;
2124         }
2125
2126         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2127         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2128
2129         return 0;
2130 }
2131
2132 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2133 {
2134         switch (bp->link_vars.ieee_fc &
2135                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2136         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2137                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2138                                           ADVERTISED_Pause);
2139                 break;
2140
2141         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2142                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2143                                          ADVERTISED_Pause);
2144                 break;
2145
2146         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2147                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2148                 break;
2149
2150         default:
2151                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2152                                           ADVERTISED_Pause);
2153                 break;
2154         }
2155 }
2156
2157 static void bnx2x_link_report(struct bnx2x *bp)
2158 {
2159         if (bp->flags & MF_FUNC_DIS) {
2160                 netif_carrier_off(bp->dev);
2161                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2162                 return;
2163         }
2164
2165         if (bp->link_vars.link_up) {
2166                 u16 line_speed;
2167
2168                 if (bp->state == BNX2X_STATE_OPEN)
2169                         netif_carrier_on(bp->dev);
2170                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2171
2172                 line_speed = bp->link_vars.line_speed;
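                /* In multi-function mode report the maximal BW available
                   to this function if it is lower than the physical line
                   speed */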
2173                 if (IS_E1HMF(bp)) {
2174                         u16 vn_max_rate;
2175
2176                         vn_max_rate =
2177                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2178                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2179                         if (vn_max_rate < line_speed)
2180                                 line_speed = vn_max_rate;
2181                 }
2182                 printk("%d Mbps ", line_speed);
2183
2184                 if (bp->link_vars.duplex == DUPLEX_FULL)
2185                         printk("full duplex");
2186                 else
2187                         printk("half duplex");
2188
2189                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2190                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2191                                 printk(", receive ");
2192                                 if (bp->link_vars.flow_ctrl &
2193                                     BNX2X_FLOW_CTRL_TX)
2194                                         printk("& transmit ");
2195                         } else {
2196                                 printk(", transmit ");
2197                         }
2198                         printk("flow control ON");
2199                 }
2200                 printk("\n");
2201
2202         } else { /* link_down */
2203                 netif_carrier_off(bp->dev);
2204                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2205         }
2206 }
2207
2208 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2209 {
2210         if (!BP_NOMCP(bp)) {
2211                 u8 rc;
2212
2213                 /* Initialize link parameters structure variables */
2214                 /* It is recommended to turn off RX FC for jumbo frames
2215                    for better performance */
2216                 if (bp->dev->mtu > 5000)
2217                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2218                 else
2219                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2220
2221                 bnx2x_acquire_phy_lock(bp);
2222
2223                 if (load_mode == LOAD_DIAG)
2224                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2225
2226                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2227
2228                 bnx2x_release_phy_lock(bp);
2229
2230                 bnx2x_calc_fc_adv(bp);
2231
2232                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2233                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2234                         bnx2x_link_report(bp);
2235                 }
2236
2237                 return rc;
2238         }
2239         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2240         return -EINVAL;
2241 }
2242
2243 static void bnx2x_link_set(struct bnx2x *bp)
2244 {
2245         if (!BP_NOMCP(bp)) {
2246                 bnx2x_acquire_phy_lock(bp);
2247                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2248                 bnx2x_release_phy_lock(bp);
2249
2250                 bnx2x_calc_fc_adv(bp);
2251         } else
2252                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2253 }
2254
2255 static void bnx2x__link_reset(struct bnx2x *bp)
2256 {
2257         if (!BP_NOMCP(bp)) {
2258                 bnx2x_acquire_phy_lock(bp);
2259                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2260                 bnx2x_release_phy_lock(bp);
2261         } else
2262                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2263 }
2264
2265 static u8 bnx2x_link_test(struct bnx2x *bp)
2266 {
2267         u8 rc;
2268
2269         bnx2x_acquire_phy_lock(bp);
2270         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2271         bnx2x_release_phy_lock(bp);
2272
2273         return rc;
2274 }
2275
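/* Initialize the per-port rate shaping and fairness timer parameters
   from the current line speed (r_param is the line rate in bytes/usec;
   the timeouts are expressed in 4-usec SDM ticks) */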
2276 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2277 {
2278         u32 r_param = bp->link_vars.line_speed / 8;
2279         u32 fair_periodic_timeout_usec;
2280         u32 t_fair;
2281
2282         memset(&(bp->cmng.rs_vars), 0,
2283                sizeof(struct rate_shaping_vars_per_port));
2284         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2285
2286         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2287         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2288
2289         /* this is the threshold below which no timer arming will occur;
2290            the 1.25 coefficient makes the threshold a little bigger than
2291            the real time, to compensate for timer inaccuracy */
2292         bp->cmng.rs_vars.rs_threshold =
2293                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2294
2295         /* resolution of fairness timer */
2296         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2297         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2298         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2299
2300         /* this is the threshold below which we won't arm the timer anymore */
2301         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2302
2303         /* we multiply by 1e3/8 to get bytes/msec.
2304            We don't want the credits to exceed
2305            t_fair*FAIR_MEM (the algorithm resolution) */
2306         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2307         /* since each tick is 4 usec */
2308         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2309 }
2310
2311 /* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
2312    It's needed for further normalizing of the min_rates.
2313    The result is:
2314      the sum of vn_min_rates,
2315        or
2316      0 - if all the min_rates are 0.
2317      In the latter case the fairness algorithm should be deactivated.
2318      If not all min_rates are zero, those that are zero will be set to 1.
2319  */
2320 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2321 {
2322         int all_zero = 1;
2323         int port = BP_PORT(bp);
2324         int vn;
2325
2326         bp->vn_weight_sum = 0;
2327         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2328                 int func = 2*vn + port;
2329                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2330                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2331                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2332
2333                 /* Skip hidden vns */
2334                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2335                         continue;
2336
2337                 /* If min rate is zero - set it to 1 */
2338                 if (!vn_min_rate)
2339                         vn_min_rate = DEF_MIN_RATE;
2340                 else
2341                         all_zero = 0;
2342
2343                 bp->vn_weight_sum += vn_min_rate;
2344         }
2345
2346         /* ... only if all min rates are zeros - disable fairness */
2347         if (all_zero) {
2348                 bp->cmng.flags.cmng_enables &=
2349                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2350                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2351                    "  fairness will be disabled\n");
2352         } else
2353                 bp->cmng.flags.cmng_enables |=
2354                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2355 }
2356
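/* Program the rate shaping and fairness context of a single vn into
   XSTORM internal memory. The min/max BW fields in the MF config are
   in units of 100 Mbps, hence the multiplication by 100. */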
2357 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2358 {
2359         struct rate_shaping_vars_per_vn m_rs_vn;
2360         struct fairness_vars_per_vn m_fair_vn;
2361         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2362         u16 vn_min_rate, vn_max_rate;
2363         int i;
2364
2365         /* If function is hidden - set min and max to zeroes */
2366         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2367                 vn_min_rate = 0;
2368                 vn_max_rate = 0;
2369
2370         } else {
2371                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2372                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2373                 /* If min rate is zero - set it to 1 */
2374                 if (!vn_min_rate)
2375                         vn_min_rate = DEF_MIN_RATE;
2376                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2377                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2378         }
2379         DP(NETIF_MSG_IFUP,
2380            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2381            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2382
2383         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2384         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2385
2386         /* global vn counter - maximal Mbps for this vn */
2387         m_rs_vn.vn_counter.rate = vn_max_rate;
2388
2389         /* quota - number of bytes transmitted in this period */
2390         m_rs_vn.vn_counter.quota =
2391                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2392
2393         if (bp->vn_weight_sum) {
2394                 /* credit for each period of the fairness algorithm:
2395                    number of bytes in T_FAIR (the vn's share of the port rate).
2396                    vn_weight_sum should not be larger than 10000, thus
2397                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2398                    than zero */
2399                 m_fair_vn.vn_credit_delta =
2400                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2401                                                  (8 * bp->vn_weight_sum))),
2402                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2403                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2404                    m_fair_vn.vn_credit_delta);
2405         }
2406
2407         /* Store it to internal memory */
2408         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2409                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2410                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2411                        ((u32 *)(&m_rs_vn))[i]);
2412
2413         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2414                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2415                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2416                        ((u32 *)(&m_fair_vn))[i]);
2417 }
2418
2419
2420 /* This function is called upon link interrupt */
2421 static void bnx2x_link_attn(struct bnx2x *bp)
2422 {
2423         /* Make sure that we are synced with the current statistics */
2424         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2425
2426         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2427
2428         if (bp->link_vars.link_up) {
2429
2430                 /* dropless flow control */
2431                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2432                         int port = BP_PORT(bp);
2433                         u32 pause_enabled = 0;
2434
2435                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2436                                 pause_enabled = 1;
2437
2438                         REG_WR(bp, BAR_USTRORM_INTMEM +
2439                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2440                                pause_enabled);
2441                 }
2442
2443                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2444                         struct host_port_stats *pstats;
2445
2446                         pstats = bnx2x_sp(bp, port_stats);
2447                         /* reset old bmac stats */
2448                         memset(&(pstats->mac_stx[0]), 0,
2449                                sizeof(struct mac_stx));
2450                 }
2451                 if (bp->state == BNX2X_STATE_OPEN)
2452                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2453         }
2454
2455         /* indicate link status */
2456         bnx2x_link_report(bp);
2457
2458         if (IS_E1HMF(bp)) {
2459                 int port = BP_PORT(bp);
2460                 int func;
2461                 int vn;
2462
2463                 /* Set the attention towards other drivers on the same port */
2464                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2465                         if (vn == BP_E1HVN(bp))
2466                                 continue;
2467
2468                         func = ((vn << 1) | port);
2469                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2470                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2471                 }
2472
2473                 if (bp->link_vars.link_up) {
2474                         int i;
2475
2476                         /* Init rate shaping and fairness contexts */
2477                         bnx2x_init_port_minmax(bp);
2478
2479                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2480                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2481
2482                         /* Store it to internal memory */
2483                         for (i = 0;
2484                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2485                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2486                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2487                                        ((u32 *)(&bp->cmng))[i]);
2488                 }
2489         }
2490 }
2491
2492 static void bnx2x__link_status_update(struct bnx2x *bp)
2493 {
2494         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2495                 return;
2496
2497         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2498
2499         if (bp->link_vars.link_up)
2500                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2501         else
2502                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2503
2504         bnx2x_calc_vn_weight_sum(bp);
2505
2506         /* indicate link status */
2507         bnx2x_link_report(bp);
2508 }
2509
2510 static void bnx2x_pmf_update(struct bnx2x *bp)
2511 {
2512         int port = BP_PORT(bp);
2513         u32 val;
2514
2515         bp->port.pmf = 1;
2516         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2517
2518         /* enable nig attention */
2519         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2520         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2521         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2522
2523         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2524 }
2525
2526 /* end of Link */
2527
2528 /* slow path */
2529
2530 /*
2531  * General service functions
2532  */
2533
2534 /* send the MCP a request, block until there is a reply */
2535 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2536 {
2537         int func = BP_FUNC(bp);
2538         u32 seq = ++bp->fw_seq;
2539         u32 rc = 0;
2540         u32 cnt = 1;
2541         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2542
2543         mutex_lock(&bp->fw_mb_mutex);
2544         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2545         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2546
2547         do {
2548                 /* let the FW do its magic ... */
2549                 msleep(delay);
2550
2551                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2552
2553                 /* Give the FW up to 5 seconds (500*10ms) */
2554         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2555
2556         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2557            cnt*delay, rc, seq);
2558
2559         /* is this a reply to our command? */
2560         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2561                 rc &= FW_MSG_CODE_MASK;
2562         else {
2563                 /* FW BUG! */
2564                 BNX2X_ERR("FW failed to respond!\n");
2565                 bnx2x_fw_dump(bp);
2566                 rc = 0;
2567         }
2568         mutex_unlock(&bp->fw_mb_mutex);
2569
2570         return rc;
2571 }
2572
2573 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2574 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2575 static void bnx2x_set_rx_mode(struct net_device *dev);
2576
2577 static void bnx2x_e1h_disable(struct bnx2x *bp)
2578 {
2579         int port = BP_PORT(bp);
2580
2581         netif_tx_disable(bp->dev);
2582         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2583
2584         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2585
2586         netif_carrier_off(bp->dev);
2587 }
2588
2589 static void bnx2x_e1h_enable(struct bnx2x *bp)
2590 {
2591         int port = BP_PORT(bp);
2592
2593         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2594
2595         /* Tx queues should only be re-enabled */
2596         netif_tx_wake_all_queues(bp->dev);
2597
2598         /*
2599          * Do not call netif_carrier_on() here; it will be called when the
2600          * link state is checked, if the link is up
2601          */
2602 }
2603
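/* Recalculate the port and per-vn min/max rate parameters (e.g. after
   a bandwidth reallocation) and signal the other functions on the same
   port via a link-sync general attention */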
2604 static void bnx2x_update_min_max(struct bnx2x *bp)
2605 {
2606         int port = BP_PORT(bp);
2607         int vn, i;
2608
2609         /* Init rate shaping and fairness contexts */
2610         bnx2x_init_port_minmax(bp);
2611
2612         bnx2x_calc_vn_weight_sum(bp);
2613
2614         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2615                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2616
2617         if (bp->port.pmf) {
2618                 int func;
2619
2620                 /* Set the attention towards other drivers on the same port */
2621                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2622                         if (vn == BP_E1HVN(bp))
2623                                 continue;
2624
2625                         func = ((vn << 1) | port);
2626                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2627                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2628                 }
2629
2630                 /* Store it to internal memory */
2631                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2632                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2633                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2634                                ((u32 *)(&bp->cmng))[i]);
2635         }
2636 }
2637
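/* Handle a DCC event from the MCP: dynamic enable/disable of this PF
   and bandwidth reallocation. Any event bit left unhandled is reported
   back to the MCP as a failure. */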
2638 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2639 {
2640         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2641
2642         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2643
2644                 /*
2645                  * This is the only place besides the function initialization
2646                  * where the bp->flags can change so it is done without any
2647                  * locks
2648                  */
2649                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2650                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2651                         bp->flags |= MF_FUNC_DIS;
2652
2653                         bnx2x_e1h_disable(bp);
2654                 } else {
2655                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2656                         bp->flags &= ~MF_FUNC_DIS;
2657
2658                         bnx2x_e1h_enable(bp);
2659                 }
2660                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2661         }
2662         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2663
2664                 bnx2x_update_min_max(bp);
2665                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2666         }
2667
2668         /* Report results to MCP */
2669         if (dcc_event)
2670                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2671         else
2672                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2673 }
2674
2675 /* must be called under the spq lock */
2676 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2677 {
2678         struct eth_spe *next_spe = bp->spq_prod_bd;
2679
2680         if (bp->spq_prod_bd == bp->spq_last_bd) {
2681                 bp->spq_prod_bd = bp->spq;
2682                 bp->spq_prod_idx = 0;
2683                 DP(NETIF_MSG_TIMER, "end of spq\n");
2684         } else {
2685                 bp->spq_prod_bd++;
2686                 bp->spq_prod_idx++;
2687         }
2688         return next_spe;
2689 }
2690
2691 /* must be called under the spq lock */
2692 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2693 {
2694         int func = BP_FUNC(bp);
2695
2696         /* Make sure that BD data is updated before writing the producer */
2697         wmb();
2698
2699         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2700                bp->spq_prod_idx);
2701         mmiowb();
2702 }
2703
2704 /* the slow path queue is odd since completions arrive on the fastpath ring */
2705 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2706                          u32 data_hi, u32 data_lo, int common)
2707 {
2708         struct eth_spe *spe;
2709
2710         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2711            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2712            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2713            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2714            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2715
2716 #ifdef BNX2X_STOP_ON_ERROR
2717         if (unlikely(bp->panic))
2718                 return -EIO;
2719 #endif
2720
2721         spin_lock_bh(&bp->spq_lock);
2722
2723         if (!bp->spq_left) {
2724                 BNX2X_ERR("BUG! SPQ ring full!\n");
2725                 spin_unlock_bh(&bp->spq_lock);
2726                 bnx2x_panic();
2727                 return -EBUSY;
2728         }
2729
2730         spe = bnx2x_sp_get_next(bp);
2731
2732         /* CID needs the port number to be encoded in it */
2733         spe->hdr.conn_and_cmd_data =
2734                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2735                                      HW_CID(bp, cid)));
2736         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2737         if (common)
2738                 spe->hdr.type |=
2739                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2740
2741         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2742         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2743
2744         bp->spq_left--;
2745
2746         bnx2x_sp_prod_update(bp);
2747         spin_unlock_bh(&bp->spq_lock);
2748         return 0;
2749 }
2750
2751 /* acquire split MCP access lock register */
2752 static int bnx2x_acquire_alr(struct bnx2x *bp)
2753 {
2754         u32 i, j, val;
2755         int rc = 0;
2756
2757         might_sleep();
2758         i = 100;
2759         for (j = 0; j < i*10; j++) {
2760                 val = (1UL << 31);
2761                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2762                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2763                 if (val & (1L << 31))
2764                         break;
2765
2766                 msleep(5);
2767         }
2768         if (!(val & (1L << 31))) {
2769                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2770                 rc = -EBUSY;
2771         }
2772
2773         return rc;
2774 }
2775
2776 /* release split MCP access lock register */
2777 static void bnx2x_release_alr(struct bnx2x *bp)
2778 {
2779         u32 val = 0;
2780
2781         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2782 }
2783
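/* Compare the cached default status block indices with the ones just
   written by the chip and return a bitmask of the parts (attn/c/u/x/t)
   that have new events */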
2784 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2785 {
2786         struct host_def_status_block *def_sb = bp->def_status_blk;
2787         u16 rc = 0;
2788
2789         barrier(); /* status block is written to by the chip */
2790         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2791                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2792                 rc |= 1;
2793         }
2794         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2795                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2796                 rc |= 2;
2797         }
2798         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2799                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2800                 rc |= 4;
2801         }
2802         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2803                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2804                 rc |= 8;
2805         }
2806         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2807                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2808                 rc |= 16;
2809         }
2810         return rc;
2811 }
2812
2813 /*
2814  * slow path service functions
2815  */
2816
2817 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2818 {
2819         int port = BP_PORT(bp);
2820         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2821                        COMMAND_REG_ATTN_BITS_SET);
2822         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2823                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2824         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2825                                        NIG_REG_MASK_INTERRUPT_PORT0;
2826         u32 aeu_mask;
2827         u32 nig_mask = 0;
2828
2829         if (bp->attn_state & asserted)
2830                 BNX2X_ERR("IGU ERROR\n");
2831
2832         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2833         aeu_mask = REG_RD(bp, aeu_addr);
2834
2835         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2836            aeu_mask, asserted);
2837         aeu_mask &= ~(asserted & 0xff);
2838         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2839
2840         REG_WR(bp, aeu_addr, aeu_mask);
2841         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2842
2843         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2844         bp->attn_state |= asserted;
2845         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2846
2847         if (asserted & ATTN_HARD_WIRED_MASK) {
2848                 if (asserted & ATTN_NIG_FOR_FUNC) {
2849
2850                         bnx2x_acquire_phy_lock(bp);
2851
2852                         /* save nig interrupt mask */
2853                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2854                         REG_WR(bp, nig_int_mask_addr, 0);
2855
2856                         bnx2x_link_attn(bp);
2857
2858                         /* handle unicore attn? */
2859                 }
2860                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2861                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2862
2863                 if (asserted & GPIO_2_FUNC)
2864                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2865
2866                 if (asserted & GPIO_3_FUNC)
2867                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2868
2869                 if (asserted & GPIO_4_FUNC)
2870                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2871
2872                 if (port == 0) {
2873                         if (asserted & ATTN_GENERAL_ATTN_1) {
2874                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2875                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2876                         }
2877                         if (asserted & ATTN_GENERAL_ATTN_2) {
2878                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2879                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2880                         }
2881                         if (asserted & ATTN_GENERAL_ATTN_3) {
2882                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2883                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2884                         }
2885                 } else {
2886                         if (asserted & ATTN_GENERAL_ATTN_4) {
2887                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2888                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2889                         }
2890                         if (asserted & ATTN_GENERAL_ATTN_5) {
2891                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2892                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2893                         }
2894                         if (asserted & ATTN_GENERAL_ATTN_6) {
2895                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2896                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2897                         }
2898                 }
2899
2900         } /* if hardwired */
2901
2902         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2903            asserted, hc_addr);
2904         REG_WR(bp, hc_addr, asserted);
2905
2906         /* now set back the mask */
2907         if (asserted & ATTN_NIG_FOR_FUNC) {
2908                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2909                 bnx2x_release_phy_lock(bp);
2910         }
2911 }
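/* Assert-path summary: newly asserted bits are masked in the AEU under
 * the per-port HW lock, recorded in bp->attn_state, and any hard-wired
 * sources (NIG link change, GPIOs, general attentions 1-6) are serviced.
 * The bits are then masked at the HC via COMMAND_REG_ATTN_BITS_SET, and
 * the NIG interrupt mask is restored only after bnx2x_link_attn() has
 * run, so a new link attention cannot race the handler.
 */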
2912
2913 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2914 {
2915         int port = BP_PORT(bp);
2916
2917         /* mark the failure */
2918         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2919         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2920         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2921                  bp->link_params.ext_phy_config);
2922
2923         /* log the failure */
2924         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2925                " the driver to shut down the card to prevent permanent"
2926                " damage.  Please contact Dell Support for assistance\n",
2927                bp->dev->name);
2928 }
2929
2930 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2931 {
2932         int port = BP_PORT(bp);
2933         int reg_offset;
2934         u32 val, swap_val, swap_override;
2935
2936         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2937                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2938
2939         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2940
2941                 val = REG_RD(bp, reg_offset);
2942                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2943                 REG_WR(bp, reg_offset, val);
2944
2945                 BNX2X_ERR("SPIO5 hw attention\n");
2946
2947                 /* Fan failure attention */
2948                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2949                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2950                         /* Low power mode is controlled by GPIO 2 */
2951                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2952                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2953                         /* The PHY reset is controlled by GPIO 1 */
2954                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2955                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2956                         break;
2957
2958                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2959                         /* The PHY reset is controlled by GPIO 1 */
2960                         /* fake the port number to cancel the swap done in
2961                            set_gpio() */
2962                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2963                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2964                         port = (swap_val && swap_override) ^ 1;
2965                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2966                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2967                         break;
2968
2969                 default:
2970                         break;
2971                 }
2972                 bnx2x_fan_failure(bp);
2973         }
2974
2975         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2976                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2977                 bnx2x_acquire_phy_lock(bp);
2978                 bnx2x_handle_module_detect_int(&bp->link_params);
2979                 bnx2x_release_phy_lock(bp);
2980         }
2981
2982         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2983
2984                 val = REG_RD(bp, reg_offset);
2985                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2986                 REG_WR(bp, reg_offset, val);
2987
2988                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2989                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2990                 bnx2x_panic();
2991         }
2992 }
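/* SPIO5 is the fan-failure indication on boards with the PHYs handled
 * above: the PHY is first forced down through the board-specific GPIOs,
 * then bnx2x_fan_failure() records the event in shmem and logs it.  For
 * the 8727 the port index is recomputed from the swap straps because
 * bnx2x_set_gpio() would otherwise apply the port swap a second time.
 */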
2993
2994 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2995 {
2996         u32 val;
2997
2998         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2999
3000                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3001                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3002                 /* DORQ discard attention */
3003                 if (val & 0x2)
3004                         BNX2X_ERR("FATAL error from DORQ\n");
3005         }
3006
3007         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3008
3009                 int port = BP_PORT(bp);
3010                 int reg_offset;
3011
3012                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3013                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3014
3015                 val = REG_RD(bp, reg_offset);
3016                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3017                 REG_WR(bp, reg_offset, val);
3018
3019                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3020                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3021                 bnx2x_panic();
3022         }
3023 }
3024
3025 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3026 {
3027         u32 val;
3028
3029         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3030
3031                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3032                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3033                 /* CFC error attention */
3034                 if (val & 0x2)
3035                         BNX2X_ERR("FATAL error from CFC\n");
3036         }
3037
3038         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3039
3040                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3041                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3042                 /* RQ_USDMDP_FIFO_OVERFLOW */
3043                 if (val & 0x18000)
3044                         BNX2X_ERR("FATAL error from PXP\n");
3045         }
3046
3047         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3048
3049                 int port = BP_PORT(bp);
3050                 int reg_offset;
3051
3052                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3053                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3054
3055                 val = REG_RD(bp, reg_offset);
3056                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3057                 REG_WR(bp, reg_offset, val);
3058
3059                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3060                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3061                 bnx2x_panic();
3062         }
3063 }
3064
3065 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3066 {
3067         u32 val;
3068
3069         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3070
3071                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3072                         int func = BP_FUNC(bp);
3073
3074                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3075                         bp->mf_config = SHMEM_RD(bp,
3076                                            mf_cfg.func_mf_config[func].config);
3077                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3078                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3079                                 bnx2x_dcc_event(bp,
3080                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3081                         bnx2x__link_status_update(bp);
3082                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3083                                 bnx2x_pmf_update(bp);
3084
3085                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3086
3087                         BNX2X_ERR("MC assert!\n");
3088                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3089                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3090                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3091                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3092                         bnx2x_panic();
3093
3094                 } else if (attn & BNX2X_MCP_ASSERT) {
3095
3096                         BNX2X_ERR("MCP assert!\n");
3097                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3098                         bnx2x_fw_dump(bp);
3099
3100                 } else
3101                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3102         }
3103
3104         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3105                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3106                 if (attn & BNX2X_GRC_TIMEOUT) {
3107                         val = CHIP_IS_E1H(bp) ?
3108                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3109                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3110                 }
3111                 if (attn & BNX2X_GRC_RSV) {
3112                         val = CHIP_IS_E1H(bp) ?
3113                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3114                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3115                 }
3116                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3117         }
3118 }
3119
3120 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3121 {
3122         struct attn_route attn;
3123         struct attn_route group_mask;
3124         int port = BP_PORT(bp);
3125         int index;
3126         u32 reg_addr;
3127         u32 val;
3128         u32 aeu_mask;
3129
3130         /* need to take HW lock because MCP or other port might also
3131            try to handle this event */
3132         bnx2x_acquire_alr(bp);
3133
3134         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3135         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3136         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3137         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3138         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3139            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3140
3141         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3142                 if (deasserted & (1 << index)) {
3143                         group_mask = bp->attn_group[index];
3144
3145                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3146                            index, group_mask.sig[0], group_mask.sig[1],
3147                            group_mask.sig[2], group_mask.sig[3]);
3148
3149                         bnx2x_attn_int_deasserted3(bp,
3150                                         attn.sig[3] & group_mask.sig[3]);
3151                         bnx2x_attn_int_deasserted1(bp,
3152                                         attn.sig[1] & group_mask.sig[1]);
3153                         bnx2x_attn_int_deasserted2(bp,
3154                                         attn.sig[2] & group_mask.sig[2]);
3155                         bnx2x_attn_int_deasserted0(bp,
3156                                         attn.sig[0] & group_mask.sig[0]);
3157
3158                         if ((attn.sig[0] & group_mask.sig[0] &
3159                                                 HW_PRTY_ASSERT_SET_0) ||
3160                             (attn.sig[1] & group_mask.sig[1] &
3161                                                 HW_PRTY_ASSERT_SET_1) ||
3162                             (attn.sig[2] & group_mask.sig[2] &
3163                                                 HW_PRTY_ASSERT_SET_2))
3164                                 BNX2X_ERR("FATAL HW block parity attention\n");
3165                 }
3166         }
3167
3168         bnx2x_release_alr(bp);
3169
3170         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3171
3172         val = ~deasserted;
3173         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3174            val, reg_addr);
3175         REG_WR(bp, reg_addr, val);
3176
3177         if (~bp->attn_state & deasserted)
3178                 BNX2X_ERR("IGU ERROR\n");
3179
3180         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3181                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3182
3183         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3184         aeu_mask = REG_RD(bp, reg_addr);
3185
3186         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3187            aeu_mask, deasserted);
3188         aeu_mask |= (deasserted & 0xff);
3189         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3190
3191         REG_WR(bp, reg_addr, aeu_mask);
3192         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3193
3194         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3195         bp->attn_state &= ~deasserted;
3196         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3197 }
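/* Deassert path mirrors the assert path: the after-invert AEU signals
 * are read under the ALR, each deasserted group is dispatched to the
 * per-register handlers (parity attentions are reported as fatal), the
 * bits are acked at COMMAND_REG_ATTN_BITS_CLR, unmasked again in the
 * AEU, and finally dropped from bp->attn_state.
 */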
3198
3199 static void bnx2x_attn_int(struct bnx2x *bp)
3200 {
3201         /* read local copy of bits */
3202         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3203                                                                 attn_bits);
3204         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3205                                                                 attn_bits_ack);
3206         u32 attn_state = bp->attn_state;
3207
3208         /* look for changed bits */
3209         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3210         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3211
3212         DP(NETIF_MSG_HW,
3213            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3214            attn_bits, attn_ack, asserted, deasserted);
3215
3216         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3217                 BNX2X_ERR("BAD attention state\n");
3218
3219         /* handle bits that were raised */
3220         if (asserted)
3221                 bnx2x_attn_int_asserted(bp, asserted);
3222
3223         if (deasserted)
3224                 bnx2x_attn_int_deasserted(bp, deasserted);
3225 }
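/* Worked example of the derivation with three attention bits:
 * attn_bits = 0b100, attn_ack = 0b001, attn_state = 0b001 gives
 *   asserted   =  0b100 & ~0b001 & ~0b001 = 0b100  (newly raised)
 *   deasserted = ~0b100 &  0b001 &  0b001 = 0b001  (newly cleared)
 * The sanity check flags any bit where bits == ack (line is stable)
 * yet bits != state, i.e. the driver's view disagrees with the IGU.
 */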
3226
3227 static void bnx2x_sp_task(struct work_struct *work)
3228 {
3229         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3230         u16 status;
3231
3232
3233         /* Return here if interrupt is disabled */
3234         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3235                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3236                 return;
3237         }
3238
3239         status = bnx2x_update_dsb_idx(bp);
3240 /*      if (status == 0)                                     */
3241 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3242
3243         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3244
3245         /* HW attentions */
3246         if (status & 0x1)
3247                 bnx2x_attn_int(bp);
3248
3249         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3250                      IGU_INT_NOP, 1);
3251         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3252                      IGU_INT_NOP, 1);
3253         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3254                      IGU_INT_NOP, 1);
3255         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3256                      IGU_INT_NOP, 1);
3257         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3258                      IGU_INT_ENABLE, 1);
3259
3260 }
3261
3262 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3263 {
3264         struct net_device *dev = dev_instance;
3265         struct bnx2x *bp = netdev_priv(dev);
3266
3267         /* Return here if interrupt is disabled */
3268         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3269                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3270                 return IRQ_HANDLED;
3271         }
3272
3273         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3274
3275 #ifdef BNX2X_STOP_ON_ERROR
3276         if (unlikely(bp->panic))
3277                 return IRQ_HANDLED;
3278 #endif
3279
3280 #ifdef BCM_CNIC
3281         {
3282                 struct cnic_ops *c_ops;
3283
3284                 rcu_read_lock();
3285                 c_ops = rcu_dereference(bp->cnic_ops);
3286                 if (c_ops)
3287                         c_ops->cnic_handler(bp->cnic_data, NULL);
3288                 rcu_read_unlock();
3289         }
3290 #endif
3291         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3292
3293         return IRQ_HANDLED;
3294 }
3295
3296 /* end of slow path */
3297
3298 /* Statistics */
3299
3300 /****************************************************************************
3301 * Macros
3302 ****************************************************************************/
3303
3304 /* sum[hi:lo] += add[hi:lo] */
3305 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3306         do { \
3307                 s_lo += a_lo; \
3308                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3309         } while (0)
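/* ADD_64 carry detection: after s_lo += a_lo, an unsigned wrap-around
 * happened iff the new s_lo is smaller than a_lo.  E.g. with u32 halves,
 * s = 0x00000000ffffffff plus a = 0x0000000000000002 leaves s_lo = 1 < 2,
 * so 1 is carried into s_hi and the result is 0x0000000100000001.
 */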
3310
3311 /* difference = minuend - subtrahend */
3312 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3313         do { \
3314                 if (m_lo < s_lo) { \
3315                         /* underflow */ \
3316                         d_hi = m_hi - s_hi; \
3317                         if (d_hi > 0) { \
3318                                 /* we can 'loan' 1 */ \
3319                                 d_hi--; \
3320                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3321                         } else { \
3322                                 /* m_hi <= s_hi */ \
3323                                 d_hi = 0; \
3324                                 d_lo = 0; \
3325                         } \
3326                 } else { \
3327                         /* m_lo >= s_lo */ \
3328                         if (m_hi < s_hi) { \
3329                                 d_hi = 0; \
3330                                 d_lo = 0; \
3331                         } else { \
3332                                 /* m_hi >= s_hi */ \
3333                                 d_hi = m_hi - s_hi; \
3334                                 d_lo = m_lo - s_lo; \
3335                         } \
3336                 } \
3337         } while (0)
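/* DIFF_64 borrow logic: when m_lo < s_lo, one is "loaned" from the high
 * word and d_lo = m_lo + (UINT_MAX - s_lo) + 1 is the usual modulo-2^32
 * wrap.  E.g. m = 0x1_00000000 minus s = 0x0_00000001: d_hi = 1 - 0 - 1
 * = 0 and d_lo = 0 + (0xffffffff - 1) + 1 = 0xffffffff, as expected.
 * When the minuend is not the larger value the result is clamped to 0;
 * the macro is only applied to monotonic HW counters, so a genuinely
 * negative difference should not occur.
 */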
3338
3339 #define UPDATE_STAT64(s, t) \
3340         do { \
3341                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3342                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3343                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3344                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3345                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3346                        pstats->mac_stx[1].t##_lo, diff.lo); \
3347         } while (0)
3348
3349 #define UPDATE_STAT64_NIG(s, t) \
3350         do { \
3351                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3352                         diff.lo, new->s##_lo, old->s##_lo); \
3353                 ADD_64(estats->t##_hi, diff.hi, \
3354                        estats->t##_lo, diff.lo); \
3355         } while (0)
3356
3357 /* sum[hi:lo] += add */
3358 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3359         do { \
3360                 s_lo += a; \
3361                 s_hi += (s_lo < a) ? 1 : 0; \
3362         } while (0)
3363
3364 #define UPDATE_EXTEND_STAT(s) \
3365         do { \
3366                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3367                               pstats->mac_stx[1].s##_lo, \
3368                               new->s); \
3369         } while (0)
3370
3371 #define UPDATE_EXTEND_TSTAT(s, t) \
3372         do { \
3373                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3374                 old_tclient->s = tclient->s; \
3375                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3376         } while (0)
3377
3378 #define UPDATE_EXTEND_USTAT(s, t) \
3379         do { \
3380                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3381                 old_uclient->s = uclient->s; \
3382                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3383         } while (0)
3384
3385 #define UPDATE_EXTEND_XSTAT(s, t) \
3386         do { \
3387                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3388                 old_xclient->s = xclient->s; \
3389                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3390         } while (0)
3391
3392 /* minuend -= subtrahend */
3393 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3394         do { \
3395                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3396         } while (0)
3397
3398 /* minuend[hi:lo] -= subtrahend */
3399 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3400         do { \
3401                 SUB_64(m_hi, 0, m_lo, s); \
3402         } while (0)
3403
3404 #define SUB_EXTEND_USTAT(s, t) \
3405         do { \
3406                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3407                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3408         } while (0)
3409
3410 /*
3411  * General service functions
3412  */
3413
3414 static inline long bnx2x_hilo(u32 *hiref)
3415 {
3416         u32 lo = *(hiref + 1);
3417 #if (BITS_PER_LONG == 64)
3418         u32 hi = *hiref;
3419
3420         return HILO_U64(hi, lo);
3421 #else
3422         return lo;
3423 #endif
3424 }
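/* bnx2x_hilo() relies on the statistics layout storing the high 32 bits
 * first, with the low word at hiref + 1.  On 64-bit builds it returns
 * the full HILO_U64(hi, lo) value; on 32-bit builds the long return type
 * can only carry the low word, so the high half is dropped there.
 */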
3425
3426 /*
3427  * Init service functions
3428  */
3429
3430 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3431 {
3432         if (!bp->stats_pending) {
3433                 struct eth_query_ramrod_data ramrod_data = {0};
3434                 int i, rc;
3435
3436                 ramrod_data.drv_counter = bp->stats_counter++;
3437                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3438                 for_each_queue(bp, i)
3439                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3440
3441                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3442                                    ((u32 *)&ramrod_data)[1],
3443                                    ((u32 *)&ramrod_data)[0], 0);
3444                 if (rc == 0) {
3445                         /* stats ramrod has its own slot on the spq */
3446                         bp->spq_left++;
3447                         bp->stats_pending = 1;
3448                 }
3449         }
3450 }
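/* The statistics ramrod shares the slow-path queue but has a slot of its
 * own, hence the spq_left++ compensating the decrement done inside
 * bnx2x_sp_post().  ramrod_data packs the driver's stats counter, a
 * collect-port flag (set only on the PMF) and a bitmap of client IDs,
 * and travels in the two 32-bit data words of the SPE.
 */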
3451
3452 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3453 {
3454         struct dmae_command *dmae = &bp->stats_dmae;
3455         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3456
3457         *stats_comp = DMAE_COMP_VAL;
3458         if (CHIP_REV_IS_SLOW(bp))
3459                 return;
3460
3461         /* loader */
3462         if (bp->executer_idx) {
3463                 int loader_idx = PMF_DMAE_C(bp);
3464
3465                 memset(dmae, 0, sizeof(struct dmae_command));
3466
3467                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3468                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3469                                 DMAE_CMD_DST_RESET |
3470 #ifdef __BIG_ENDIAN
3471                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3472 #else
3473                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3474 #endif
3475                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3476                                                DMAE_CMD_PORT_0) |
3477                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3478                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3479                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3480                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3481                                      sizeof(struct dmae_command) *
3482                                      (loader_idx + 1)) >> 2;
3483                 dmae->dst_addr_hi = 0;
3484                 dmae->len = sizeof(struct dmae_command) >> 2;
3485                 if (CHIP_IS_E1(bp))
3486                         dmae->len--;
3487                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3488                 dmae->comp_addr_hi = 0;
3489                 dmae->comp_val = 1;
3490
3491                 *stats_comp = 0;
3492                 bnx2x_post_dmae(bp, dmae, loader_idx);
3493
3494         } else if (bp->func_stx) {
3495                 *stats_comp = 0;
3496                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3497         }
3498 }
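/* On slow (emulation/FPGA) chip revs the completion value is faked above
 * so bnx2x_stats_comp() returns immediately.  When a command chain was
 * prepared (bp->executer_idx != 0), the command built here acts as a
 * loader: it DMAs the first prepared command (dmae[0]) into DMAE command
 * memory at slot loader_idx + 1 and completes to that slot's GO register,
 * which starts it; the prepared sub-commands in turn complete back to
 * the loader channel's GO register, which appears to be how the chain
 * advances.  E1 uses a one-dword-shorter command, hence the len--.
 */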
3499
3500 static int bnx2x_stats_comp(struct bnx2x *bp)
3501 {
3502         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3503         int cnt = 10;
3504
3505         might_sleep();
3506         while (*stats_comp != DMAE_COMP_VAL) {
3507                 if (!cnt) {
3508                         BNX2X_ERR("timeout waiting for stats to finish\n");
3509                         break;
3510                 }
3511                 cnt--;
3512                 msleep(1);
3513         }
3514         return 1;
3515 }
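/* Completion polling: at most 10 iterations of msleep(1), i.e. roughly
 * 10 ms plus scheduler slack, before the timeout is reported.  Note the
 * function returns 1 unconditionally, so callers treat a timeout as
 * log-and-continue rather than as a hard error.
 */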
3516
3517 /*
3518  * Statistics service functions
3519  */
3520
3521 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3522 {
3523         struct dmae_command *dmae;
3524         u32 opcode;
3525         int loader_idx = PMF_DMAE_C(bp);
3526         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3527
3528         /* sanity */
3529         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3530                 BNX2X_ERR("BUG!\n");
3531                 return;
3532         }
3533
3534         bp->executer_idx = 0;
3535
3536         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3537                   DMAE_CMD_C_ENABLE |
3538                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3539 #ifdef __BIG_ENDIAN
3540                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3541 #else
3542                   DMAE_CMD_ENDIANITY_DW_SWAP |
3543 #endif
3544                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3545                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3546
3547         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3548         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3549         dmae->src_addr_lo = bp->port.port_stx >> 2;
3550         dmae->src_addr_hi = 0;
3551         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3552         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3553         dmae->len = DMAE_LEN32_RD_MAX;
3554         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3555         dmae->comp_addr_hi = 0;
3556         dmae->comp_val = 1;
3557
3558         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3559         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3560         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3561         dmae->src_addr_hi = 0;
3562         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3563                                    DMAE_LEN32_RD_MAX * 4);
3564         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3565                                    DMAE_LEN32_RD_MAX * 4);
3566         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3567         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3568         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3569         dmae->comp_val = DMAE_COMP_VAL;
3570
3571         *stats_comp = 0;
3572         bnx2x_hw_stats_post(bp);
3573         bnx2x_stats_comp(bp);
3574 }
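/* The port statistics block is wider than one DMAE transfer allows, so
 * the read from MCP shmem is split at DMAE_LEN32_RD_MAX dwords: the
 * first command completes to the loader GO register and the second
 * carries the real completion (DMAE_COMP_VAL into stats_comp) that
 * bnx2x_stats_comp() then polls for.
 */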
3575
3576 static void bnx2x_port_stats_init(struct bnx2x *bp)
3577 {
3578         struct dmae_command *dmae;
3579         int port = BP_PORT(bp);
3580         int vn = BP_E1HVN(bp);
3581         u32 opcode;
3582         int loader_idx = PMF_DMAE_C(bp);
3583         u32 mac_addr;
3584         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3585
3586         /* sanity */
3587         if (!bp->link_vars.link_up || !bp->port.pmf) {
3588                 BNX2X_ERR("BUG!\n");
3589                 return;
3590         }
3591
3592         bp->executer_idx = 0;
3593
3594         /* MCP */
3595         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3596                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3597                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3598 #ifdef __BIG_ENDIAN
3599                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3600 #else
3601                   DMAE_CMD_ENDIANITY_DW_SWAP |
3602 #endif
3603                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3604                   (vn << DMAE_CMD_E1HVN_SHIFT));
3605
3606         if (bp->port.port_stx) {
3607
3608                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3609                 dmae->opcode = opcode;
3610                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3611                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3612                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3613                 dmae->dst_addr_hi = 0;
3614                 dmae->len = sizeof(struct host_port_stats) >> 2;
3615                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3616                 dmae->comp_addr_hi = 0;
3617                 dmae->comp_val = 1;
3618         }
3619
3620         if (bp->func_stx) {
3621
3622                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3623                 dmae->opcode = opcode;
3624                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3625                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3626                 dmae->dst_addr_lo = bp->func_stx >> 2;
3627                 dmae->dst_addr_hi = 0;
3628                 dmae->len = sizeof(struct host_func_stats) >> 2;
3629                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3630                 dmae->comp_addr_hi = 0;
3631                 dmae->comp_val = 1;
3632         }
3633
3634         /* MAC */
3635         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3636                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3637                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3638 #ifdef __BIG_ENDIAN
3639                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3640 #else
3641                   DMAE_CMD_ENDIANITY_DW_SWAP |
3642 #endif
3643                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3644                   (vn << DMAE_CMD_E1HVN_SHIFT));
3645
3646         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3647
3648                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3649                                    NIG_REG_INGRESS_BMAC0_MEM);
3650
3651                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3652                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3653                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3654                 dmae->opcode = opcode;
3655                 dmae->src_addr_lo = (mac_addr +
3656                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3657                 dmae->src_addr_hi = 0;
3658                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3659                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3660                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3661                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3662                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3663                 dmae->comp_addr_hi = 0;
3664                 dmae->comp_val = 1;
3665
3666                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3667                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3668                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3669                 dmae->opcode = opcode;
3670                 dmae->src_addr_lo = (mac_addr +
3671                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3672                 dmae->src_addr_hi = 0;
3673                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3674                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3675                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3676                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3677                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3678                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3679                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3680                 dmae->comp_addr_hi = 0;
3681                 dmae->comp_val = 1;
3682
3683         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3684
3685                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3686
3687                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3688                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3689                 dmae->opcode = opcode;
3690                 dmae->src_addr_lo = (mac_addr +
3691                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3692                 dmae->src_addr_hi = 0;
3693                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3694                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3695                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3696                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3697                 dmae->comp_addr_hi = 0;
3698                 dmae->comp_val = 1;
3699
3700                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3701                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3702                 dmae->opcode = opcode;
3703                 dmae->src_addr_lo = (mac_addr +
3704                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3705                 dmae->src_addr_hi = 0;
3706                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3707                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3708                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3709                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3710                 dmae->len = 1;
3711                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3712                 dmae->comp_addr_hi = 0;
3713                 dmae->comp_val = 1;
3714
3715                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3716                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3717                 dmae->opcode = opcode;
3718                 dmae->src_addr_lo = (mac_addr +
3719                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3720                 dmae->src_addr_hi = 0;
3721                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3722                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3723                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3724                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3725                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3726                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3727                 dmae->comp_addr_hi = 0;
3728                 dmae->comp_val = 1;
3729         }
3730
3731         /* NIG */
3732         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3733         dmae->opcode = opcode;
3734         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3735                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3736         dmae->src_addr_hi = 0;
3737         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3738         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3739         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3740         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3741         dmae->comp_addr_hi = 0;
3742         dmae->comp_val = 1;
3743
3744         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3745         dmae->opcode = opcode;
3746         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3747                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3748         dmae->src_addr_hi = 0;
3749         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3750                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3751         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3752                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3753         dmae->len = (2*sizeof(u32)) >> 2;
3754         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3755         dmae->comp_addr_hi = 0;
3756         dmae->comp_val = 1;
3757
3758         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3759         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3760                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3761                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3762 #ifdef __BIG_ENDIAN
3763                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3764 #else
3765                         DMAE_CMD_ENDIANITY_DW_SWAP |
3766 #endif
3767                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3768                         (vn << DMAE_CMD_E1HVN_SHIFT));
3769         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3770                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3771         dmae->src_addr_hi = 0;
3772         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3773                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3774         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3775                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3776         dmae->len = (2*sizeof(u32)) >> 2;
3777         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3778         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3779         dmae->comp_val = DMAE_COMP_VAL;
3780
3781         *stats_comp = 0;
3782 }
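/* bnx2x_port_stats_init() only builds the DMAE program; nothing runs
 * until bnx2x_hw_stats_post() kicks the loader.  The sequence is: host
 * port (and function) stats out to MCP shmem, MAC statistics in from
 * whichever MAC is active (BMAC or EMAC register blocks), then three
 * NIG reads, with only the final command completing to stats_comp.
 */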
3783
3784 static void bnx2x_func_stats_init(struct bnx2x *bp)
3785 {
3786         struct dmae_command *dmae = &bp->stats_dmae;
3787         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3788
3789         /* sanity */
3790         if (!bp->func_stx) {
3791                 BNX2X_ERR("BUG!\n");
3792                 return;
3793         }
3794
3795         bp->executer_idx = 0;
3796         memset(dmae, 0, sizeof(struct dmae_command));
3797
3798         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3799                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3800                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3801 #ifdef __BIG_ENDIAN
3802                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3803 #else
3804                         DMAE_CMD_ENDIANITY_DW_SWAP |
3805 #endif
3806                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3807                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3808         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3809         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3810         dmae->dst_addr_lo = bp->func_stx >> 2;
3811         dmae->dst_addr_hi = 0;
3812         dmae->len = sizeof(struct host_func_stats) >> 2;
3813         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3814         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3815         dmae->comp_val = DMAE_COMP_VAL;
3816
3817         *stats_comp = 0;
3818 }
3819
3820 static void bnx2x_stats_start(struct bnx2x *bp)
3821 {
3822         if (bp->port.pmf)
3823                 bnx2x_port_stats_init(bp);
3824
3825         else if (bp->func_stx)
3826                 bnx2x_func_stats_init(bp);
3827
3828         bnx2x_hw_stats_post(bp);
3829         bnx2x_storm_stats_post(bp);
3830 }
3831
3832 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3833 {
3834         bnx2x_stats_comp(bp);
3835         bnx2x_stats_pmf_update(bp);
3836         bnx2x_stats_start(bp);
3837 }
3838
3839 static void bnx2x_stats_restart(struct bnx2x *bp)
3840 {
3841         bnx2x_stats_comp(bp);
3842         bnx2x_stats_start(bp);
3843 }
3844
3845 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3846 {
3847         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3848         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3849         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3850         struct {
3851                 u32 lo;
3852                 u32 hi;
3853         } diff;
3854
3855         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3856         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3857         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3858         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3859         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3860         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3861         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3862         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3863         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3864         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3865         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3866         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3867         UPDATE_STAT64(tx_stat_gt127,
3868                                 tx_stat_etherstatspkts65octetsto127octets);
3869         UPDATE_STAT64(tx_stat_gt255,
3870                                 tx_stat_etherstatspkts128octetsto255octets);
3871         UPDATE_STAT64(tx_stat_gt511,
3872                                 tx_stat_etherstatspkts256octetsto511octets);
3873         UPDATE_STAT64(tx_stat_gt1023,
3874                                 tx_stat_etherstatspkts512octetsto1023octets);
3875         UPDATE_STAT64(tx_stat_gt1518,
3876                                 tx_stat_etherstatspkts1024octetsto1522octets);
3877         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3878         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3879         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3880         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3881         UPDATE_STAT64(tx_stat_gterr,
3882                                 tx_stat_dot3statsinternalmactransmiterrors);
3883         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3884
3885         estats->pause_frames_received_hi =
3886                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3887         estats->pause_frames_received_lo =
3888                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3889
3890         estats->pause_frames_sent_hi =
3891                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3892         estats->pause_frames_sent_lo =
3893                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3894 }
3895
3896 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3897 {
3898         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3899         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3900         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3901
3902         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3903         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3904         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3905         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3906         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3907         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3908         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3909         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3910         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3911         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3912         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3913         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3914         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3915         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3916         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3917         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3918         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3919         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3920         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3921         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3922         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3923         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3924         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3925         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3926         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3927         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3928         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3929         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3930         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3931         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3932         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3933
3934         estats->pause_frames_received_hi =
3935                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3936         estats->pause_frames_received_lo =
3937                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3938         ADD_64(estats->pause_frames_received_hi,
3939                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3940                estats->pause_frames_received_lo,
3941                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3942
3943         estats->pause_frames_sent_hi =
3944                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3945         estats->pause_frames_sent_lo =
3946                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3947         ADD_64(estats->pause_frames_sent_hi,
3948                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3949                estats->pause_frames_sent_lo,
3950                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3951 }
3952
3953 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3954 {
3955         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3956         struct nig_stats *old = &(bp->port.old_nig_stats);
3957         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3958         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3959         struct {
3960                 u32 lo;
3961                 u32 hi;
3962         } diff;
3963         u32 nig_timer_max;
3964
3965         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3966                 bnx2x_bmac_stats_update(bp);
3967
3968         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3969                 bnx2x_emac_stats_update(bp);
3970
3971         else { /* unreached */
3972                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3973                 return -1;
3974         }
3975
3976         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3977                       new->brb_discard - old->brb_discard);
3978         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3979                       new->brb_truncate - old->brb_truncate);
3980
3981         UPDATE_STAT64_NIG(egress_mac_pkt0,
3982                                         etherstatspkts1024octetsto1522octets);
3983         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3984
3985         memcpy(old, new, sizeof(struct nig_stats));
3986
3987         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3988                sizeof(struct mac_stx));
3989         estats->brb_drop_hi = pstats->brb_drop_hi;
3990         estats->brb_drop_lo = pstats->brb_drop_lo;
3991
3992         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3993
3994         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3995         if (nig_timer_max != estats->nig_timer_max) {
3996                 estats->nig_timer_max = nig_timer_max;
3997                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3998         }
3999
4000         return 0;
4001 }
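/* mac_stx[0] holds the latest raw MAC snapshot and mac_stx[1] the
 * accumulated totals (see UPDATE_STAT64 above); only the accumulated
 * copy is exported into eth_stats here.  The matching start/end counters
 * are presumably how shmem readers detect a torn port-stats update.
 */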
4002
4003 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4004 {
4005         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4006         struct tstorm_per_port_stats *tport =
4007                                         &stats->tstorm_common.port_statistics;
4008         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4009         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4010         int i;
4011
4012         memcpy(&(fstats->total_bytes_received_hi),
4013                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4014                sizeof(struct host_func_stats) - 2*sizeof(u32));
4015         estats->error_bytes_received_hi = 0;
4016         estats->error_bytes_received_lo = 0;
4017         estats->etherstatsoverrsizepkts_hi = 0;
4018         estats->etherstatsoverrsizepkts_lo = 0;
4019         estats->no_buff_discard_hi = 0;
4020         estats->no_buff_discard_lo = 0;
4021
4022         for_each_rx_queue(bp, i) {
4023                 struct bnx2x_fastpath *fp = &bp->fp[i];
4024                 int cl_id = fp->cl_id;
4025                 struct tstorm_per_client_stats *tclient =
4026                                 &stats->tstorm_common.client_statistics[cl_id];
4027                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4028                 struct ustorm_per_client_stats *uclient =
4029                                 &stats->ustorm_common.client_statistics[cl_id];
4030                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4031                 struct xstorm_per_client_stats *xclient =
4032                                 &stats->xstorm_common.client_statistics[cl_id];
4033                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4034                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4035                 u32 diff;
4036
4037                 /* are storm stats valid? */
4038                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4039                                                         bp->stats_counter) {
4040                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4041                            "  xstorm counter (%d) != stats_counter (%d)\n",
4042                            i, xclient->stats_counter, bp->stats_counter);
4043                         return -1;
4044                 }
4045                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4046                                                         bp->stats_counter) {
4047                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4048                            "  tstorm counter (%d) != stats_counter (%d)\n",
4049                            i, tclient->stats_counter, bp->stats_counter);
4050                         return -2;
4051                 }
4052                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4053                                                         bp->stats_counter) {
4054                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4055                            "  ustorm counter (%d) != stats_counter (%d)\n",
4056                            i, uclient->stats_counter, bp->stats_counter);
4057                         return -4;
4058                 }
4059
4060                 qstats->total_bytes_received_hi =
4061                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4062                 qstats->total_bytes_received_lo =
4063                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4064
4065                 ADD_64(qstats->total_bytes_received_hi,
4066                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4067                        qstats->total_bytes_received_lo,
4068                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4069
4070                 ADD_64(qstats->total_bytes_received_hi,
4071                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4072                        qstats->total_bytes_received_lo,
4073                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4074
4075                 qstats->valid_bytes_received_hi =
4076                                         qstats->total_bytes_received_hi;
4077                 qstats->valid_bytes_received_lo =
4078                                         qstats->total_bytes_received_lo;
4079
4080                 qstats->error_bytes_received_hi =
4081                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4082                 qstats->error_bytes_received_lo =
4083                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4084
4085                 ADD_64(qstats->total_bytes_received_hi,
4086                        qstats->error_bytes_received_hi,
4087                        qstats->total_bytes_received_lo,
4088                        qstats->error_bytes_received_lo);
4089
4090                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4091                                         total_unicast_packets_received);
4092                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4093                                         total_multicast_packets_received);
4094                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4095                                         total_broadcast_packets_received);
4096                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4097                                         etherstatsoverrsizepkts);
4098                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4099
4100                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4101                                         total_unicast_packets_received);
4102                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4103                                         total_multicast_packets_received);
4104                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4105                                         total_broadcast_packets_received);
4106                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4107                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4108                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4109
4110                 qstats->total_bytes_transmitted_hi =
4111                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4112                 qstats->total_bytes_transmitted_lo =
4113                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4114
4115                 ADD_64(qstats->total_bytes_transmitted_hi,
4116                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4117                        qstats->total_bytes_transmitted_lo,
4118                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4119
4120                 ADD_64(qstats->total_bytes_transmitted_hi,
4121                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4122                        qstats->total_bytes_transmitted_lo,
4123                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4124
4125                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4126                                         total_unicast_packets_transmitted);
4127                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4128                                         total_multicast_packets_transmitted);
4129                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4130                                         total_broadcast_packets_transmitted);
4131
4132                 old_tclient->checksum_discard = tclient->checksum_discard;
4133                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4134
4135                 ADD_64(fstats->total_bytes_received_hi,
4136                        qstats->total_bytes_received_hi,
4137                        fstats->total_bytes_received_lo,
4138                        qstats->total_bytes_received_lo);
4139                 ADD_64(fstats->total_bytes_transmitted_hi,
4140                        qstats->total_bytes_transmitted_hi,
4141                        fstats->total_bytes_transmitted_lo,
4142                        qstats->total_bytes_transmitted_lo);
4143                 ADD_64(fstats->total_unicast_packets_received_hi,
4144                        qstats->total_unicast_packets_received_hi,
4145                        fstats->total_unicast_packets_received_lo,
4146                        qstats->total_unicast_packets_received_lo);
4147                 ADD_64(fstats->total_multicast_packets_received_hi,
4148                        qstats->total_multicast_packets_received_hi,
4149                        fstats->total_multicast_packets_received_lo,
4150                        qstats->total_multicast_packets_received_lo);
4151                 ADD_64(fstats->total_broadcast_packets_received_hi,
4152                        qstats->total_broadcast_packets_received_hi,
4153                        fstats->total_broadcast_packets_received_lo,
4154                        qstats->total_broadcast_packets_received_lo);
4155                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4156                        qstats->total_unicast_packets_transmitted_hi,
4157                        fstats->total_unicast_packets_transmitted_lo,
4158                        qstats->total_unicast_packets_transmitted_lo);
4159                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4160                        qstats->total_multicast_packets_transmitted_hi,
4161                        fstats->total_multicast_packets_transmitted_lo,
4162                        qstats->total_multicast_packets_transmitted_lo);
4163                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4164                        qstats->total_broadcast_packets_transmitted_hi,
4165                        fstats->total_broadcast_packets_transmitted_lo,
4166                        qstats->total_broadcast_packets_transmitted_lo);
4167                 ADD_64(fstats->valid_bytes_received_hi,
4168                        qstats->valid_bytes_received_hi,
4169                        fstats->valid_bytes_received_lo,
4170                        qstats->valid_bytes_received_lo);
4171
4172                 ADD_64(estats->error_bytes_received_hi,
4173                        qstats->error_bytes_received_hi,
4174                        estats->error_bytes_received_lo,
4175                        qstats->error_bytes_received_lo);
4176                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4177                        qstats->etherstatsoverrsizepkts_hi,
4178                        estats->etherstatsoverrsizepkts_lo,
4179                        qstats->etherstatsoverrsizepkts_lo);
4180                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4181                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4182         }
4183
4184         ADD_64(fstats->total_bytes_received_hi,
4185                estats->rx_stat_ifhcinbadoctets_hi,
4186                fstats->total_bytes_received_lo,
4187                estats->rx_stat_ifhcinbadoctets_lo);
4188
4189         memcpy(estats, &(fstats->total_bytes_received_hi),
4190                sizeof(struct host_func_stats) - 2*sizeof(u32));
4191
4192         ADD_64(estats->etherstatsoverrsizepkts_hi,
4193                estats->rx_stat_dot3statsframestoolong_hi,
4194                estats->etherstatsoverrsizepkts_lo,
4195                estats->rx_stat_dot3statsframestoolong_lo);
4196         ADD_64(estats->error_bytes_received_hi,
4197                estats->rx_stat_ifhcinbadoctets_hi,
4198                estats->error_bytes_received_lo,
4199                estats->rx_stat_ifhcinbadoctets_lo);
4200
4201         if (bp->port.pmf) {
4202                 estats->mac_filter_discard =
4203                                 le32_to_cpu(tport->mac_filter_discard);
4204                 estats->xxoverflow_discard =
4205                                 le32_to_cpu(tport->xxoverflow_discard);
4206                 estats->brb_truncate_discard =
4207                                 le32_to_cpu(tport->brb_truncate_discard);
4208                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4209         }
4210
4211         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4212
4213         bp->stats_pending = 0;
4214
4215         return 0;
4216 }
4217
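/* Fold the accumulated hi/lo counter pairs into the generic
 * struct net_device_stats that the net core reports.  bnx2x_hilo()
 * (defined earlier in this file) collapses a hi/lo pair into one
 * value, so each netdev counter below is a plain sum of the relevant
 * hardware counters.
 */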
4218 static void bnx2x_net_stats_update(struct bnx2x *bp)
4219 {
4220         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4221         struct net_device_stats *nstats = &bp->dev->stats;
4222         int i;
4223
4224         nstats->rx_packets =
4225                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4226                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4227                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4228
4229         nstats->tx_packets =
4230                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4231                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4232                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4233
4234         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4235
4236         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4237
4238         nstats->rx_dropped = estats->mac_discard;
4239         for_each_rx_queue(bp, i)
4240                 nstats->rx_dropped +=
4241                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4242
4243         nstats->tx_dropped = 0;
4244
4245         nstats->multicast =
4246                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4247
4248         nstats->collisions =
4249                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4250
4251         nstats->rx_length_errors =
4252                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4253                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4254         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4255                                  bnx2x_hilo(&estats->brb_truncate_hi);
4256         nstats->rx_crc_errors =
4257                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4258         nstats->rx_frame_errors =
4259                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4260         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4261         nstats->rx_missed_errors = estats->xxoverflow_discard;
4262
4263         nstats->rx_errors = nstats->rx_length_errors +
4264                             nstats->rx_over_errors +
4265                             nstats->rx_crc_errors +
4266                             nstats->rx_frame_errors +
4267                             nstats->rx_fifo_errors +
4268                             nstats->rx_missed_errors;
4269
4270         nstats->tx_aborted_errors =
4271                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4272                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4273         nstats->tx_carrier_errors =
4274                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4275         nstats->tx_fifo_errors = 0;
4276         nstats->tx_heartbeat_errors = 0;
4277         nstats->tx_window_errors = 0;
4278
4279         nstats->tx_errors = nstats->tx_aborted_errors +
4280                             nstats->tx_carrier_errors +
4281             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4282 }
4283
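/* Sum the software-only per-queue counters (Xoff events, discarded
 * error packets, skb allocation failures, checksum errors) into the
 * device-wide eth_stats block.
 */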
4284 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4285 {
4286         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4287         int i;
4288
4289         estats->driver_xoff = 0;
4290         estats->rx_err_discard_pkt = 0;
4291         estats->rx_skb_alloc_failed = 0;
4292         estats->hw_csum_err = 0;
4293         for_each_rx_queue(bp, i) {
4294                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4295
4296                 estats->driver_xoff += qstats->driver_xoff;
4297                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4298                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4299                 estats->hw_csum_err += qstats->hw_csum_err;
4300         }
4301 }
4302
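/* Periodic statistics refresh, called through the statistics state
 * machine.  Nothing is touched until the previous DMAE transfer has
 * completed (*stats_comp == DMAE_COMP_VAL); repeated storm-statistics
 * update failures are treated as fatal below.
 */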
4303 static void bnx2x_stats_update(struct bnx2x *bp)
4304 {
4305         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4306
4307         if (*stats_comp != DMAE_COMP_VAL)
4308                 return;
4309
4310         if (bp->port.pmf)
4311                 bnx2x_hw_stats_update(bp);
4312
4313         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4314                 BNX2X_ERR("storm stats were not updated for 3 consecutive polls\n");
4315                 bnx2x_panic();
4316                 return;
4317         }
4318
4319         bnx2x_net_stats_update(bp);
4320         bnx2x_drv_stats_update(bp);
4321
4322         if (bp->msglevel & NETIF_MSG_TIMER) {
4323                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4324                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4325                 struct tstorm_per_client_stats *old_tclient =
4326                                                         &bp->fp->old_tclient;
4327                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4328                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4329                 struct net_device_stats *nstats = &bp->dev->stats;
4330                 int i;
4331
4332                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4333                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4334                                   "  tx pkt (%lx)\n",
4335                        bnx2x_tx_avail(fp0_tx),
4336                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4337                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4338                                   "  rx pkt (%lx)\n",
4339                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4340                              fp0_rx->rx_comp_cons),
4341                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4342                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4343                                   "brb truncate %u\n",
4344                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4345                        qstats->driver_xoff,
4346                        estats->brb_drop_lo, estats->brb_truncate_lo);
4347                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4348                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4349                         "mac_discard %u  mac_filter_discard %u  "
4350                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4351                         "ttl0_discard %u\n",
4352                        le32_to_cpu(old_tclient->checksum_discard),
4353                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4354                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4355                        estats->mac_discard, estats->mac_filter_discard,
4356                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4357                        le32_to_cpu(old_tclient->ttl0_discard));
4358
4359                 for_each_queue(bp, i) {
4360                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4361                                bnx2x_fp(bp, i, tx_pkt),
4362                                bnx2x_fp(bp, i, rx_pkt),
4363                                bnx2x_fp(bp, i, rx_calls));
4364                 }
4365         }
4366
4367         bnx2x_hw_stats_post(bp);
4368         bnx2x_storm_stats_post(bp);
4369 }
4370
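/* Build the final DMAE transfers that flush the port and function
 * statistics out to shared memory when collection stops; the port
 * command's completion either chains the function command through GRC
 * or lands in stats_comp directly.
 */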
4371 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4372 {
4373         struct dmae_command *dmae;
4374         u32 opcode;
4375         int loader_idx = PMF_DMAE_C(bp);
4376         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4377
4378         bp->executer_idx = 0;
4379
4380         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4381                   DMAE_CMD_C_ENABLE |
4382                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4383 #ifdef __BIG_ENDIAN
4384                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4385 #else
4386                   DMAE_CMD_ENDIANITY_DW_SWAP |
4387 #endif
4388                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4389                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4390
4391         if (bp->port.port_stx) {
4392
4393                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4394                 if (bp->func_stx)
4395                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4396                 else
4397                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4398                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4399                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4400                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4401                 dmae->dst_addr_hi = 0;
4402                 dmae->len = sizeof(struct host_port_stats) >> 2;
4403                 if (bp->func_stx) {
4404                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4405                         dmae->comp_addr_hi = 0;
4406                         dmae->comp_val = 1;
4407                 } else {
4408                         dmae->comp_addr_lo =
4409                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4410                         dmae->comp_addr_hi =
4411                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4412                         dmae->comp_val = DMAE_COMP_VAL;
4413
4414                         *stats_comp = 0;
4415                 }
4416         }
4417
4418         if (bp->func_stx) {
4419
4420                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4421                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4422                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4423                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4424                 dmae->dst_addr_lo = bp->func_stx >> 2;
4425                 dmae->dst_addr_hi = 0;
4426                 dmae->len = sizeof(struct host_func_stats) >> 2;
4427                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4428                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4429                 dmae->comp_val = DMAE_COMP_VAL;
4430
4431                 *stats_comp = 0;
4432         }
4433 }
4434
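/* Stop statistics collection: complete any outstanding DMAE, take one
 * last update, and (on the PMF) flush the final numbers to shared
 * memory for the management firmware.
 */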
4435 static void bnx2x_stats_stop(struct bnx2x *bp)
4436 {
4437         int update = 0;
4438
4439         bnx2x_stats_comp(bp);
4440
4441         if (bp->port.pmf)
4442                 update = (bnx2x_hw_stats_update(bp) == 0);
4443
4444         update |= (bnx2x_storm_stats_update(bp) == 0);
4445
4446         if (update) {
4447                 bnx2x_net_stats_update(bp);
4448
4449                 if (bp->port.pmf)
4450                         bnx2x_port_stats_stop(bp);
4451
4452                 bnx2x_hw_stats_post(bp);
4453                 bnx2x_stats_comp(bp);
4454         }
4455 }
4456
4457 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4458 {
4459 }
4460
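/* Statistics state machine: indexed by [current state][event], each
 * entry names the handler to run and the state to move to.  Events
 * come from PMF transitions, link-up, the periodic timer (UPDATE) and
 * interface shutdown (STOP).
 */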
4461 static const struct {
4462         void (*action)(struct bnx2x *bp);
4463         enum bnx2x_stats_state next_state;
4464 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4465 /* state        event   */
4466 {
4467 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4468 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4469 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4470 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4471 },
4472 {
4473 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4474 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4475 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4476 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4477 }
4478 };
4479
4480 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4481 {
4482         enum bnx2x_stats_state state = bp->stats_state;
4483
4484         bnx2x_stats_stm[state][event].action(bp);
4485         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4486
4487         /* Make sure the state change is visible before the next event is handled */
4488         smp_wmb();
4489
4490         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4491                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4492                    state, event, bp->stats_state);
4493 }
4494
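/* PMF only: DMA the host port statistics buffer out to the port_stx
 * area in shared memory once, presumably so the management firmware
 * starts accumulating from a known baseline.
 */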
4495 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4496 {
4497         struct dmae_command *dmae;
4498         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4499
4500         /* sanity */
4501         if (!bp->port.pmf || !bp->port.port_stx) {
4502                 BNX2X_ERR("BUG!\n");
4503                 return;
4504         }
4505
4506         bp->executer_idx = 0;
4507
4508         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4509         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4510                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4511                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4512 #ifdef __BIG_ENDIAN
4513                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4514 #else
4515                         DMAE_CMD_ENDIANITY_DW_SWAP |
4516 #endif
4517                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4518                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4519         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4520         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4521         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4522         dmae->dst_addr_hi = 0;
4523         dmae->len = sizeof(struct host_port_stats) >> 2;
4524         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4525         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4526         dmae->comp_val = DMAE_COMP_VAL;
4527
4528         *stats_comp = 0;
4529         bnx2x_hw_stats_post(bp);
4530         bnx2x_stats_comp(bp);
4531 }
4532
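/* PMF only: walk every vnic sharing this port and initialize its
 * function statistics base, temporarily repointing bp->func_stx at
 * each function's mailbox parameter before restoring our own.
 */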
4533 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4534 {
4535         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4536         int port = BP_PORT(bp);
4537         int func;
4538         u32 func_stx;
4539
4540         /* sanity */
4541         if (!bp->port.pmf || !bp->func_stx) {
4542                 BNX2X_ERR("BUG!\n");
4543                 return;
4544         }
4545
4546         /* save our func_stx */
4547         func_stx = bp->func_stx;
4548
4549         for (vn = VN_0; vn < vn_max; vn++) {
4550                 func = 2*vn + port;
4551
4552                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4553                 bnx2x_func_stats_init(bp);
4554                 bnx2x_hw_stats_post(bp);
4555                 bnx2x_stats_comp(bp);
4556         }
4557
4558         /* restore our func_stx */
4559         bp->func_stx = func_stx;
4560 }
4561
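/* Non-PMF path: read the function statistics left in shared memory
 * (e.g. by a previous PMF) back into func_stats_base, so accumulation
 * continues from that baseline instead of from zero.
 */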
4562 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4563 {
4564         struct dmae_command *dmae = &bp->stats_dmae;
4565         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4566
4567         /* sanity */
4568         if (!bp->func_stx) {
4569                 BNX2X_ERR("BUG!\n");
4570                 return;
4571         }
4572
4573         bp->executer_idx = 0;
4574         memset(dmae, 0, sizeof(struct dmae_command));
4575
4576         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4577                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4578                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4579 #ifdef __BIG_ENDIAN
4580                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4581 #else
4582                         DMAE_CMD_ENDIANITY_DW_SWAP |
4583 #endif
4584                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4585                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4586         dmae->src_addr_lo = bp->func_stx >> 2;
4587         dmae->src_addr_hi = 0;
4588         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4589         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4590         dmae->len = sizeof(struct host_func_stats) >> 2;
4591         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4592         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4593         dmae->comp_val = DMAE_COMP_VAL;
4594
4595         *stats_comp = 0;
4596         bnx2x_hw_stats_post(bp);
4597         bnx2x_stats_comp(bp);
4598 }
4599
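/* One-time statistics setup on load: locate the port/function stats
 * areas in shared memory, snapshot the NIG counters as the "old"
 * baseline, zero all per-queue and per-device software counters and
 * prime the management baselines according to the PMF role.
 */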
4600 static void bnx2x_stats_init(struct bnx2x *bp)
4601 {
4602         int port = BP_PORT(bp);
4603         int func = BP_FUNC(bp);
4604         int i;
4605
4606         bp->stats_pending = 0;
4607         bp->executer_idx = 0;
4608         bp->stats_counter = 0;
4609
4610         /* port and func stats for management */
4611         if (!BP_NOMCP(bp)) {
4612                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4613                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4614
4615         } else {
4616                 bp->port.port_stx = 0;
4617                 bp->func_stx = 0;
4618         }
4619         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4620            bp->port.port_stx, bp->func_stx);
4621
4622         /* port stats */
4623         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4624         bp->port.old_nig_stats.brb_discard =
4625                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4626         bp->port.old_nig_stats.brb_truncate =
4627                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4628         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4629                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4630         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4631                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4632
4633         /* function stats */
4634         for_each_queue(bp, i) {
4635                 struct bnx2x_fastpath *fp = &bp->fp[i];
4636
4637                 memset(&fp->old_tclient, 0,
4638                        sizeof(struct tstorm_per_client_stats));
4639                 memset(&fp->old_uclient, 0,
4640                        sizeof(struct ustorm_per_client_stats));
4641                 memset(&fp->old_xclient, 0,
4642                        sizeof(struct xstorm_per_client_stats));
4643                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4644         }
4645
4646         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4647         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4648
4649         bp->stats_state = STATS_STATE_DISABLED;
4650
4651         if (bp->port.pmf) {
4652                 if (bp->port.port_stx)
4653                         bnx2x_port_stats_base_init(bp);
4654
4655                 if (bp->func_stx)
4656                         bnx2x_func_stats_base_init(bp);
4657
4658         } else if (bp->func_stx)
4659                 bnx2x_func_stats_base_update(bp);
4660 }
4661
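/* Periodic housekeeping timer (re-armed every bp->current_interval
 * jiffies): services ring 0 when the "poll" module parameter is set,
 * exchanges the heartbeat pulse with the MCP, and triggers a
 * statistics update while the device is open.
 */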
4662 static void bnx2x_timer(unsigned long data)
4663 {
4664         struct bnx2x *bp = (struct bnx2x *) data;
4665
4666         if (!netif_running(bp->dev))
4667                 return;
4668
4669         if (atomic_read(&bp->intr_sem) != 0)
4670                 goto timer_restart;
4671
4672         if (poll) {
4673                 struct bnx2x_fastpath *fp = &bp->fp[0];
4674                 int rc;
4675
4676                 bnx2x_tx_int(fp);
4677                 rc = bnx2x_rx_int(fp, 1000);
4678         }
4679
4680         if (!BP_NOMCP(bp)) {
4681                 int func = BP_FUNC(bp);
4682                 u32 drv_pulse;
4683                 u32 mcp_pulse;
4684
4685                 ++bp->fw_drv_pulse_wr_seq;
4686                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4687                 /* TBD - add SYSTEM_TIME */
4688                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4689                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4690
4691                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4692                              MCP_PULSE_SEQ_MASK);
4693                 /* The delta between driver pulse and mcp response
4694                  * should be 1 (before mcp response) or 0 (after mcp response)
4695                  */
4696                 if ((drv_pulse != mcp_pulse) &&
4697                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4698                         /* someone lost a heartbeat... */
4699                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4700                                   drv_pulse, mcp_pulse);
4701                 }
4702         }
4703
4704         if (bp->state == BNX2X_STATE_OPEN)
4705                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4706
4707 timer_restart:
4708         mod_timer(&bp->timer, jiffies + bp->current_interval);
4709 }
4710
4711 /* end of Statistics */
4712
4713 /* nic init */
4714
4715 /*
4716  * nic init service functions
4717  */
4718
4719 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4720 {
4721         int port = BP_PORT(bp);
4722
4723         /* "CSTORM" */
4724         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4725                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4726                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4727         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4728                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4729                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4730 }
4731
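/* Program one fastpath status block: point the chip at the host copy
 * (USTORM and CSTORM halves), record the owning function, and start
 * with host coalescing disabled on every index; bnx2x_update_coalesce()
 * later re-enables the indices that are actually used.
 */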
4732 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4733                           dma_addr_t mapping, int sb_id)
4734 {
4735         int port = BP_PORT(bp);
4736         int func = BP_FUNC(bp);
4737         int index;
4738         u64 section;
4739
4740         /* USTORM */
4741         section = ((u64)mapping) + offsetof(struct host_status_block,
4742                                             u_status_block);
4743         sb->u_status_block.status_block_id = sb_id;
4744
4745         REG_WR(bp, BAR_CSTRORM_INTMEM +
4746                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4747         REG_WR(bp, BAR_CSTRORM_INTMEM +
4748                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4749                U64_HI(section));
4750         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4751                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4752
4753         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4754                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4755                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4756
4757         /* CSTORM */
4758         section = ((u64)mapping) + offsetof(struct host_status_block,
4759                                             c_status_block);
4760         sb->c_status_block.status_block_id = sb_id;
4761
4762         REG_WR(bp, BAR_CSTRORM_INTMEM +
4763                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4764         REG_WR(bp, BAR_CSTRORM_INTMEM +
4765                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4766                U64_HI(section));
4767         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4768                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4769
4770         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4771                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4772                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4773
4774         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4775 }
4776
4777 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4778 {
4779         int func = BP_FUNC(bp);
4780
4781         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4782                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4783                         sizeof(struct tstorm_def_status_block)/4);
4784         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4785                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4786                         sizeof(struct cstorm_def_status_block_u)/4);
4787         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4788                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4789                         sizeof(struct cstorm_def_status_block_c)/4);
4790         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4791                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4792                         sizeof(struct xstorm_def_status_block)/4);
4793 }
4794
4795 static void bnx2x_init_def_sb(struct bnx2x *bp,
4796                               struct host_def_status_block *def_sb,
4797                               dma_addr_t mapping, int sb_id)
4798 {
4799         int port = BP_PORT(bp);
4800         int func = BP_FUNC(bp);
4801         int index, val, reg_offset;
4802         u64 section;
4803
4804         /* ATTN */
4805         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4806                                             atten_status_block);
4807         def_sb->atten_status_block.status_block_id = sb_id;
4808
4809         bp->attn_state = 0;
4810
4811         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4812                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4813
4814         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4815                 bp->attn_group[index].sig[0] = REG_RD(bp,
4816                                                      reg_offset + 0x10*index);
4817                 bp->attn_group[index].sig[1] = REG_RD(bp,
4818                                                reg_offset + 0x4 + 0x10*index);
4819                 bp->attn_group[index].sig[2] = REG_RD(bp,
4820                                                reg_offset + 0x8 + 0x10*index);
4821                 bp->attn_group[index].sig[3] = REG_RD(bp,
4822                                                reg_offset + 0xc + 0x10*index);
4823         }
4824
4825         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4826                              HC_REG_ATTN_MSG0_ADDR_L);
4827
4828         REG_WR(bp, reg_offset, U64_LO(section));
4829         REG_WR(bp, reg_offset + 4, U64_HI(section));
4830
4831         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4832
4833         val = REG_RD(bp, reg_offset);
4834         val |= sb_id;
4835         REG_WR(bp, reg_offset, val);
4836
4837         /* USTORM */
4838         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4839                                             u_def_status_block);
4840         def_sb->u_def_status_block.status_block_id = sb_id;
4841
4842         REG_WR(bp, BAR_CSTRORM_INTMEM +
4843                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4844         REG_WR(bp, BAR_CSTRORM_INTMEM +
4845                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4846                U64_HI(section));
4847         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4848                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4849
4850         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4851                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4852                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4853
4854         /* CSTORM */
4855         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4856                                             c_def_status_block);
4857         def_sb->c_def_status_block.status_block_id = sb_id;
4858
4859         REG_WR(bp, BAR_CSTRORM_INTMEM +
4860                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4861         REG_WR(bp, BAR_CSTRORM_INTMEM +
4862                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4863                U64_HI(section));
4864         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4865                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4866
4867         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4868                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4869                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4870
4871         /* TSTORM */
4872         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4873                                             t_def_status_block);
4874         def_sb->t_def_status_block.status_block_id = sb_id;
4875
4876         REG_WR(bp, BAR_TSTRORM_INTMEM +
4877                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4878         REG_WR(bp, BAR_TSTRORM_INTMEM +
4879                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4880                U64_HI(section));
4881         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4882                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4883
4884         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4885                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4886                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4887
4888         /* XSTORM */
4889         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4890                                             x_def_status_block);
4891         def_sb->x_def_status_block.status_block_id = sb_id;
4892
4893         REG_WR(bp, BAR_XSTRORM_INTMEM +
4894                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4895         REG_WR(bp, BAR_XSTRORM_INTMEM +
4896                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4897                U64_HI(section));
4898         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4899                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4900
4901         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4902                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4903                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4904
4905         bp->stats_pending = 0;
4906         bp->set_mac_pending = 0;
4907
4908         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4909 }
4910
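/* Program interrupt coalescing for every queue.  Judging by the /12
 * scaling of the microsecond tick values, the HC timeout registers
 * appear to hold the timeout in 12-usec hardware units; a resulting
 * value of zero disables host coalescing for that index altogether.
 */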
4911 static void bnx2x_update_coalesce(struct bnx2x *bp)
4912 {
4913         int port = BP_PORT(bp);
4914         int i;
4915
4916         for_each_queue(bp, i) {
4917                 int sb_id = bp->fp[i].sb_id;
4918
4919                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4920                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4921                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4922                                                       U_SB_ETH_RX_CQ_INDEX),
4923                         bp->rx_ticks/12);
4924                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4925                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4926                                                        U_SB_ETH_RX_CQ_INDEX),
4927                          (bp->rx_ticks/12) ? 0 : 1);
4928
4929                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4930                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4931                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4932                                                       C_SB_ETH_TX_CQ_INDEX),
4933                         bp->tx_ticks/12);
4934                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4935                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4936                                                        C_SB_ETH_TX_CQ_INDEX),
4937                          (bp->tx_ticks/12) ? 0 : 1);
4938         }
4939 }
4940
4941 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4942                                        struct bnx2x_fastpath *fp, int last)
4943 {
4944         int i;
4945
4946         for (i = 0; i < last; i++) {
4947                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4948                 struct sk_buff *skb = rx_buf->skb;
4949
4950                 if (skb == NULL) {
4951                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4952                         continue;
4953                 }
4954
4955                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4956                         pci_unmap_single(bp->pdev,
4957                                          pci_unmap_addr(rx_buf, mapping),
4958                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4959
4960                 dev_kfree_skb(skb);
4961                 rx_buf->skb = NULL;
4962         }
4963 }
4964
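/* Bring up all Rx rings: pre-allocate the TPA skb pool (disabling TPA
 * on a queue if that fails), chain the "next page" elements of the
 * SGE, BD and CQ rings, fill the rings with buffers and publish the
 * initial producers to the chip.
 */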
4965 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4966 {
4967         int func = BP_FUNC(bp);
4968         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4969                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4970         u16 ring_prod, cqe_ring_prod;
4971         int i, j;
4972
4973         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4974         DP(NETIF_MSG_IFUP,
4975            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4976
4977         if (bp->flags & TPA_ENABLE_FLAG) {
4978
4979                 for_each_rx_queue(bp, j) {
4980                         struct bnx2x_fastpath *fp = &bp->fp[j];
4981
4982                         for (i = 0; i < max_agg_queues; i++) {
4983                                 fp->tpa_pool[i].skb =
4984                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4985                                 if (!fp->tpa_pool[i].skb) {
4986                                         BNX2X_ERR("Failed to allocate TPA "
4987                                                   "skb pool for queue[%d] - "
4988                                                   "disabling TPA on this "
4989                                                   "queue!\n", j);
4990                                         bnx2x_free_tpa_pool(bp, fp, i);
4991                                         fp->disable_tpa = 1;
4992                                         break;
4993                                 }
4994                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4995                                                    mapping, 0);
4997                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4998                         }
4999                 }
5000         }
5001
5002         for_each_rx_queue(bp, j) {
5003                 struct bnx2x_fastpath *fp = &bp->fp[j];
5004
5005                 fp->rx_bd_cons = 0;
5006                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5007                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5008
5009                 /* Mark queue as Rx */
5010                 fp->is_rx_queue = 1;
5011
5012                 /* "next page" elements initialization */
5013                 /* SGE ring */
5014                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5015                         struct eth_rx_sge *sge;
5016
5017                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5018                         sge->addr_hi =
5019                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5020                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5021                         sge->addr_lo =
5022                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5023                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5024                 }
5025
5026                 bnx2x_init_sge_ring_bit_mask(fp);
5027
5028                 /* RX BD ring */
5029                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5030                         struct eth_rx_bd *rx_bd;
5031
5032                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5033                         rx_bd->addr_hi =
5034                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5035                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5036                         rx_bd->addr_lo =
5037                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5038                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5039                 }
5040
5041                 /* CQ ring */
5042                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5043                         struct eth_rx_cqe_next_page *nextpg;
5044
5045                         nextpg = (struct eth_rx_cqe_next_page *)
5046                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5047                         nextpg->addr_hi =
5048                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5049                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5050                         nextpg->addr_lo =
5051                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5052                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5053                 }
5054
5055                 /* Allocate SGEs and initialize the ring elements */
5056                 for (i = 0, ring_prod = 0;
5057                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5058
5059                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5060                                 BNX2X_ERR("was only able to allocate "
5061                                           "%d rx sges\n", i);
5062                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5063                                 /* Cleanup already allocated elements */
5064                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5065                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5066                                 fp->disable_tpa = 1;
5067                                 ring_prod = 0;
5068                                 break;
5069                         }
5070                         ring_prod = NEXT_SGE_IDX(ring_prod);
5071                 }
5072                 fp->rx_sge_prod = ring_prod;
5073
5074                 /* Allocate BDs and initialize BD ring */
5075                 fp->rx_comp_cons = 0;
5076                 cqe_ring_prod = ring_prod = 0;
5077                 for (i = 0; i < bp->rx_ring_size; i++) {
5078                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5079                                 BNX2X_ERR("was only able to allocate "
5080                                           "%d rx skbs on queue[%d]\n", i, j);
5081                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5082                                 break;
5083                         }
5084                         ring_prod = NEXT_RX_IDX(ring_prod);
5085                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5086                         WARN_ON(ring_prod <= i);
5087                 }
5088
5089                 fp->rx_bd_prod = ring_prod;
5090                 /* must not have more available CQEs than BDs */
5091                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5092                                        cqe_ring_prod);
5093                 fp->rx_pkt = fp->rx_calls = 0;
5094
5095                 /* Warning!
5096                  * this write generates an interrupt (to the TSTORM),
5097                  * so it must only be done after the chip is initialized
5098                  */
5099                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5100                                      fp->rx_sge_prod);
5101                 if (j != 0)
5102                         continue;
5103
5104                 REG_WR(bp, BAR_USTRORM_INTMEM +
5105                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5106                        U64_LO(fp->rx_comp_mapping));
5107                 REG_WR(bp, BAR_USTRORM_INTMEM +
5108                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5109                        U64_HI(fp->rx_comp_mapping));
5110         }
5111 }
5112
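/* Bring up all Tx rings: chain the "next BD" element of each page and
 * reset the producer/consumer indices and the doorbell data.
 */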
5113 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5114 {
5115         int i, j;
5116
5117         for_each_tx_queue(bp, j) {
5118                 struct bnx2x_fastpath *fp = &bp->fp[j];
5119
5120                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5121                         struct eth_tx_next_bd *tx_next_bd =
5122                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5123
5124                         tx_next_bd->addr_hi =
5125                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5126                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5127                         tx_next_bd->addr_lo =
5128                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5129                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5130                 }
5131
5132                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5133                 fp->tx_db.data.zero_fill1 = 0;
5134                 fp->tx_db.data.prod = 0;
5135
5136                 fp->tx_pkt_prod = 0;
5137                 fp->tx_pkt_cons = 0;
5138                 fp->tx_bd_prod = 0;
5139                 fp->tx_bd_cons = 0;
5140                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5141                 fp->tx_pkt = 0;
5142         }
5143
5144         /* clean tx statistics */
5145         for_each_rx_queue(bp, i)
5146                 bnx2x_fp(bp, i, tx_pkt) = 0;
5147 }
5148
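/* Slow-path (SPQ) ring setup: reset the producer state and tell the
 * XSTORM where the ring lives and where its producer starts.
 */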
5149 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5150 {
5151         int func = BP_FUNC(bp);
5152
5153         spin_lock_init(&bp->spq_lock);
5154
5155         bp->spq_left = MAX_SPQ_PENDING;
5156         bp->spq_prod_idx = 0;
5157         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5158         bp->spq_prod_bd = bp->spq;
5159         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5160
5161         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5162                U64_LO(bp->spq_mapping));
5163         REG_WR(bp,
5164                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5165                U64_HI(bp->spq_mapping));
5166
5167         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5168                bp->spq_prod_idx);
5169 }
5170
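/* Fill the per-connection ETH context: the USTORM side (Rx BD/SGE
 * page bases, buffer sizes, TPA flags) for Rx queues, and the
 * CSTORM/XSTORM side (Tx BD page base, statistics id) for Tx queues.
 */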
5171 static void bnx2x_init_context(struct bnx2x *bp)
5172 {
5173         int i;
5174
5175         for_each_rx_queue(bp, i) {
5176                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5177                 struct bnx2x_fastpath *fp = &bp->fp[i];
5178                 u8 cl_id = fp->cl_id;
5179
5180                 context->ustorm_st_context.common.sb_index_numbers =
5181                                                 BNX2X_RX_SB_INDEX_NUM;
5182                 context->ustorm_st_context.common.clientId = cl_id;
5183                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5184                 context->ustorm_st_context.common.flags =
5185                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5186                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5187                 context->ustorm_st_context.common.statistics_counter_id =
5188                                                 cl_id;
5189                 context->ustorm_st_context.common.mc_alignment_log_size =
5190                                                 BNX2X_RX_ALIGN_SHIFT;
5191                 context->ustorm_st_context.common.bd_buff_size =
5192                                                 bp->rx_buf_size;
5193                 context->ustorm_st_context.common.bd_page_base_hi =
5194                                                 U64_HI(fp->rx_desc_mapping);
5195                 context->ustorm_st_context.common.bd_page_base_lo =
5196                                                 U64_LO(fp->rx_desc_mapping);
5197                 if (!fp->disable_tpa) {
5198                         context->ustorm_st_context.common.flags |=
5199                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5200                         context->ustorm_st_context.common.sge_buff_size =
5201                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5202                                          (u32)0xffff);
5203                         context->ustorm_st_context.common.sge_page_base_hi =
5204                                                 U64_HI(fp->rx_sge_mapping);
5205                         context->ustorm_st_context.common.sge_page_base_lo =
5206                                                 U64_LO(fp->rx_sge_mapping);
5207
5208                         context->ustorm_st_context.common.max_sges_for_packet =
5209                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5210                         context->ustorm_st_context.common.max_sges_for_packet =
5211                                 ((context->ustorm_st_context.common.
5212                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5213                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5214                 }
5215
5216                 context->ustorm_ag_context.cdu_usage =
5217                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5218                                                CDU_REGION_NUMBER_UCM_AG,
5219                                                ETH_CONNECTION_TYPE);
5220
5221                 context->xstorm_ag_context.cdu_reserved =
5222                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5223                                                CDU_REGION_NUMBER_XCM_AG,
5224                                                ETH_CONNECTION_TYPE);
5225         }
5226
5227         for_each_tx_queue(bp, i) {
5228                 struct bnx2x_fastpath *fp = &bp->fp[i];
5229                 struct eth_context *context =
5230                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5231
5232                 context->cstorm_st_context.sb_index_number =
5233                                                 C_SB_ETH_TX_CQ_INDEX;
5234                 context->cstorm_st_context.status_block_id = fp->sb_id;
5235
5236                 context->xstorm_st_context.tx_bd_page_base_hi =
5237                                                 U64_HI(fp->tx_desc_mapping);
5238                 context->xstorm_st_context.tx_bd_page_base_lo =
5239                                                 U64_LO(fp->tx_desc_mapping);
5240                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5241                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5242         }
5243 }
5244
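/* RSS indirection table: spread the TSTORM_INDIRECTION_TABLE_SIZE
 * entries round-robin across the Rx queue client ids.  Skipped
 * entirely when RSS is disabled.
 */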
5245 static void bnx2x_init_ind_table(struct bnx2x *bp)
5246 {
5247         int func = BP_FUNC(bp);
5248         int i;
5249
5250         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5251                 return;
5252
5253         DP(NETIF_MSG_IFUP,
5254            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5255         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5256                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5257                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5258                         bp->fp->cl_id + (i % bp->num_rx_queues));
5259 }
5260
5261 static void bnx2x_set_client_config(struct bnx2x *bp)
5262 {
5263         struct tstorm_eth_client_config tstorm_client = {0};
5264         int port = BP_PORT(bp);
5265         int i;
5266
5267         tstorm_client.mtu = bp->dev->mtu;
5268         tstorm_client.config_flags =
5269                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5270                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5271 #ifdef BCM_VLAN
5272         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5273                 tstorm_client.config_flags |=
5274                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5275                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5276         }
5277 #endif
5278
5279         for_each_queue(bp, i) {
5280                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5281
5282                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5283                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5284                        ((u32 *)&tstorm_client)[0]);
5285                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5286                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5287                        ((u32 *)&tstorm_client)[1]);
5288         }
5289
5290         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5291            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5292 }
5293
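/* Translate bp->rx_mode into TSTORM accept/drop-all filter flags for
 * the clients named by the mask, and into a NIG LLH mask that decides
 * which packet classes reach the host at all.
 */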
5294 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5295 {
5296         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5297         int mode = bp->rx_mode;
5298         int mask = bp->rx_mode_cl_mask;
5299         int func = BP_FUNC(bp);
5300         int port = BP_PORT(bp);
5301         int i;
5302         /* All but management unicast packets should pass to the host as well */
5303         u32 llh_mask =
5304                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5305                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5306                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5307                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5308
5309         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5310
5311         switch (mode) {
5312         case BNX2X_RX_MODE_NONE: /* no Rx */
5313                 tstorm_mac_filter.ucast_drop_all = mask;
5314                 tstorm_mac_filter.mcast_drop_all = mask;
5315                 tstorm_mac_filter.bcast_drop_all = mask;
5316                 break;
5317
5318         case BNX2X_RX_MODE_NORMAL:
5319                 tstorm_mac_filter.bcast_accept_all = mask;
5320                 break;
5321
5322         case BNX2X_RX_MODE_ALLMULTI:
5323                 tstorm_mac_filter.mcast_accept_all = mask;
5324                 tstorm_mac_filter.bcast_accept_all = mask;
5325                 break;
5326
5327         case BNX2X_RX_MODE_PROMISC:
5328                 tstorm_mac_filter.ucast_accept_all = mask;
5329                 tstorm_mac_filter.mcast_accept_all = mask;
5330                 tstorm_mac_filter.bcast_accept_all = mask;
5331                 /* pass management unicast packets as well */
5332                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5333                 break;
5334
5335         default:
5336                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5337                 break;
5338         }
5339
5340         REG_WR(bp,
5341                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5342                llh_mask);
5343
5344         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5345                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5346                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5347                        ((u32 *)&tstorm_mac_filter)[i]);
5348
5349 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5350                    ((u32 *)&tstorm_mac_filter)[i]); */
5351         }
5352
5353         if (mode != BNX2X_RX_MODE_NONE)
5354                 bnx2x_set_client_config(bp);
5355 }
5356
5357 static void bnx2x_init_internal_common(struct bnx2x *bp)
5358 {
5359         int i;
5360
5361         /* Zero this manually as its initialization is
5362            currently missing in the initTool */
5363         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5364                 REG_WR(bp, BAR_USTRORM_INTMEM +
5365                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5366 }
5367
5368 static void bnx2x_init_internal_port(struct bnx2x *bp)
5369 {
5370         int port = BP_PORT(bp);
5371
5372         REG_WR(bp,
5373                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5374         REG_WR(bp,
5375                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5376         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5377         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5378 }
5379
5380 static void bnx2x_init_internal_func(struct bnx2x *bp)
5381 {
5382         struct tstorm_eth_function_common_config tstorm_config = {0};
5383         struct stats_indication_flags stats_flags = {0};
5384         int port = BP_PORT(bp);
5385         int func = BP_FUNC(bp);
5386         int i, j;
5387         u32 offset;
5388         u16 max_agg_size;
5389
5390         if (is_multi(bp)) {
5391                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5392                 tstorm_config.rss_result_mask = MULTI_MASK;
5393         }
5394
5395         /* Enable TPA if needed */
5396         if (bp->flags & TPA_ENABLE_FLAG)
5397                 tstorm_config.config_flags |=
5398                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5399
5400         if (IS_E1HMF(bp))
5401                 tstorm_config.config_flags |=
5402                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5403
5404         tstorm_config.leading_client_id = BP_L_ID(bp);
5405
5406         REG_WR(bp, BAR_TSTRORM_INTMEM +
5407                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5408                (*(u32 *)&tstorm_config));
5409
5410         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5411         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5412         bnx2x_set_storm_rx_mode(bp);
5413
5414         for_each_queue(bp, i) {
5415                 u8 cl_id = bp->fp[i].cl_id;
5416
5417                 /* reset xstorm per client statistics */
5418                 offset = BAR_XSTRORM_INTMEM +
5419                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5420                 for (j = 0;
5421                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5422                         REG_WR(bp, offset + j*4, 0);
5423
5424                 /* reset tstorm per client statistics */
5425                 offset = BAR_TSTRORM_INTMEM +
5426                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5427                 for (j = 0;
5428                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5429                         REG_WR(bp, offset + j*4, 0);
5430
5431                 /* reset ustorm per client statistics */
5432                 offset = BAR_USTRORM_INTMEM +
5433                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5434                 for (j = 0;
5435                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5436                         REG_WR(bp, offset + j*4, 0);
5437         }
5438
5439         /* Init statistics related context */
5440         stats_flags.collect_eth = 1;
5441
5442         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5443                ((u32 *)&stats_flags)[0]);
5444         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5445                ((u32 *)&stats_flags)[1]);
5446
5447         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5448                ((u32 *)&stats_flags)[0]);
5449         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5450                ((u32 *)&stats_flags)[1]);
5451
5452         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5453                ((u32 *)&stats_flags)[0]);
5454         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5455                ((u32 *)&stats_flags)[1]);
5456
5457         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5458                ((u32 *)&stats_flags)[0]);
5459         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5460                ((u32 *)&stats_flags)[1]);
5461
5462         REG_WR(bp, BAR_XSTRORM_INTMEM +
5463                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5464                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5465         REG_WR(bp, BAR_XSTRORM_INTMEM +
5466                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5467                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5468
5469         REG_WR(bp, BAR_TSTRORM_INTMEM +
5470                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5471                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5472         REG_WR(bp, BAR_TSTRORM_INTMEM +
5473                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5474                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5475
5476         REG_WR(bp, BAR_USTRORM_INTMEM +
5477                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5478                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5479         REG_WR(bp, BAR_USTRORM_INTMEM +
5480                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5481                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5482
5483         if (CHIP_IS_E1H(bp)) {
5484                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5485                         IS_E1HMF(bp));
5486                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5487                         IS_E1HMF(bp));
5488                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5489                         IS_E1HMF(bp));
5490                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5491                         IS_E1HMF(bp));
5492
5493                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5494                          bp->e1hov);
5495         }
5496
5497         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5498         max_agg_size =
5499                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5500                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5501                     (u32)0xffff);
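        /* A worked example (page geometry assumed): with 4K SGE pages and
         * PAGES_PER_SGE == 2, min(8, MAX_SKB_FRAGS) * 4096 * 2 == 64K, which
         * the outer min() clips to the u16 limit 0xffff.
         */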
5502         for_each_rx_queue(bp, i) {
5503                 struct bnx2x_fastpath *fp = &bp->fp[i];
5504
5505                 REG_WR(bp, BAR_USTRORM_INTMEM +
5506                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5507                        U64_LO(fp->rx_comp_mapping));
5508                 REG_WR(bp, BAR_USTRORM_INTMEM +
5509                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5510                        U64_HI(fp->rx_comp_mapping));
5511
5512                 /* Next page */
5513                 REG_WR(bp, BAR_USTRORM_INTMEM +
5514                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5515                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5516                 REG_WR(bp, BAR_USTRORM_INTMEM +
5517                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5518                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5519
5520                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5521                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5522                          max_agg_size);
5523         }
5524
5525         /* dropless flow control */
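        /* The thresholds below are in ring-entry units; presumably the FW
         * asserts pause when the free BDs/CQEs (and SGEs when TPA is active)
         * fall below *_thr_low and releases it again above *_thr_high.
         */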
5526         if (CHIP_IS_E1H(bp)) {
5527                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5528
5529                 rx_pause.bd_thr_low = 250;
5530                 rx_pause.cqe_thr_low = 250;
5531                 rx_pause.cos = 1;
5532                 rx_pause.sge_thr_low = 0;
5533                 rx_pause.bd_thr_high = 350;
5534                 rx_pause.cqe_thr_high = 350;
5535                 rx_pause.sge_thr_high = 0;
5536
5537                 for_each_rx_queue(bp, i) {
5538                         struct bnx2x_fastpath *fp = &bp->fp[i];
5539
5540                         if (!fp->disable_tpa) {
5541                                 rx_pause.sge_thr_low = 150;
5542                                 rx_pause.sge_thr_high = 250;
5543                         }
5544
5546                         offset = BAR_USTRORM_INTMEM +
5547                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5548                                                                    fp->cl_id);
5549                         for (j = 0;
5550                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5551                              j++)
5552                                 REG_WR(bp, offset + j*4,
5553                                        ((u32 *)&rx_pause)[j]);
5554                 }
5555         }
5556
5557         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5558
5559         /* Init rate shaping and fairness contexts */
5560         if (IS_E1HMF(bp)) {
5561                 int vn;
5562
5563                 /* During init there is no active link
5564                    Until link is up, set link rate to 10Gbps */
5565                 bp->link_vars.line_speed = SPEED_10000;
5566                 bnx2x_init_port_minmax(bp);
5567
5568                 if (!BP_NOMCP(bp))
5569                         bp->mf_config =
5570                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5571                 bnx2x_calc_vn_weight_sum(bp);
5572
5573                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5574                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5575
5576                 /* Enable rate shaping and fairness */
5577                 bp->cmng.flags.cmng_enables |=
5578                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5579
5580         } else {
5581                 /* rate shaping and fairness are disabled */
5582                 DP(NETIF_MSG_IFUP,
5583                    "single function mode  minmax will be disabled\n");
5584         }
5585
5587         /* Store it to internal memory */
5588         if (bp->port.pmf)
5589                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5590                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5591                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5592                                ((u32 *)(&bp->cmng))[i]);
5593 }
5594
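/* The cases below fall through on purpose: a COMMON load also performs the
 * PORT and FUNCTION stages and a PORT load also performs the FUNCTION stage,
 * so each driver instance initializes exactly the scope the MCP granted it.
 */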
5595 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5596 {
5597         switch (load_code) {
5598         case FW_MSG_CODE_DRV_LOAD_COMMON:
5599                 bnx2x_init_internal_common(bp);
5600                 /* no break */
5601
5602         case FW_MSG_CODE_DRV_LOAD_PORT:
5603                 bnx2x_init_internal_port(bp);
5604                 /* no break */
5605
5606         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5607                 bnx2x_init_internal_func(bp);
5608                 break;
5609
5610         default:
5611                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5612                 break;
5613         }
5614 }
5615
5616 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5617 {
5618         int i;
5619
5620         for_each_queue(bp, i) {
5621                 struct bnx2x_fastpath *fp = &bp->fp[i];
5622
5623                 fp->bp = bp;
5624                 fp->state = BNX2X_FP_STATE_CLOSED;
5625                 fp->index = i;
5626                 fp->cl_id = BP_L_ID(bp) + i;
5627 #ifdef BCM_CNIC
5628                 fp->sb_id = fp->cl_id + 1;
5629 #else
5630                 fp->sb_id = fp->cl_id;
5631 #endif
5632                 /* Suitable Rx and Tx SBs are served by the same client */
5633                 if (i >= bp->num_rx_queues)
5634                         fp->cl_id -= bp->num_rx_queues;
5635                 DP(NETIF_MSG_IFUP,
5636                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5637                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5638                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5639                               fp->sb_id);
5640                 bnx2x_update_fpsb_idx(fp);
5641         }
5642
5643         /* ensure status block indices were read */
5644         rmb();
5645
5647         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5648                           DEF_SB_ID);
5649         bnx2x_update_dsb_idx(bp);
5650         bnx2x_update_coalesce(bp);
5651         bnx2x_init_rx_rings(bp);
5652         bnx2x_init_tx_ring(bp);
5653         bnx2x_init_sp_ring(bp);
5654         bnx2x_init_context(bp);
5655         bnx2x_init_internal(bp, load_code);
5656         bnx2x_init_ind_table(bp);
5657         bnx2x_stats_init(bp);
5658
5659         /* At this point, we are ready for interrupts */
5660         atomic_set(&bp->intr_sem, 0);
5661
5662         /* flush all before enabling interrupts */
5663         mb();
5664         mmiowb();
5665
5666         bnx2x_int_enable(bp);
5667
5668         /* Check for SPIO5 */
5669         bnx2x_attn_int_deasserted0(bp,
5670                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5671                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5672 }
5673
5674 /* end of nic init */
5675
5676 /*
5677  * gzip service functions
5678  */
5679
5680 static int bnx2x_gunzip_init(struct bnx2x *bp)
5681 {
5682         if (bp->gunzip_buf == NULL)
5683                                               &bp->gunzip_mapping);
5684         if (bp->gunzip_buf  == NULL)
5685                 goto gunzip_nomem1;
5686
5687         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5688         if (bp->strm == NULL)
5689                 goto gunzip_nomem2;
5690
5691         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5692                                       GFP_KERNEL);
5693         if (bp->strm->workspace == NULL)
5694                 goto gunzip_nomem3;
5695
5696         return 0;
5697
5698 gunzip_nomem3:
5699         kfree(bp->strm);
5700         bp->strm = NULL;
5701
5702 gunzip_nomem2:
5703         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5704                             bp->gunzip_mapping);
5705         bp->gunzip_buf = NULL;
5706
5707 gunzip_nomem1:
5708         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5709                " decompression\n", bp->dev->name);
5710         return -ENOMEM;
5711 }
5712
5713 static void bnx2x_gunzip_end(struct bnx2x *bp)
5714 {
5715         kfree(bp->strm->workspace);
5716
5717         kfree(bp->strm);
5718         bp->strm = NULL;
5719
5720         if (bp->gunzip_buf) {
5721                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5722                                     bp->gunzip_mapping);
5723                 bp->gunzip_buf = NULL;
5724         }
5725 }
5726
5727 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5728 {
5729         int n, rc;
5730
5731         /* check gzip header */
5732         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5733                 BNX2X_ERR("Bad gzip header\n");
5734                 return -EINVAL;
5735         }
5736
5737         n = 10;
5738
5739 #define FNAME                           0x8
5740
5741         if (zbuf[3] & FNAME)
5742                 while ((zbuf[n++] != 0) && (n < len));
5743
5744         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5745         bp->strm->avail_in = len - n;
5746         bp->strm->next_out = bp->gunzip_buf;
5747         bp->strm->avail_out = FW_BUF_SIZE;
5748
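        /* Negative windowBits requests a raw deflate stream from zlib: the
         * 10-byte gzip header (and the optional FNAME field) was consumed
         * above, so only the compressed payload is handed to inflate.
         */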
5749         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5750         if (rc != Z_OK)
5751                 return rc;
5752
5753         rc = zlib_inflate(bp->strm, Z_FINISH);
5754         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5755                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5756                        bp->dev->name, bp->strm->msg);
5757
5758         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5759         if (bp->gunzip_outlen & 0x3)
5760                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5761                                     " gunzip_outlen (%d) not aligned\n",
5762                        bp->dev->name, bp->gunzip_outlen);
5763         bp->gunzip_outlen >>= 2;
5764
5765         zlib_inflateEnd(bp->strm);
5766
5767         if (rc == Z_STREAM_END)
5768                 return 0;
5769
5770         return rc;
5771 }
5772
5773 /* nic load/unload */
5774
5775 /*
5776  * General service functions
5777  */
5778
5779 /* send a NIG loopback debug packet */
5780 static void bnx2x_lb_pckt(struct bnx2x *bp)
5781 {
5782         u32 wb_write[3];
5783
5784         /* Ethernet source and destination addresses */
5785         wb_write[0] = 0x55555555;
5786         wb_write[1] = 0x55555555;
5787         wb_write[2] = 0x20;             /* SOP */
5788         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5789
5790         /* NON-IP protocol */
5791         wb_write[0] = 0x09000000;
5792         wb_write[1] = 0x55555555;
5793         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5794         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5795 }
5796
5797 /* Some of the internal memories are not directly readable
5798  * from the driver; to test them, we send debug packets.
5799  */
5801 static int bnx2x_int_mem_test(struct bnx2x *bp)
5802 {
5803         int factor;
5804         int count, i;
5805         u32 val = 0;
5806
5807         if (CHIP_REV_IS_FPGA(bp))
5808                 factor = 120;
5809         else if (CHIP_REV_IS_EMUL(bp))
5810                 factor = 200;
5811         else
5812                 factor = 1;
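        /* 'factor' stretches the polling budgets below, since FPGA and
         * emulation platforms run the pipeline much slower than silicon.
         */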
5813
5814         DP(NETIF_MSG_HW, "start part1\n");
5815
5816         /* Disable inputs of parser neighbor blocks */
5817         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5818         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5819         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5820         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5821
5822         /*  Write 0 to parser credits for CFC search request */
5823         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5824
5825         /* send Ethernet packet */
5826         bnx2x_lb_pckt(bp);
5827
5828         /* TODO: should the NIG statistics be reset here? */
5829         /* Wait until NIG register shows 1 packet of size 0x10 */
5830         count = 1000 * factor;
5831         while (count) {
5832
5833                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5834                 val = *bnx2x_sp(bp, wb_data[0]);
5835                 if (val == 0x10)
5836                         break;
5837
5838                 msleep(10);
5839                 count--;
5840         }
5841         if (val != 0x10) {
5842                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5843                 return -1;
5844         }
5845
5846         /* Wait until PRS register shows 1 packet */
5847         count = 1000 * factor;
5848         while (count) {
5849                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5850                 if (val == 1)
5851                         break;
5852
5853                 msleep(10);
5854                 count--;
5855         }
5856         if (val != 0x1) {
5857                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5858                 return -2;
5859         }
5860
5861         /* Reset and init BRB, PRS */
5862         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5863         msleep(50);
5864         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5865         msleep(50);
5866         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5867         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5868
5869         DP(NETIF_MSG_HW, "part2\n");
5870
5871         /* Disable inputs of parser neighbor blocks */
5872         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5873         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5874         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5875         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5876
5877         /* Write 0 to parser credits for CFC search request */
5878         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5879
5880         /* send 10 Ethernet packets */
5881         for (i = 0; i < 10; i++)
5882                 bnx2x_lb_pckt(bp);
5883
5884         /* Wait until NIG register shows 10 + 1
5885            packets of size 11*0x10 = 0xb0 */
5886         count = 1000 * factor;
5887         while (count) {
5888
5889                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5890                 val = *bnx2x_sp(bp, wb_data[0]);
5891                 if (val == 0xb0)
5892                         break;
5893
5894                 msleep(10);
5895                 count--;
5896         }
5897         if (val != 0xb0) {
5898                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5899                 return -3;
5900         }
5901
5902         /* Wait until PRS register shows 2 packets */
5903         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5904         if (val != 2)
5905                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5906
5907         /* Write 1 to parser credits for CFC search request */
5908         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5909
5910         /* Give the parser time to consume the restored credit */
5911         msleep(10 * factor);
5912         /* Check that the PRS register now shows 3 packets */
5913         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5914         if (val != 3)
5915                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5916
5917         /* clear NIG EOP FIFO */
5918         for (i = 0; i < 11; i++)
5919                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5920         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5921         if (val != 1) {
5922                 BNX2X_ERR("clear of NIG failed\n");
5923                 return -4;
5924         }
5925
5926         /* Reset and init BRB, PRS, NIG */
5927         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5928         msleep(50);
5929         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5930         msleep(50);
5931         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5932         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5933 #ifndef BCM_CNIC
5934         /* set NIC mode */
5935         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5936 #endif
5937
5938         /* Enable inputs of parser neighbor blocks */
5939         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5940         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5941         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5942         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5943
5944         DP(NETIF_MSG_HW, "done\n");
5945
5946         return 0; /* OK */
5947 }
5948
5949 static void enable_blocks_attention(struct bnx2x *bp)
5950 {
5951         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5952         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5953         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5954         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5955         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5956         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5957         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5958         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5959         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5960 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5961 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5962         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5963         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5964         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5965 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5966 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5967         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5968         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5969         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5970         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5971 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5972 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5973         if (CHIP_REV_IS_FPGA(bp))
5974                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5975         else
5976                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5977         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5978         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5979         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5980 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5981 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5982         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5983         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5984 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5985         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5986 }
5987
5989 static void bnx2x_reset_common(struct bnx2x *bp)
5990 {
5991         /* reset_common */
5992         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5993                0xd3ffff7f);
5994         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5995 }
5996
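/* The PXP read/write orders come from the PCIe Device Control register:
 * bits 7:5 encode Max_Payload_Size and bits 14:12 Max_Read_Request_Size
 * (both as 128 << n bytes); bp->mrrs, when not -1, forces the read order
 * instead.
 */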
5997 static void bnx2x_init_pxp(struct bnx2x *bp)
5998 {
5999         u16 devctl;
6000         int r_order, w_order;
6001
6002         pci_read_config_word(bp->pdev,
6003                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6004         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6005         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6006         if (bp->mrrs == -1)
6007                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6008         else {
6009                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6010                 r_order = bp->mrrs;
6011         }
6012
6013         bnx2x_init_pxp_arb(bp, r_order, w_order);
6014 }
6015
6016 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6017 {
6018         u32 val;
6019         u8 port;
6020         u8 is_required = 0;
6021
6022         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6023               SHARED_HW_CFG_FAN_FAILURE_MASK;
6024
6025         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6026                 is_required = 1;
6027
6028         /*
6029          * The fan failure mechanism is usually related to the PHY type since
6030          * the power consumption of the board is affected by the PHY. Currently,
6031          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6032          */
6033         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6034                 for (port = PORT_0; port < PORT_MAX; port++) {
6035                         u32 phy_type =
6036                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6037                                          external_phy_config) &
6038                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6039                         is_required |=
6040                                 ((phy_type ==
6041                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6042                                  (phy_type ==
6043                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6044                                  (phy_type ==
6045                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6046                 }
6047
6048         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6049
6050         if (is_required == 0)
6051                 return;
6052
6053         /* Fan failure is indicated by SPIO 5 */
6054         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6055                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6056
6057         /* set to active low mode */
6058         val = REG_RD(bp, MISC_REG_SPIO_INT);
6059         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6060                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6061         REG_WR(bp, MISC_REG_SPIO_INT, val);
6062
6063         /* enable interrupt to signal the IGU */
6064         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6065         val |= (1 << MISC_REGISTERS_SPIO_5);
6066         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6067 }
6068
6069 static int bnx2x_init_common(struct bnx2x *bp)
6070 {
6071         u32 val, i;
6072 #ifdef BCM_CNIC
6073         u32 wb_write[2];
6074 #endif
6075
6076         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6077
6078         bnx2x_reset_common(bp);
6079         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6080         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6081
6082         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6083         if (CHIP_IS_E1H(bp))
6084                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6085
6086         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6087         msleep(30);
6088         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6089
6090         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6091         if (CHIP_IS_E1(bp)) {
6092                 /* enable HW interrupt from PXP on USDM overflow
6093                    bit 16 on INT_MASK_0 */
6094                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6095         }
6096
6097         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6098         bnx2x_init_pxp(bp);
6099
6100 #ifdef __BIG_ENDIAN
6101         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6102         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6103         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6104         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6105         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6106         /* make sure this value is 0 */
6107         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6108
6109 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6110         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6111         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6112         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6113         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6114 #endif
6115
6116         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6117 #ifdef BCM_CNIC
6118         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6119         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6120         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6121 #endif
6122
6123         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6124                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6125
6126         /* let the HW do its magic ... */
6127         msleep(100);
6128         /* finish PXP init */
6129         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6130         if (val != 1) {
6131                 BNX2X_ERR("PXP2 CFG failed\n");
6132                 return -EBUSY;
6133         }
6134         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6135         if (val != 1) {
6136                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6137                 return -EBUSY;
6138         }
6139
6140         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6141         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6142
6143         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6144
6145         /* clean the DMAE memory */
6146         bp->dmae_ready = 1;
6147         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6148
6149         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6150         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6151         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6152         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6153
6154         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6155         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6156         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6157         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6158
6159         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6160
6161 #ifdef BCM_CNIC
6162         wb_write[0] = 0;
6163         wb_write[1] = 0;
6164         for (i = 0; i < 64; i++) {
6165                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6166                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6167
6168                 if (CHIP_IS_E1H(bp)) {
6169                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6170                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6171                                           wb_write, 2);
6172                 }
6173         }
6174 #endif
6175         /* soft reset pulse */
6176         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6177         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6178
6179 #ifdef BCM_CNIC
6180         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6181 #endif
6182
6183         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6184         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6185         if (!CHIP_REV_IS_SLOW(bp)) {
6186                 /* enable hw interrupt from doorbell Q */
6187                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6188         }
6189
6190         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6191         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6192         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6193 #ifndef BCM_CNIC
6194         /* set NIC mode */
6195         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6196 #endif
6197         if (CHIP_IS_E1H(bp))
6198                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6199
6200         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6201         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6202         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6203         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6204
6205         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6206         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6207         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6208         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6209
6210         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6211         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6212         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6213         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6214
6215         /* sync semi rtc */
6216         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6217                0x80000000);
6218         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6219                0x80000000);
6220
6221         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6222         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6223         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6224
6225         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6226         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6227                 REG_WR(bp, i, 0xc0cac01a);
6228                 /* TODO: replace with something meaningful */
6229         }
6230         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6231 #ifdef BCM_CNIC
6232         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6233         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6234         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6235         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6236         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6237         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6238         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6239         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6240         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6241         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6242 #endif
6243         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6244
6245         if (sizeof(union cdu_context) != 1024)
6246                 /* we currently assume that a context is 1024 bytes */
6247                 printk(KERN_ALERT PFX "please adjust the size of"
6248                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6249
6250         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6251         val = (4 << 24) + (0 << 12) + 1024;
6252         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6253
6254         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6255         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6256         /* enable context validation interrupt from CFC */
6257         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6258
6259         /* set the thresholds to prevent CFC/CDU race */
6260         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6261
6262         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6263         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6264
6265         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6266         /* Reset PCIE errors for debug */
6267         REG_WR(bp, 0x2814, 0xffffffff);
6268         REG_WR(bp, 0x3820, 0xffffffff);
6269
6270         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6271         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6272         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6273         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6274
6275         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6276         if (CHIP_IS_E1H(bp)) {
6277                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6278                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6279         }
6280
6281         if (CHIP_REV_IS_SLOW(bp))
6282                 msleep(200);
6283
6284         /* finish CFC init */
6285         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6286         if (val != 1) {
6287                 BNX2X_ERR("CFC LL_INIT failed\n");
6288                 return -EBUSY;
6289         }
6290         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6291         if (val != 1) {
6292                 BNX2X_ERR("CFC AC_INIT failed\n");
6293                 return -EBUSY;
6294         }
6295         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6296         if (val != 1) {
6297                 BNX2X_ERR("CFC CAM_INIT failed\n");
6298                 return -EBUSY;
6299         }
6300         REG_WR(bp, CFC_REG_DEBUG0, 0);
6301
6302         /* read NIG statistic
6303            to see if this is our first up since powerup */
6304         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6305         val = *bnx2x_sp(bp, wb_data[0]);
6306
6307         /* do internal memory self test */
6308         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6309                 BNX2X_ERR("internal mem self test failed\n");
6310                 return -EBUSY;
6311         }
6312
6313         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6314         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6315         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6316         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6317         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6318                 bp->port.need_hw_lock = 1;
6319                 break;
6320
6321         default:
6322                 break;
6323         }
6324
6325         bnx2x_setup_fan_failure_detection(bp);
6326
6327         /* clear PXP2 attentions */
6328         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6329
6330         enable_blocks_attention(bp);
6331
6332         if (!BP_NOMCP(bp)) {
6333                 bnx2x_acquire_phy_lock(bp);
6334                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6335                 bnx2x_release_phy_lock(bp);
6336         } else
6337                 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
6338
6339         return 0;
6340 }
6341
6342 static int bnx2x_init_port(struct bnx2x *bp)
6343 {
6344         int port = BP_PORT(bp);
6345         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6346         u32 low, high;
6347         u32 val;
6348
6349         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6350
6351         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6352
6353         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6354         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6355
6356         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6357         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6358         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6359         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6360
6361 #ifdef BCM_CNIC
6362         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6363
6364         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6365         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6366         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6367 #endif
6368         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6369
6370         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6371         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6372                 /* no pause for emulation and FPGA */
6373                 low = 0;
6374                 high = 513;
6375         } else {
6376                 if (IS_E1HMF(bp))
6377                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6378                 else if (bp->dev->mtu > 4096) {
6379                         if (bp->flags & ONE_PORT_FLAG)
6380                                 low = 160;
6381                         else {
6382                                 val = bp->dev->mtu;
6383                                 /* (24*1024 + val*4)/256 */
6384                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6385                         }
6386                 } else
6387                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6388                 high = low + 56;        /* 14*1024/256 */
6389         }
6390         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6391         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6392
6394         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6395
6396         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6397         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6398         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6399         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6400
6401         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6402         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6403         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6404         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6405
6406         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6407         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6408
6409         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6410
6411         /* configure PBF to work without PAUSE mtu 9000 */
6412         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6413
6414         /* update threshold */
6415         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6416         /* update init credit */
6417         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6418
6419         /* probe changes */
6420         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6421         msleep(5);
6422         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6423
6424 #ifdef BCM_CNIC
6425         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6426 #endif
6427         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6428         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6429
6430         if (CHIP_IS_E1(bp)) {
6431                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6432                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6433         }
6434         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6435
6436         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6437         /* init aeu_mask_attn_func_0/1:
6438          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6439          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6440          *             bits 4-7 are used for "per vn group attention" */
6441         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6442                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6443
6444         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6445         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6446         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6447         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6448         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6449
6450         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6451
6452         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6453
6454         if (CHIP_IS_E1H(bp)) {
6455                 /* 0x2 disable e1hov, 0x1 enable */
6456                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6457                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6458
6459                 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6460                 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6461                 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6464         }
6465
6466         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6467         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6468
6469         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6470         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6471                 {
6472                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6473
6474                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6475                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6476
6477                 /* The GPIO should be swapped if the swap register is
6478                    set and active */
6479                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6480                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6481
6482                 /* Select function upon port-swap configuration */
6483                 if (port == 0) {
6484                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6485                         aeu_gpio_mask = (swap_val && swap_override) ?
6486                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6487                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6488                 } else {
6489                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6490                         aeu_gpio_mask = (swap_val && swap_override) ?
6491                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6492                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6493                 }
6494                 val = REG_RD(bp, offset);
6495                 /* add GPIO3 to group */
6496                 val |= aeu_gpio_mask;
6497                 REG_WR(bp, offset, val);
6498                 }
6499                 break;
6500
6501         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6502         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6503                 /* add SPIO 5 to group 0 */
6504                 {
6505                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6506                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6507                 val = REG_RD(bp, reg_addr);
6508                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6509                 REG_WR(bp, reg_addr, val);
6510                 }
6511                 break;
6512
6513         default:
6514                 break;
6515         }
6516
6517         bnx2x__link_reset(bp);
6518
6519         return 0;
6520 }
6521
6522 #define ILT_PER_FUNC            (768/2)
6523 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6524 /* The phys address is shifted right 12 bits and has a valid (1)
6525    bit added at the 53rd bit; since this is a wide register(TM),
6526    we split it into two 32-bit writes.
6527  */
6529 #define ONCHIP_ADDR1(x)         ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
6530 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)(x) >> 44)))
6531 #define PXP_ONE_ILT(x)          (((x) << 10) | (x))
6532 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | (f))
6533
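/* A worked example (address assumed): for a DMA address of 0x0123456000,
 * ONCHIP_ADDR1() yields 0x00123456 and ONCHIP_ADDR2() yields 0x00100000,
 * i.e. the valid bit (1 << 20, bit 52 of the 64-bit entry) OR'ed with the
 * address bits above bit 43 (zero here).
 */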
6534 #ifdef BCM_CNIC
6535 #define CNIC_ILT_LINES          127
6536 #define CNIC_CTX_PER_ILT        16
6537 #else
6538 #define CNIC_ILT_LINES          0
6539 #endif
6540
6541 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6542 {
6543         int reg;
6544
6545         if (CHIP_IS_E1H(bp))
6546                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6547         else /* E1 */
6548                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6549
6550         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6551 }
6552
6553 static int bnx2x_init_func(struct bnx2x *bp)
6554 {
6555         int port = BP_PORT(bp);
6556         int func = BP_FUNC(bp);
6557         u32 addr, val;
6558         int i;
6559
6560         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6561
6562         /* set MSI reconfigure capability */
6563         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6564         val = REG_RD(bp, addr);
6565         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6566         REG_WR(bp, addr, val);
6567
6568         i = FUNC_ILT_BASE(func);
6569
6570         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6571         if (CHIP_IS_E1H(bp)) {
6572                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6573                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6574         } else /* E1 */
6575                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6576                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6577
6578 #ifdef BCM_CNIC
6579         i += 1 + CNIC_ILT_LINES;
6580         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6581         if (CHIP_IS_E1(bp))
6582                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6583         else {
6584                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6585                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6586         }
6587
6588         i++;
6589         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6590         if (CHIP_IS_E1(bp))
6591                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6592         else {
6593                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6594                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6595         }
6596
6597         i++;
6598         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6599         if (CHIP_IS_E1(bp))
6600                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6601         else {
6602                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6603                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6604         }
6605
6606         /* tell the searcher where the T2 table is */
6607         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6608
6609         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6610                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6611
6612         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6613                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6614                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6615
6616         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6617 #endif
6618
6619         if (CHIP_IS_E1H(bp)) {
6620                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6621                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6622                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6623                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6624                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6625                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6626                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6627                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6628                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6629
6630                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6631                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6632         }
6633
6634         /* HC init per function */
6635         if (CHIP_IS_E1H(bp)) {
6636                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6637
6638                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6639                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6640         }
6641         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6642
6643         /* Reset PCIE errors for debug */
6644         REG_WR(bp, 0x2114, 0xffffffff);
6645         REG_WR(bp, 0x2120, 0xffffffff);
6646
6647         return 0;
6648 }
6649
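/* As in bnx2x_init_internal(), the load_code cases fall through so that the
 * instance granted COMMON also runs the PORT and FUNCTION stages; dmae_ready
 * starts at 0 and is raised once the DMAE block is usable (inside the common
 * stage, or immediately for PORT/FUNCTION loads where COMMON already ran).
 */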
6650 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6651 {
6652         int i, rc = 0;
6653
6654         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6655            BP_FUNC(bp), load_code);
6656
6657         bp->dmae_ready = 0;
6658         mutex_init(&bp->dmae_mutex);
6659         rc = bnx2x_gunzip_init(bp);
6660         if (rc)
6661                 return rc;
6662
6663         switch (load_code) {
6664         case FW_MSG_CODE_DRV_LOAD_COMMON:
6665                 rc = bnx2x_init_common(bp);
6666                 if (rc)
6667                         goto init_hw_err;
6668                 /* no break */
6669
6670         case FW_MSG_CODE_DRV_LOAD_PORT:
6671                 bp->dmae_ready = 1;
6672                 rc = bnx2x_init_port(bp);
6673                 if (rc)
6674                         goto init_hw_err;
6675                 /* no break */
6676
6677         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6678                 bp->dmae_ready = 1;
6679                 rc = bnx2x_init_func(bp);
6680                 if (rc)
6681                         goto init_hw_err;
6682                 break;
6683
6684         default:
6685                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6686                 break;
6687         }
6688
6689         if (!BP_NOMCP(bp)) {
6690                 int func = BP_FUNC(bp);
6691
6692                 bp->fw_drv_pulse_wr_seq =
6693                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6694                                  DRV_PULSE_SEQ_MASK);
6695                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6696         }
6697
6698         /* this needs to be done before gunzip end */
6699         bnx2x_zero_def_sb(bp);
6700         for_each_queue(bp, i)
6701                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6702 #ifdef BCM_CNIC
6703         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6704 #endif
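        /* After the loop above i equals the number of queues, so the
         * BCM_CNIC branch zeroes the CNIC status block that immediately
         * follows the fastpath ones. */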
6705
6706 init_hw_err:
6707         bnx2x_gunzip_end(bp);
6708
6709         return rc;
6710 }
6711
6712 static void bnx2x_free_mem(struct bnx2x *bp)
6713 {
6714
6715 #define BNX2X_PCI_FREE(x, y, size) \
6716         do { \
6717                 if (x) { \
6718                         pci_free_consistent(bp->pdev, size, x, y); \
6719                         x = NULL; \
6720                         y = 0; \
6721                 } \
6722         } while (0)
6723
6724 #define BNX2X_FREE(x) \
6725         do { \
6726                 if (x) { \
6727                         vfree(x); \
6728                         x = NULL; \
6729                 } \
6730         } while (0)
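/*
 * Both helpers NULL their first argument after freeing it, so a repeated
 * call on the same resource is a safe no-op.  An illustrative (purely
 * hypothetical) double free:
 *
 *	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 *	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 *
 * The second call sees bp->spq == NULL and does nothing.
 */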
6731
6732         int i;
6733
6734         /* fastpath */
6735         /* Common */
6736         for_each_queue(bp, i) {
6737
6738                 /* status blocks */
6739                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6740                                bnx2x_fp(bp, i, status_blk_mapping),
6741                                sizeof(struct host_status_block));
6742         }
6743         /* Rx */
6744         for_each_rx_queue(bp, i) {
6745
6746                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6747                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6748                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6749                                bnx2x_fp(bp, i, rx_desc_mapping),
6750                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6751
6752                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6753                                bnx2x_fp(bp, i, rx_comp_mapping),
6754                                sizeof(struct eth_fast_path_rx_cqe) *
6755                                NUM_RCQ_BD);
6756
6757                 /* SGE ring */
6758                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6759                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6760                                bnx2x_fp(bp, i, rx_sge_mapping),
6761                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6762         }
6763         /* Tx */
6764         for_each_tx_queue(bp, i) {
6765
6766                 /* fastpath tx rings: tx_buf tx_desc */
6767                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6768                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6769                                bnx2x_fp(bp, i, tx_desc_mapping),
6770                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6771         }
6772         /* end of fastpath */
6773
6774         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6775                        sizeof(struct host_def_status_block));
6776
6777         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6778                        sizeof(struct bnx2x_slowpath));
6779
6780 #ifdef BCM_CNIC
6781         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6782         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6783         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6784         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6785         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6786                        sizeof(struct host_status_block));
6787 #endif
6788         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6789
6790 #undef BNX2X_PCI_FREE
6791 #undef BNX2X_FREE
6792 }
6793
6794 static int bnx2x_alloc_mem(struct bnx2x *bp)
6795 {
6796
6797 #define BNX2X_PCI_ALLOC(x, y, size) \
6798         do { \
6799                 x = pci_alloc_consistent(bp->pdev, size, y); \
6800                 if (x == NULL) \
6801                         goto alloc_mem_err; \
6802                 memset(x, 0, size); \
6803         } while (0)
6804
6805 #define BNX2X_ALLOC(x, size) \
6806         do { \
6807                 x = vmalloc(size); \
6808                 if (x == NULL) \
6809                         goto alloc_mem_err; \
6810                 memset(x, 0, size); \
6811         } while (0)
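/*
 * These allocators mirror the BNX2X_PCI_FREE/BNX2X_FREE pair above:
 * DMA-coherent memory for the rings the chip accesses, vmalloc()ed
 * memory for the host-only shadow rings.  Any failure jumps to
 * alloc_mem_err, where bnx2x_free_mem() safely unwinds the partial
 * allocation (assuming, as for netdev private data, that bp was
 * zero-initialized so unallocated pointers are NULL).
 */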
6812
6813         int i;
6814
6815         /* fastpath */
6816         /* Common */
6817         for_each_queue(bp, i) {
6818                 bnx2x_fp(bp, i, bp) = bp;
6819
6820                 /* status blocks */
6821                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6822                                 &bnx2x_fp(bp, i, status_blk_mapping),
6823                                 sizeof(struct host_status_block));
6824         }
6825         /* Rx */
6826         for_each_rx_queue(bp, i) {
6827
6828                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6829                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6830                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6831                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6832                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6833                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6834
6835                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6836                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6837                                 sizeof(struct eth_fast_path_rx_cqe) *
6838                                 NUM_RCQ_BD);
6839
6840                 /* SGE ring */
6841                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6842                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6843                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6844                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6845                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6846         }
6847         /* Tx */
6848         for_each_tx_queue(bp, i) {
6849
6850                 /* fastpath tx rings: tx_buf tx_desc */
6851                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6852                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6853                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6854                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6855                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6856         }
6857         /* end of fastpath */
6858
6859         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6860                         sizeof(struct host_def_status_block));
6861
6862         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6863                         sizeof(struct bnx2x_slowpath));
6864
6865 #ifdef BCM_CNIC
6866         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6867
6868         /* allocate searcher T2 table;
6869            we allocate 1/4 of the T1 size for T2
6870            (T2 is not entered into the ILT) */
6871         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6872
6873         /* Initialize T2 (for 1024 connections) */
6874         for (i = 0; i < 16*1024; i += 64)
6875                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
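        /* Each 64-byte T2 entry now ends (at offset 56) with the DMA
         * address of the entry that follows it, turning the table into a
         * singly linked free list for the searcher; the last entry points
         * just past the end of the table. */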
6876
6877         /* Timer block array (8*MAX_CONN), physically uncached; sized for 1024 conns for now */
6878         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6879
6880         /* QM queues (128*MAX_CONN) */
6881         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6882
6883         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6884                         sizeof(struct host_status_block));
6885 #endif
6886
6887         /* Slow path ring */
6888         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6889
6890         return 0;
6891
6892 alloc_mem_err:
6893         bnx2x_free_mem(bp);
6894         return -ENOMEM;
6895
6896 #undef BNX2X_PCI_ALLOC
6897 #undef BNX2X_ALLOC
6898 }
6899
6900 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6901 {
6902         int i;
6903
6904         for_each_tx_queue(bp, i) {
6905                 struct bnx2x_fastpath *fp = &bp->fp[i];
6906
6907                 u16 bd_cons = fp->tx_bd_cons;
6908                 u16 sw_prod = fp->tx_pkt_prod;
6909                 u16 sw_cons = fp->tx_pkt_cons;
6910
6911                 while (sw_cons != sw_prod) {
6912                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6913                         sw_cons++;
6914                 }
6915         }
6916 }
6917
6918 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6919 {
6920         int i, j;
6921
6922         for_each_rx_queue(bp, j) {
6923                 struct bnx2x_fastpath *fp = &bp->fp[j];
6924
6925                 for (i = 0; i < NUM_RX_BD; i++) {
6926                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6927                         struct sk_buff *skb = rx_buf->skb;
6928
6929                         if (skb == NULL)
6930                                 continue;
6931
6932                         pci_unmap_single(bp->pdev,
6933                                          pci_unmap_addr(rx_buf, mapping),
6934                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6935
6936                         rx_buf->skb = NULL;
6937                         dev_kfree_skb(skb);
6938                 }
6939                 if (!fp->disable_tpa)
6940                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6941                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6942                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6943         }
6944 }
6945
6946 static void bnx2x_free_skbs(struct bnx2x *bp)
6947 {
6948         bnx2x_free_tx_skbs(bp);
6949         bnx2x_free_rx_skbs(bp);
6950 }
6951
6952 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6953 {
6954         int i, offset = 1;
6955
6956         free_irq(bp->msix_table[0].vector, bp->dev);
6957         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6958            bp->msix_table[0].vector);
6959
6960 #ifdef BCM_CNIC
6961         offset++;
6962 #endif
6963         for_each_queue(bp, i) {
6964                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6965                    "state %x\n", i, bp->msix_table[i + offset].vector,
6966                    bnx2x_fp(bp, i, state));
6967
6968                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6969         }
6970 }
6971
6972 static void bnx2x_free_irq(struct bnx2x *bp)
6973 {
6974         if (bp->flags & USING_MSIX_FLAG) {
6975                 bnx2x_free_msix_irqs(bp);
6976                 pci_disable_msix(bp->pdev);
6977                 bp->flags &= ~USING_MSIX_FLAG;
6978
6979         } else if (bp->flags & USING_MSI_FLAG) {
6980                 free_irq(bp->pdev->irq, bp->dev);
6981                 pci_disable_msi(bp->pdev);
6982                 bp->flags &= ~USING_MSI_FLAG;
6983
6984         } else
6985                 free_irq(bp->pdev->irq, bp->dev);
6986 }
6987
6988 static int bnx2x_enable_msix(struct bnx2x *bp)
6989 {
6990         int i, rc, offset = 1;
6991         int igu_vec = 0;
6992
6993         bp->msix_table[0].entry = igu_vec;
6994         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6995
6996 #ifdef BCM_CNIC
6997         igu_vec = BP_L_ID(bp) + offset;
6998         bp->msix_table[1].entry = igu_vec;
6999         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7000         offset++;
7001 #endif
7002         for_each_queue(bp, i) {
7003                 igu_vec = BP_L_ID(bp) + offset + i;
7004                 bp->msix_table[i + offset].entry = igu_vec;
7005                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7006                    "(fastpath #%u)\n", i + offset, igu_vec, i);
7007         }
7008
7009         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7010                              BNX2X_NUM_QUEUES(bp) + offset);
7011         if (rc) {
7012                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
7013                 return rc;
7014         }
7015
7016         bp->flags |= USING_MSIX_FLAG;
7017
7018         return 0;
7019 }
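/*
 * Resulting msix_table layout (illustrative):
 *
 *	[0]          slowpath
 *	[1]          CNIC (only when BCM_CNIC is defined)
 *	[offset + i] fastpath queue i, with offset 1 or 2 accordingly
 *
 * so BNX2X_NUM_QUEUES(bp) + offset vectors are requested in total.
 */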
7020
7021 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7022 {
7023         int i, rc, offset = 1;
7024
7025         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7026                          bp->dev->name, bp->dev);
7027         if (rc) {
7028                 BNX2X_ERR("request sp irq failed\n");
7029                 return -EBUSY;
7030         }
7031
7032 #ifdef BCM_CNIC
7033         offset++;
7034 #endif
7035         for_each_queue(bp, i) {
7036                 struct bnx2x_fastpath *fp = &bp->fp[i];
7037
7038                 if (i < bp->num_rx_queues)
7039                         sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7040                 else
7041                         sprintf(fp->name, "%s-tx-%d",
7042                                 bp->dev->name, i - bp->num_rx_queues);
7043
7044                 rc = request_irq(bp->msix_table[i + offset].vector,
7045                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7046                 if (rc) {
7047                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7048                         bnx2x_free_msix_irqs(bp);
7049                         return -EBUSY;
7050                 }
7051
7052                 fp->state = BNX2X_FP_STATE_IRQ;
7053         }
7054
7055         i = BNX2X_NUM_QUEUES(bp);
7056         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
7057                " ... fp[%d] %d\n",
7058                bp->dev->name, bp->msix_table[0].vector,
7059                0, bp->msix_table[offset].vector,
7060                i - 1, bp->msix_table[offset + i - 1].vector);
7061
7062         return 0;
7063 }
7064
7065 static int bnx2x_enable_msi(struct bnx2x *bp)
7066 {
7067         int rc;
7068
7069         rc = pci_enable_msi(bp->pdev);
7070         if (rc) {
7071                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7072                 return -1;
7073         }
7074         bp->flags |= USING_MSI_FLAG;
7075
7076         return 0;
7077 }
7078
7079 static int bnx2x_req_irq(struct bnx2x *bp)
7080 {
7081         unsigned long flags;
7082         int rc;
7083
7084         if (bp->flags & USING_MSI_FLAG)
7085                 flags = 0;
7086         else
7087                 flags = IRQF_SHARED;
7088
7089         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7090                          bp->dev->name, bp->dev);
7091         if (!rc)
7092                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7093
7094         return rc;
7095 }
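/*
 * A legacy INTx line may be shared with other devices, hence IRQF_SHARED
 * above; an MSI vector is exclusive to the device, so no sharing flag is
 * needed in that case.
 */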
7096
7097 static void bnx2x_napi_enable(struct bnx2x *bp)
7098 {
7099         int i;
7100
7101         for_each_rx_queue(bp, i)
7102                 napi_enable(&bnx2x_fp(bp, i, napi));
7103 }
7104
7105 static void bnx2x_napi_disable(struct bnx2x *bp)
7106 {
7107         int i;
7108
7109         for_each_rx_queue(bp, i)
7110                 napi_disable(&bnx2x_fp(bp, i, napi));
7111 }
7112
7113 static void bnx2x_netif_start(struct bnx2x *bp)
7114 {
7115         int intr_sem;
7116
7117         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7118         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7119
7120         if (intr_sem) {
7121                 if (netif_running(bp->dev)) {
7122                         bnx2x_napi_enable(bp);
7123                         bnx2x_int_enable(bp);
7124                         if (bp->state == BNX2X_STATE_OPEN)
7125                                 netif_tx_wake_all_queues(bp->dev);
7126                 }
7127         }
7128 }
7129
7130 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7131 {
7132         bnx2x_int_disable_sync(bp, disable_hw);
7133         bnx2x_napi_disable(bp);
7134         netif_tx_disable(bp->dev);
7135         bp->dev->trans_start = jiffies; /* prevent tx timeout */
7136 }
7137
7138 /*
7139  * Init service functions
7140  */
7141
7142 /**
7143  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7144  *
7145  * @param bp driver descriptor
7146  * @param set set or clear an entry (1 or 0)
7147  * @param mac pointer to a buffer containing a MAC
7148  * @param cl_bit_vec bit vector of clients to register a MAC for
7149  * @param cam_offset offset in a CAM to use
7150  * @param with_bcast set broadcast MAC as well
7151  */
7152 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7153                                       u32 cl_bit_vec, u8 cam_offset,
7154                                       u8 with_bcast)
7155 {
7156         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7157         int port = BP_PORT(bp);
7158
7159         /* CAM allocation
7160          * unicasts 0-31:port0 32-63:port1
7161          * multicast 64-127:port0 128-191:port1
7162          */
7163         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7164         config->hdr.offset = cam_offset;
7165         config->hdr.client_id = 0xff;
7166         config->hdr.reserved1 = 0;
7167
7168         /* primary MAC */
7169         config->config_table[0].cam_entry.msb_mac_addr =
7170                                         swab16(*(u16 *)&mac[0]);
7171         config->config_table[0].cam_entry.middle_mac_addr =
7172                                         swab16(*(u16 *)&mac[2]);
7173         config->config_table[0].cam_entry.lsb_mac_addr =
7174                                         swab16(*(u16 *)&mac[4]);
7175         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7176         if (set)
7177                 config->config_table[0].target_table_entry.flags = 0;
7178         else
7179                 CAM_INVALIDATE(config->config_table[0]);
7180         config->config_table[0].target_table_entry.clients_bit_vector =
7181                                                 cpu_to_le32(cl_bit_vec);
7182         config->config_table[0].target_table_entry.vlan_id = 0;
7183
7184         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7185            (set ? "setting" : "clearing"),
7186            config->config_table[0].cam_entry.msb_mac_addr,
7187            config->config_table[0].cam_entry.middle_mac_addr,
7188            config->config_table[0].cam_entry.lsb_mac_addr);
7189
7190         /* broadcast */
7191         if (with_bcast) {
7192                 config->config_table[1].cam_entry.msb_mac_addr =
7193                         cpu_to_le16(0xffff);
7194                 config->config_table[1].cam_entry.middle_mac_addr =
7195                         cpu_to_le16(0xffff);
7196                 config->config_table[1].cam_entry.lsb_mac_addr =
7197                         cpu_to_le16(0xffff);
7198                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7199                 if (set)
7200                         config->config_table[1].target_table_entry.flags =
7201                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7202                 else
7203                         CAM_INVALIDATE(config->config_table[1]);
7204                 config->config_table[1].target_table_entry.clients_bit_vector =
7205                                                         cpu_to_le32(cl_bit_vec);
7206                 config->config_table[1].target_table_entry.vlan_id = 0;
7207         }
7208
7209         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7210                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7211                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7212 }
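/*
 * Typical invocation, as used by the callers further below: the primary
 * Ethernet MAC goes to CAM offset 0 (port 0) or 32 (port 1) together with
 * the broadcast entry, and the iSCSI MAC uses the same base offset + 2:
 *
 *	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
 *				  (1 << bp->fp->cl_id),
 *				  (BP_PORT(bp) ? 32 : 0), 1);
 */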
7213
7214 /**
7215  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7216  *
7217  * @param bp driver descriptor
7218  * @param set set or clear an entry (1 or 0)
7219  * @param mac pointer to a buffer containing a MAC
7220  * @param cl_bit_vec bit vector of clients to register a MAC for
7221  * @param cam_offset offset in a CAM to use
7222  */
7223 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7224                                        u32 cl_bit_vec, u8 cam_offset)
7225 {
7226         struct mac_configuration_cmd_e1h *config =
7227                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7228
7229         config->hdr.length = 1;
7230         config->hdr.offset = cam_offset;
7231         config->hdr.client_id = 0xff;
7232         config->hdr.reserved1 = 0;
7233
7234         /* primary MAC */
7235         config->config_table[0].msb_mac_addr =
7236                                         swab16(*(u16 *)&mac[0]);
7237         config->config_table[0].middle_mac_addr =
7238                                         swab16(*(u16 *)&mac[2]);
7239         config->config_table[0].lsb_mac_addr =
7240                                         swab16(*(u16 *)&mac[4]);
7241         config->config_table[0].clients_bit_vector =
7242                                         cpu_to_le32(cl_bit_vec);
7243         config->config_table[0].vlan_id = 0;
7244         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7245         if (set)
7246                 config->config_table[0].flags = BP_PORT(bp);
7247         else
7248                 config->config_table[0].flags =
7249                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7250
7251         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7252            (set ? "setting" : "clearing"),
7253            config->config_table[0].msb_mac_addr,
7254            config->config_table[0].middle_mac_addr,
7255            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7256
7257         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7258                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7259                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7260 }
7261
7262 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7263                              int *state_p, int poll)
7264 {
7265         /* can take a while if any port is running */
7266         int cnt = 5000;
7267
7268         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7269            poll ? "polling" : "waiting", state, idx);
7270
7271         might_sleep();
7272         while (cnt--) {
7273                 if (poll) {
7274                         bnx2x_rx_int(bp->fp, 10);
7275                         /* if the index is different from 0,
7276                          * the reply for some commands will
7277                          * be on the non-default queue
7278                          */
7279                         if (idx)
7280                                 bnx2x_rx_int(&bp->fp[idx], 10);
7281                 }
7282
7283                 mb(); /* state is changed by bnx2x_sp_event() */
7284                 if (*state_p == state) {
7285 #ifdef BNX2X_STOP_ON_ERROR
7286                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7287 #endif
7288                         return 0;
7289                 }
7290
7291                 msleep(1);
7292
7293                 if (bp->panic)
7294                         return -EIO;
7295         }
7296
7297         /* timeout! */
7298         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7299                   poll ? "polling" : "waiting", state, idx);
7300 #ifdef BNX2X_STOP_ON_ERROR
7301         bnx2x_panic();
7302 #endif
7303
7304         return -EBUSY;
7305 }
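/*
 * The usual pattern around bnx2x_wait_ramrod() (see the MAC address
 * setters below): bump a pending counter, post the ramrod, then wait for
 * the completion handler to bring the counter back to the target state:
 *
 *	bp->set_mac_pending++;
 *	smp_wmb();
 *	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr, ...);
 *	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
 */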
7306
7307 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7308 {
7309         bp->set_mac_pending++;
7310         smp_wmb();
7311
7312         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7313                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7314
7315         /* Wait for a completion */
7316         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7317 }
7318
7319 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7320 {
7321         bp->set_mac_pending++;
7322         smp_wmb();
7323
7324         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7325                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7326                                   1);
7327
7328         /* Wait for a completion */
7329         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7330 }
7331
7332 #ifdef BCM_CNIC
7333 /**
7334  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7335  * MAC(s). This function will wait until the ramrod completion
7336  * returns.
7337  *
7338  * @param bp driver handle
7339  * @param set set or clear the CAM entry
7340  *
7341  * @return 0 if success, -ENODEV if the ramrod doesn't return.
7342  */
7343 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7344 {
7345         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7346
7347         bp->set_mac_pending++;
7348         smp_wmb();
7349
7350         /* Send a SET_MAC ramrod */
7351         if (CHIP_IS_E1(bp))
7352                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7353                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7354                                   1);
7355         else
7356                 /* CAM allocation for E1H
7357                  * unicasts: by func number
7358                  * multicast: 20+FUNC*20, 20 each
7359                  */
7360                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7361                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7362
7363         /* Wait for a completion when setting */
7364         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7365
7366         return 0;
7367 }
7368 #endif
7369
7370 static int bnx2x_setup_leading(struct bnx2x *bp)
7371 {
7372         int rc;
7373
7374         /* reset IGU state */
7375         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7376
7377         /* SETUP ramrod */
7378         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7379
7380         /* Wait for completion */
7381         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7382
7383         return rc;
7384 }
7385
7386 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7387 {
7388         struct bnx2x_fastpath *fp = &bp->fp[index];
7389
7390         /* reset IGU state */
7391         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7392
7393         /* SETUP ramrod */
7394         fp->state = BNX2X_FP_STATE_OPENING;
7395         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7396                       fp->cl_id, 0);
7397
7398         /* Wait for completion */
7399         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7400                                  &(fp->state), 0);
7401 }
7402
7403 static int bnx2x_poll(struct napi_struct *napi, int budget);
7404
7405 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7406                                     int *num_tx_queues_out)
7407 {
7408         int _num_rx_queues = 0, _num_tx_queues = 0;
7409
7410         switch (bp->multi_mode) {
7411         case ETH_RSS_MODE_DISABLED:
7412                 _num_rx_queues = 1;
7413                 _num_tx_queues = 1;
7414                 break;
7415
7416         case ETH_RSS_MODE_REGULAR:
7417                 if (num_rx_queues)
7418                         _num_rx_queues = min_t(u32, num_rx_queues,
7419                                                BNX2X_MAX_QUEUES(bp));
7420                 else
7421                         _num_rx_queues = min_t(u32, num_online_cpus(),
7422                                                BNX2X_MAX_QUEUES(bp));
7423
7424                 if (num_tx_queues)
7425                         _num_tx_queues = min_t(u32, num_tx_queues,
7426                                                BNX2X_MAX_QUEUES(bp));
7427                 else
7428                         _num_tx_queues = min_t(u32, num_online_cpus(),
7429                                                BNX2X_MAX_QUEUES(bp));
7430
7431                 /* There must not be more Tx queues than Rx queues */
7432                 if (_num_tx_queues > _num_rx_queues) {
7433                         BNX2X_ERR("number of tx queues (%d) > "
7434                                   "number of rx queues (%d)"
7435                                   "  defaulting to %d\n",
7436                                   _num_tx_queues, _num_rx_queues,
7437                                   _num_rx_queues);
7438                         _num_tx_queues = _num_rx_queues;
7439                 }
7440                 break;
7441
7442
7443         default:
7444                 _num_rx_queues = 1;
7445                 _num_tx_queues = 1;
7446                 break;
7447         }
7448
7449         *num_rx_queues_out = _num_rx_queues;
7450         *num_tx_queues_out = _num_tx_queues;
7451 }
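/*
 * Worked example (assuming BNX2X_MAX_QUEUES(bp) >= 8): with multi_mode set
 * to ETH_RSS_MODE_REGULAR, 8 online CPUs and the num_rx_queues and
 * num_tx_queues module parameters left at 0, this yields 8 Rx and 8 Tx
 * queues; a request for more Tx than Rx queues is clamped down to the Rx
 * count, as enforced above.
 */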
7452
7453 static int bnx2x_set_int_mode(struct bnx2x *bp)
7454 {
7455         int rc = 0;
7456
7457         switch (int_mode) {
7458         case INT_MODE_INTx:
7459         case INT_MODE_MSI:
7460                 bp->num_rx_queues = 1;
7461                 bp->num_tx_queues = 1;
7462                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7463                 break;
7464
7465         case INT_MODE_MSIX:
7466         default:
7467                 /* Set interrupt mode according to bp->multi_mode value */
7468                 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7469                                         &bp->num_tx_queues);
7470
7471                 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7472                    bp->num_rx_queues, bp->num_tx_queues);
7473
7474                 /* if we can't use MSI-X we only need one fp,
7475                  * so try to enable MSI-X with the requested number of fp's
7476                  * and fallback to MSI or legacy INTx with one fp
7477                  */
7478                 rc = bnx2x_enable_msix(bp);
7479                 if (rc) {
7480                         /* failed to enable MSI-X */
7481                         if (bp->multi_mode)
7482                                 BNX2X_ERR("Multi requested but failed to "
7483                                           "enable MSI-X (rx %d tx %d), "
7484                                           "set number of queues to 1\n",
7485                                           bp->num_rx_queues, bp->num_tx_queues);
7486                         bp->num_rx_queues = 1;
7487                         bp->num_tx_queues = 1;
7488                 }
7489                 break;
7490         }
7491         bp->dev->real_num_tx_queues = bp->num_tx_queues;
7492         return rc;
7493 }
7494
7495 #ifdef BCM_CNIC
7496 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7497 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7498 #endif
7499
7500 /* must be called with rtnl_lock */
7501 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7502 {
7503         u32 load_code;
7504         int i, rc;
7505
7506 #ifdef BNX2X_STOP_ON_ERROR
7507         if (unlikely(bp->panic))
7508                 return -EPERM;
7509 #endif
7510
7511         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7512
7513         rc = bnx2x_set_int_mode(bp);
7514
7515         if (bnx2x_alloc_mem(bp))
7516                 return -ENOMEM;
7517
7518         for_each_rx_queue(bp, i)
7519                 bnx2x_fp(bp, i, disable_tpa) =
7520                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7521
7522         for_each_rx_queue(bp, i)
7523                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7524                                bnx2x_poll, 128);
7525
7526         bnx2x_napi_enable(bp);
7527
7528         if (bp->flags & USING_MSIX_FLAG) {
7529                 rc = bnx2x_req_msix_irqs(bp);
7530                 if (rc) {
7531                         pci_disable_msix(bp->pdev);
7532                         goto load_error1;
7533                 }
7534         } else {
7535                 /* Fall back to INTx if MSI-X could not be enabled due to
7536                    lack of memory (in bnx2x_set_int_mode()) */
7537                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7538                         bnx2x_enable_msi(bp);
7539                 bnx2x_ack_int(bp);
7540                 rc = bnx2x_req_irq(bp);
7541                 if (rc) {
7542                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7543                         if (bp->flags & USING_MSI_FLAG)
7544                                 pci_disable_msi(bp->pdev);
7545                         goto load_error1;
7546                 }
7547                 if (bp->flags & USING_MSI_FLAG) {
7548                         bp->dev->irq = bp->pdev->irq;
7549                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7550                                bp->dev->name, bp->pdev->irq);
7551                 }
7552         }
7553
7554         /* Send the LOAD_REQUEST command to the MCP.
7555            It returns the type of LOAD command: if this is the first
7556            port to be initialized, the common blocks should be
7557            initialized as well; otherwise they should not.
7558         */
7559         if (!BP_NOMCP(bp)) {
7560                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7561                 if (!load_code) {
7562                         BNX2X_ERR("MCP response failure, aborting\n");
7563                         rc = -EBUSY;
7564                         goto load_error2;
7565                 }
7566                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7567                         rc = -EBUSY; /* other port in diagnostic mode */
7568                         goto load_error2;
7569                 }
7570
7571         } else {
7572                 int port = BP_PORT(bp);
7573
7574                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7575                    load_count[0], load_count[1], load_count[2]);
7576                 load_count[0]++;
7577                 load_count[1 + port]++;
7578                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7579                    load_count[0], load_count[1], load_count[2]);
7580                 if (load_count[0] == 1)
7581                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7582                 else if (load_count[1 + port] == 1)
7583                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7584                 else
7585                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7586         }
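        /* In the no-MCP case the driver does its own bookkeeping:
         * load_count[0] counts all loaded functions and load_count[1 + port]
         * those on this port, so the very first loader performs the COMMON
         * initialization, the first one on each port the PORT
         * initialization, and every other function only its own FUNCTION
         * initialization. */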
7587
7588         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7589             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7590                 bp->port.pmf = 1;
7591         else
7592                 bp->port.pmf = 0;
7593         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7594
7595         /* Initialize HW */
7596         rc = bnx2x_init_hw(bp, load_code);
7597         if (rc) {
7598                 BNX2X_ERR("HW init failed, aborting\n");
7599                 goto load_error2;
7600         }
7601
7602         /* Setup NIC internals and enable interrupts */
7603         bnx2x_nic_init(bp, load_code);
7604
7605         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7606             (bp->common.shmem2_base))
7607                 SHMEM2_WR(bp, dcc_support,
7608                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7609                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7610
7611         /* Send LOAD_DONE command to MCP */
7612         if (!BP_NOMCP(bp)) {
7613                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7614                 if (!load_code) {
7615                         BNX2X_ERR("MCP response failure, aborting\n");
7616                         rc = -EBUSY;
7617                         goto load_error3;
7618                 }
7619         }
7620
7621         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7622
7623         rc = bnx2x_setup_leading(bp);
7624         if (rc) {
7625                 BNX2X_ERR("Setup leading failed!\n");
7626 #ifndef BNX2X_STOP_ON_ERROR
7627                 goto load_error3;
7628 #else
7629                 bp->panic = 1;
7630                 return -EBUSY;
7631 #endif
7632         }
7633
7634         if (CHIP_IS_E1H(bp))
7635                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7636                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7637                         bp->flags |= MF_FUNC_DIS;
7638                 }
7639
7640         if (bp->state == BNX2X_STATE_OPEN) {
7641 #ifdef BCM_CNIC
7642                 /* Enable Timer scan */
7643                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7644 #endif
7645                 for_each_nondefault_queue(bp, i) {
7646                         rc = bnx2x_setup_multi(bp, i);
7647                         if (rc)
7648 #ifdef BCM_CNIC
7649                                 goto load_error4;
7650 #else
7651                                 goto load_error3;
7652 #endif
7653                 }
7654
7655                 if (CHIP_IS_E1(bp))
7656                         bnx2x_set_eth_mac_addr_e1(bp, 1);
7657                 else
7658                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
7659 #ifdef BCM_CNIC
7660                 /* Set iSCSI L2 MAC */
7661                 mutex_lock(&bp->cnic_mutex);
7662                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7663                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7664                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7665                 }
7666                 mutex_unlock(&bp->cnic_mutex);
7667 #endif
7668         }
7669
7670         if (bp->port.pmf)
7671                 bnx2x_initial_phy_init(bp, load_mode);
7672
7673         /* Start fast path */
7674         switch (load_mode) {
7675         case LOAD_NORMAL:
7676                 if (bp->state == BNX2X_STATE_OPEN) {
7677                         /* Tx queues should only be re-enabled */
7678                         netif_tx_wake_all_queues(bp->dev);
7679                 }
7680                 /* Initialize the receive filter. */
7681                 bnx2x_set_rx_mode(bp->dev);
7682                 break;
7683
7684         case LOAD_OPEN:
7685                 netif_tx_start_all_queues(bp->dev);
7686                 if (bp->state != BNX2X_STATE_OPEN)
7687                         netif_tx_disable(bp->dev);
7688                 /* Initialize the receive filter. */
7689                 bnx2x_set_rx_mode(bp->dev);
7690                 break;
7691
7692         case LOAD_DIAG:
7693                 /* Initialize the receive filter. */
7694                 bnx2x_set_rx_mode(bp->dev);
7695                 bp->state = BNX2X_STATE_DIAG;
7696                 break;
7697
7698         default:
7699                 break;
7700         }
7701
7702         if (!bp->port.pmf)
7703                 bnx2x__link_status_update(bp);
7704
7705         /* start the timer */
7706         mod_timer(&bp->timer, jiffies + bp->current_interval);
7707
7708 #ifdef BCM_CNIC
7709         bnx2x_setup_cnic_irq_info(bp);
7710         if (bp->state == BNX2X_STATE_OPEN)
7711                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7712 #endif
7713
7714         return 0;
7715
7716 #ifdef BCM_CNIC
7717 load_error4:
7718         /* Disable Timer scan */
7719         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7720 #endif
7721 load_error3:
7722         bnx2x_int_disable_sync(bp, 1);
7723         if (!BP_NOMCP(bp)) {
7724                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7725                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7726         }
7727         bp->port.pmf = 0;
7728         /* Free SKBs, SGEs, TPA pool and driver internals */
7729         bnx2x_free_skbs(bp);
7730         for_each_rx_queue(bp, i)
7731                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7732 load_error2:
7733         /* Release IRQs */
7734         bnx2x_free_irq(bp);
7735 load_error1:
7736         bnx2x_napi_disable(bp);
7737         for_each_rx_queue(bp, i)
7738                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7739         bnx2x_free_mem(bp);
7740
7741         return rc;
7742 }
7743
7744 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7745 {
7746         struct bnx2x_fastpath *fp = &bp->fp[index];
7747         int rc;
7748
7749         /* halt the connection */
7750         fp->state = BNX2X_FP_STATE_HALTING;
7751         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7752
7753         /* Wait for completion */
7754         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7755                                &(fp->state), 1);
7756         if (rc) /* timeout */
7757                 return rc;
7758
7759         /* delete cfc entry */
7760         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7761
7762         /* Wait for completion */
7763         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7764                                &(fp->state), 1);
7765         return rc;
7766 }
7767
7768 static int bnx2x_stop_leading(struct bnx2x *bp)
7769 {
7770         __le16 dsb_sp_prod_idx;
7771         /* if the other port is handling traffic,
7772            this can take a lot of time */
7773         int cnt = 500;
7774         int rc;
7775
7776         might_sleep();
7777
7778         /* Send HALT ramrod */
7779         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7780         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7781
7782         /* Wait for completion */
7783         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7784                                &(bp->fp[0].state), 1);
7785         if (rc) /* timeout */
7786                 return rc;
7787
7788         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7789
7790         /* Send PORT_DELETE ramrod */
7791         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7792
7793         /* Wait for the completion to arrive on the default status block;
7794            we are going to reset the chip anyway,
7795            so there is not much to do if this times out
7796          */
7797         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7798                 if (!cnt) {
7799                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7800                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7801                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7802 #ifdef BNX2X_STOP_ON_ERROR
7803                         bnx2x_panic();
7804 #endif
7805                         rc = -EBUSY;
7806                         break;
7807                 }
7808                 cnt--;
7809                 msleep(1);
7810                 rmb(); /* Refresh the dsb_sp_prod */
7811         }
7812         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7813         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7814
7815         return rc;
7816 }
7817
7818 static void bnx2x_reset_func(struct bnx2x *bp)
7819 {
7820         int port = BP_PORT(bp);
7821         int func = BP_FUNC(bp);
7822         int base, i;
7823
7824         /* Configure IGU */
7825         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7826         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7827
7828 #ifdef BCM_CNIC
7829         /* Disable Timer scan */
7830         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7831         /*
7832          * Wait for at least 10ms and up to 2 second for the timers scan to
7833          * complete
7834          */
7835         for (i = 0; i < 200; i++) {
7836                 msleep(10);
7837                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7838                         break;
7839         }
7840 #endif
7841         /* Clear ILT */
7842         base = FUNC_ILT_BASE(func);
7843         for (i = base; i < base + ILT_PER_FUNC; i++)
7844                 bnx2x_ilt_wr(bp, i, 0);
7845 }
7846
7847 static void bnx2x_reset_port(struct bnx2x *bp)
7848 {
7849         int port = BP_PORT(bp);
7850         u32 val;
7851
7852         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7853
7854         /* Do not rcv packets to BRB */
7855         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7856         /* Do not direct rcv packets that are not for MCP to the BRB */
7857         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7858                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7859
7860         /* Configure AEU */
7861         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7862
7863         msleep(100);
7864         /* Check for BRB port occupancy */
7865         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7866         if (val)
7867                 DP(NETIF_MSG_IFDOWN,
7868                    "BRB1 is not empty  %d blocks are occupied\n", val);
7869
7870         /* TODO: Close Doorbell port? */
7871 }
7872
7873 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7874 {
7875         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7876            BP_FUNC(bp), reset_code);
7877
7878         switch (reset_code) {
7879         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7880                 bnx2x_reset_port(bp);
7881                 bnx2x_reset_func(bp);
7882                 bnx2x_reset_common(bp);
7883                 break;
7884
7885         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7886                 bnx2x_reset_port(bp);
7887                 bnx2x_reset_func(bp);
7888                 break;
7889
7890         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7891                 bnx2x_reset_func(bp);
7892                 break;
7893
7894         default:
7895                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7896                 break;
7897         }
7898 }
7899
7900 /* must be called with rtnl_lock */
7901 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7902 {
7903         int port = BP_PORT(bp);
7904         u32 reset_code = 0;
7905         int i, cnt, rc;
7906
7907 #ifdef BCM_CNIC
7908         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7909 #endif
7910         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7911
7912         /* Set "drop all" */
7913         bp->rx_mode = BNX2X_RX_MODE_NONE;
7914         bnx2x_set_storm_rx_mode(bp);
7915
7916         /* Disable HW interrupts, NAPI and Tx */
7917         bnx2x_netif_stop(bp, 1);
7918
7919         del_timer_sync(&bp->timer);
7920         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7921                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7922         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7923
7924         /* Release IRQs */
7925         bnx2x_free_irq(bp);
7926
7927         /* Wait until tx fastpath tasks complete */
7928         for_each_tx_queue(bp, i) {
7929                 struct bnx2x_fastpath *fp = &bp->fp[i];
7930
7931                 cnt = 1000;
7932                 while (bnx2x_has_tx_work_unload(fp)) {
7933
7934                         bnx2x_tx_int(fp);
7935                         if (!cnt) {
7936                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7937                                           i);
7938 #ifdef BNX2X_STOP_ON_ERROR
7939                                 bnx2x_panic();
7940                                 return -EBUSY;
7941 #else
7942                                 break;
7943 #endif
7944                         }
7945                         cnt--;
7946                         msleep(1);
7947                 }
7948         }
7949         /* Give HW time to discard old tx messages */
7950         msleep(1);
7951
7952         if (CHIP_IS_E1(bp)) {
7953                 struct mac_configuration_cmd *config =
7954                                                 bnx2x_sp(bp, mcast_config);
7955
7956                 bnx2x_set_eth_mac_addr_e1(bp, 0);
7957
7958                 for (i = 0; i < config->hdr.length; i++)
7959                         CAM_INVALIDATE(config->config_table[i]);
7960
7961                 config->hdr.length = i;
7962                 if (CHIP_REV_IS_SLOW(bp))
7963                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7964                 else
7965                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7966                 config->hdr.client_id = bp->fp->cl_id;
7967                 config->hdr.reserved1 = 0;
7968
7969                 bp->set_mac_pending++;
7970                 smp_wmb();
7971
7972                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7973                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7974                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7975
7976         } else { /* E1H */
7977                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7978
7979                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7980
7981                 for (i = 0; i < MC_HASH_SIZE; i++)
7982                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7983
7984                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7985         }
7986 #ifdef BCM_CNIC
7987         /* Clear iSCSI L2 MAC */
7988         mutex_lock(&bp->cnic_mutex);
7989         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7990                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7991                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7992         }
7993         mutex_unlock(&bp->cnic_mutex);
7994 #endif
7995
7996         if (unload_mode == UNLOAD_NORMAL)
7997                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7998
7999         else if (bp->flags & NO_WOL_FLAG)
8000                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8001
8002         else if (bp->wol) {
8003                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8004                 u8 *mac_addr = bp->dev->dev_addr;
8005                 u32 val;
8006                 /* The MAC address is written to entries 1-4 to
8007                    preserve entry 0, which is used by the PMF */
8008                 u8 entry = (BP_E1HVN(bp) + 1)*8;
8009
8010                 val = (mac_addr[0] << 8) | mac_addr[1];
8011                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8012
8013                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8014                       (mac_addr[4] << 8) | mac_addr[5];
8015                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8016
8017                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8018
8019         } else
8020                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
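        /* To summarize the reset_code selection above: a normal unload
         * requests WOL_DIS, NO_WOL_FLAG delegates WoL handling to the MCP
         * (WOL_MCP), an enabled bp->wol programs the EMAC MAC-match
         * registers and requests WOL_EN, and anything else falls back to
         * WOL_DIS. */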
8021
8022         /* Close the multi and leading connections;
8023            completions for the ramrods are collected synchronously */
8024         for_each_nondefault_queue(bp, i)
8025                 if (bnx2x_stop_multi(bp, i))
8026                         goto unload_error;
8027
8028         rc = bnx2x_stop_leading(bp);
8029         if (rc) {
8030                 BNX2X_ERR("Stop leading failed!\n");
8031 #ifdef BNX2X_STOP_ON_ERROR
8032                 return -EBUSY;
8033 #else
8034                 goto unload_error;
8035 #endif
8036         }
8037
8038 unload_error:
8039         if (!BP_NOMCP(bp))
8040                 reset_code = bnx2x_fw_command(bp, reset_code);
8041         else {
8042                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
8043                    load_count[0], load_count[1], load_count[2]);
8044                 load_count[0]--;
8045                 load_count[1 + port]--;
8046                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
8047                    load_count[0], load_count[1], load_count[2]);
8048                 if (load_count[0] == 0)
8049                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8050                 else if (load_count[1 + port] == 0)
8051                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8052                 else
8053                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8054         }
8055
8056         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8057             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8058                 bnx2x__link_reset(bp);
8059
8060         /* Reset the chip */
8061         bnx2x_reset_chip(bp, reset_code);
8062
8063         /* Report UNLOAD_DONE to MCP */
8064         if (!BP_NOMCP(bp))
8065                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8066
8067         bp->port.pmf = 0;
8068
8069         /* Free SKBs, SGEs, TPA pool and driver internals */
8070         bnx2x_free_skbs(bp);
8071         for_each_rx_queue(bp, i)
8072                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8073         for_each_rx_queue(bp, i)
8074                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8075         bnx2x_free_mem(bp);
8076
8077         bp->state = BNX2X_STATE_CLOSED;
8078
8079         netif_carrier_off(bp->dev);
8080
8081         return 0;
8082 }
8083
8084 static void bnx2x_reset_task(struct work_struct *work)
8085 {
8086         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8087
8088 #ifdef BNX2X_STOP_ON_ERROR
8089         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8090                   " so reset not done to allow debug dump,\n"
8091                   " you will need to reboot when done\n");
8092         return;
8093 #endif
8094
8095         rtnl_lock();
8096
8097         if (!netif_running(bp->dev))
8098                 goto reset_task_exit;
8099
8100         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8101         bnx2x_nic_load(bp, LOAD_NORMAL);
8102
8103 reset_task_exit:
8104         rtnl_unlock();
8105 }
8106
8107 /* end of nic load/unload */
8108
8109 /* ethtool_ops */
8110
8111 /*
8112  * Init service functions
8113  */
8114
8115 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8116 {
8117         switch (func) {
8118         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8119         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8120         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8121         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8122         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8123         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8124         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8125         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8126         default:
8127                 BNX2X_ERR("Unsupported function index: %d\n", func);
8128                 return (u32)(-1);
8129         }
8130 }
8131
8132 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8133 {
8134         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8135
8136         /* Flush all outstanding writes */
8137         mmiowb();
8138
8139         /* Pretend to be function 0 */
8140         REG_WR(bp, reg, 0);
8141         /* Flush the GRC transaction (in the chip) */
8142         new_val = REG_RD(bp, reg);
8143         if (new_val != 0) {
8144                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8145                           new_val);
8146                 BUG();
8147         }
8148
8149         /* From now we are in the "like-E1" mode */
8150         bnx2x_int_disable(bp);
8151
8152         /* Flush all outstanding writes */
8153         mmiowb();
8154
8155         /* Restore the original function settings */
8156         REG_WR(bp, reg, orig_func);
8157         new_val = REG_RD(bp, reg);
8158         if (new_val != orig_func) {
8159                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8160                           orig_func, new_val);
8161                 BUG();
8162         }
8163 }
8164
8165 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8166 {
8167         if (CHIP_IS_E1H(bp))
8168                 bnx2x_undi_int_disable_e1h(bp, func);
8169         else
8170                 bnx2x_int_disable(bp);
8171 }
8172
8173 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8174 {
8175         u32 val;
8176
8177         /* Check if there is any driver already loaded */
8178         val = REG_RD(bp, MISC_REG_UNPREPARED);
8179         if (val == 0x1) {
8180                 /* Check if it is the UNDI driver; the UNDI driver
8181                  * initializes the CID offset for the normal doorbell to 0x7
8182                  */
8183                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8184                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8185                 if (val == 0x7) {
8186                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8187                         /* save our func */
8188                         int func = BP_FUNC(bp);
8189                         u32 swap_en;
8190                         u32 swap_val;
8191
8192                         /* clear the UNDI indication */
8193                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8194
8195                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
8196
8197                         /* try to unload UNDI on port 0 */
8198                         bp->func = 0;
8199                         bp->fw_seq =
8200                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8201                                 DRV_MSG_SEQ_NUMBER_MASK);
8202                         reset_code = bnx2x_fw_command(bp, reset_code);
8203
8204                         /* if UNDI is loaded on the other port */
8205                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8206
8207                                 /* send "DONE" for previous unload */
8208                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8209
8210                                 /* unload UNDI on port 1 */
8211                                 bp->func = 1;
8212                                 bp->fw_seq =
8213                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8214                                         DRV_MSG_SEQ_NUMBER_MASK);
8215                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8216
8217                                 bnx2x_fw_command(bp, reset_code);
8218                         }
8219
8220                         /* now it's safe to release the lock */
8221                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8222
8223                         bnx2x_undi_int_disable(bp, func);
8224
8225                         /* Close input traffic and wait for it to drain */
8226                         /* Do not receive packets into the BRB */
8227                         REG_WR(bp,
8228                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8229                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8230                         /* Do not direct receive packets that are not for
8231                          * the MCP to the BRB */
8232                         REG_WR(bp,
8233                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8234                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8235                         /* clear AEU */
8236                         REG_WR(bp,
8237                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8238                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8239                         msleep(10);
8240
8241                         /* save NIG port swap info */
8242                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8243                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8244                         /* reset device */
8245                         REG_WR(bp,
8246                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8247                                0xd3ffffff);
8248                         REG_WR(bp,
8249                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8250                                0x1403);
8251                         /* take the NIG out of reset and restore swap values */
8252                         REG_WR(bp,
8253                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8254                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
8255                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8256                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8257
8258                         /* send unload done to the MCP */
8259                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8260
8261                         /* restore our func and fw_seq */
8262                         bp->func = func;
8263                         bp->fw_seq =
8264                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8265                                 DRV_MSG_SEQ_NUMBER_MASK);
8266
8267                 } else
8268                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8269         }
8270 }
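/*
 * Recap of the UNDI takeover sequence above (a summary of the code, not
 * additional logic):
 *   1. detect UNDI via DORQ_REG_NORM_CID_OFST == 0x7 and clear the flag
 *   2. request an MCP unload, first as function 0, then as function 1 if
 *      the response shows UNDI is loaded on the other port
 *   3. disable interrupts (through the pretend register on E1H)
 *   4. block Rx traffic into the BRB, mask AEU attentions, let it drain
 *   5. reset the chip while preserving the NIG port-swap strap values
 *   6. send UNLOAD_DONE and restore our function and fw_seq
 */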
8271
8272 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8273 {
8274         u32 val, val2, val3, val4, id;
8275         u16 pmc;
8276
8277         /* Get the chip revision id and number. */
8278         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8279         val = REG_RD(bp, MISC_REG_CHIP_NUM);
8280         id = ((val & 0xffff) << 16);
8281         val = REG_RD(bp, MISC_REG_CHIP_REV);
8282         id |= ((val & 0xf) << 12);
8283         val = REG_RD(bp, MISC_REG_CHIP_METAL);
8284         id |= ((val & 0xff) << 4);
8285         val = REG_RD(bp, MISC_REG_BOND_ID);
8286         id |= (val & 0xf);
8287         bp->common.chip_id = id;
8288         bp->link_params.chip_id = bp->common.chip_id;
8289         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
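        /*
         * Worked example of the packing above (field values hypothetical):
         * chip num 0x164e, rev 0x1, metal 0x00 and bond_id 0x0 compose as
         *   id = (0x164e << 16) | (0x1 << 12) | (0x00 << 4) | 0x0
         *      = 0x164e1000
         */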
8290
8291         val = (REG_RD(bp, 0x2874) & 0x55);
8292         if ((bp->common.chip_id & 0x1) ||
8293             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8294                 bp->flags |= ONE_PORT_FLAG;
8295                 BNX2X_DEV_INFO("single port device\n");
8296         }
8297
8298         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8299         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8300                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
8301         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8302                        bp->common.flash_size, bp->common.flash_size);
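        /*
         * Worked example, assuming NVRAM_1MB_SIZE is 0x20000 (1 Mbit in
         * bytes): a FLASH_SIZE field of 3 yields
         *   flash_size = 0x20000 << 3 = 0x100000 bytes (1 MB)
         */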
8303
8304         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8305         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8306         bp->link_params.shmem_base = bp->common.shmem_base;
8307         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8308                        bp->common.shmem_base, bp->common.shmem2_base);
8309
8310         if (!bp->common.shmem_base ||
8311             (bp->common.shmem_base < 0xA0000) ||
8312             (bp->common.shmem_base >= 0xC0000)) {
8313                 BNX2X_DEV_INFO("MCP not active\n");
8314                 bp->flags |= NO_MCP_FLAG;
8315                 return;
8316         }
8317
8318         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8319         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8320                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8321                 BNX2X_ERR("BAD MCP validity signature\n");
8322
8323         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8324         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8325
8326         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8327                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8328                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8329
8330         bp->link_params.feature_config_flags = 0;
8331         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8332         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8333                 bp->link_params.feature_config_flags |=
8334                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8335         else
8336                 bp->link_params.feature_config_flags &=
8337                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8338
8339         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8340         bp->common.bc_ver = val;
8341         BNX2X_DEV_INFO("bc_ver %X\n", val);
8342         if (val < BNX2X_BC_VER) {
8343                 /* For now only warn;
8344                  * later we might need to enforce this */
8345                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8346                           " please upgrade BC\n", BNX2X_BC_VER, val);
8347         }
8348         bp->link_params.feature_config_flags |=
8349                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8350                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
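        /*
         * Worked example of the bc_ver handling above: a shmem bc_rev of
         * 0x04020005 gives val = 0x040200, which bnx2x_get_drvinfo() later
         * formats as "BC:4.2.0"; a value below BNX2X_BC_VER only triggers
         * the warning for now.
         */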
8351
8352         if (BP_E1HVN(bp) == 0) {
8353                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8354                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8355         } else {
8356                 /* no WOL capability for E1HVN != 0 */
8357                 bp->flags |= NO_WOL_FLAG;
8358         }
8359         BNX2X_DEV_INFO("%sWoL capable\n",
8360                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8361
8362         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8363         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8364         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8365         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8366
8367         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8368                val, val2, val3, val4);
8369 }
8370
8371 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8372                                                     u32 switch_cfg)
8373 {
8374         int port = BP_PORT(bp);
8375         u32 ext_phy_type;
8376
8377         switch (switch_cfg) {
8378         case SWITCH_CFG_1G:
8379                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8380
8381                 ext_phy_type =
8382                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8383                 switch (ext_phy_type) {
8384                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8385                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8386                                        ext_phy_type);
8387
8388                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8389                                                SUPPORTED_10baseT_Full |
8390                                                SUPPORTED_100baseT_Half |
8391                                                SUPPORTED_100baseT_Full |
8392                                                SUPPORTED_1000baseT_Full |
8393                                                SUPPORTED_2500baseX_Full |
8394                                                SUPPORTED_TP |
8395                                                SUPPORTED_FIBRE |
8396                                                SUPPORTED_Autoneg |
8397                                                SUPPORTED_Pause |
8398                                                SUPPORTED_Asym_Pause);
8399                         break;
8400
8401                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8402                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8403                                        ext_phy_type);
8404
8405                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8406                                                SUPPORTED_10baseT_Full |
8407                                                SUPPORTED_100baseT_Half |
8408                                                SUPPORTED_100baseT_Full |
8409                                                SUPPORTED_1000baseT_Full |
8410                                                SUPPORTED_TP |
8411                                                SUPPORTED_FIBRE |
8412                                                SUPPORTED_Autoneg |
8413                                                SUPPORTED_Pause |
8414                                                SUPPORTED_Asym_Pause);
8415                         break;
8416
8417                 default:
8418                         BNX2X_ERR("NVRAM config error. "
8419                                   "BAD SerDes ext_phy_config 0x%x\n",
8420                                   bp->link_params.ext_phy_config);
8421                         return;
8422                 }
8423
8424                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8425                                            port*0x10);
8426                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8427                 break;
8428
8429         case SWITCH_CFG_10G:
8430                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8431
8432                 ext_phy_type =
8433                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8434                 switch (ext_phy_type) {
8435                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8436                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8437                                        ext_phy_type);
8438
8439                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8440                                                SUPPORTED_10baseT_Full |
8441                                                SUPPORTED_100baseT_Half |
8442                                                SUPPORTED_100baseT_Full |
8443                                                SUPPORTED_1000baseT_Full |
8444                                                SUPPORTED_2500baseX_Full |
8445                                                SUPPORTED_10000baseT_Full |
8446                                                SUPPORTED_TP |
8447                                                SUPPORTED_FIBRE |
8448                                                SUPPORTED_Autoneg |
8449                                                SUPPORTED_Pause |
8450                                                SUPPORTED_Asym_Pause);
8451                         break;
8452
8453                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8454                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8455                                        ext_phy_type);
8456
8457                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8458                                                SUPPORTED_1000baseT_Full |
8459                                                SUPPORTED_FIBRE |
8460                                                SUPPORTED_Autoneg |
8461                                                SUPPORTED_Pause |
8462                                                SUPPORTED_Asym_Pause);
8463                         break;
8464
8465                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8466                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8467                                        ext_phy_type);
8468
8469                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8470                                                SUPPORTED_2500baseX_Full |
8471                                                SUPPORTED_1000baseT_Full |
8472                                                SUPPORTED_FIBRE |
8473                                                SUPPORTED_Autoneg |
8474                                                SUPPORTED_Pause |
8475                                                SUPPORTED_Asym_Pause);
8476                         break;
8477
8478                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8479                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8480                                        ext_phy_type);
8481
8482                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8483                                                SUPPORTED_FIBRE |
8484                                                SUPPORTED_Pause |
8485                                                SUPPORTED_Asym_Pause);
8486                         break;
8487
8488                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8489                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8490                                        ext_phy_type);
8491
8492                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8493                                                SUPPORTED_1000baseT_Full |
8494                                                SUPPORTED_FIBRE |
8495                                                SUPPORTED_Pause |
8496                                                SUPPORTED_Asym_Pause);
8497                         break;
8498
8499                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8500                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8501                                        ext_phy_type);
8502
8503                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8504                                                SUPPORTED_1000baseT_Full |
8505                                                SUPPORTED_Autoneg |
8506                                                SUPPORTED_FIBRE |
8507                                                SUPPORTED_Pause |
8508                                                SUPPORTED_Asym_Pause);
8509                         break;
8510
8511                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8512                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8513                                        ext_phy_type);
8514
8515                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8516                                                SUPPORTED_1000baseT_Full |
8517                                                SUPPORTED_Autoneg |
8518                                                SUPPORTED_FIBRE |
8519                                                SUPPORTED_Pause |
8520                                                SUPPORTED_Asym_Pause);
8521                         break;
8522
8523                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8524                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8525                                        ext_phy_type);
8526
8527                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8528                                                SUPPORTED_TP |
8529                                                SUPPORTED_Autoneg |
8530                                                SUPPORTED_Pause |
8531                                                SUPPORTED_Asym_Pause);
8532                         break;
8533
8534                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8535                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8536                                        ext_phy_type);
8537
8538                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8539                                                SUPPORTED_10baseT_Full |
8540                                                SUPPORTED_100baseT_Half |
8541                                                SUPPORTED_100baseT_Full |
8542                                                SUPPORTED_1000baseT_Full |
8543                                                SUPPORTED_10000baseT_Full |
8544                                                SUPPORTED_TP |
8545                                                SUPPORTED_Autoneg |
8546                                                SUPPORTED_Pause |
8547                                                SUPPORTED_Asym_Pause);
8548                         break;
8549
8550                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8551                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8552                                   bp->link_params.ext_phy_config);
8553                         break;
8554
8555                 default:
8556                         BNX2X_ERR("NVRAM config error. "
8557                                   "BAD XGXS ext_phy_config 0x%x\n",
8558                                   bp->link_params.ext_phy_config);
8559                         return;
8560                 }
8561
8562                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8563                                            port*0x18);
8564                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8565
8566                 break;
8567
8568         default:
8569                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8570                           bp->port.link_config);
8571                 return;
8572         }
8573         bp->link_params.phy_addr = bp->port.phy_addr;
8574
8575         /* mask what we support according to speed_cap_mask */
8576         if (!(bp->link_params.speed_cap_mask &
8577                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8578                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8579
8580         if (!(bp->link_params.speed_cap_mask &
8581                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8582                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8583
8584         if (!(bp->link_params.speed_cap_mask &
8585                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8586                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8587
8588         if (!(bp->link_params.speed_cap_mask &
8589                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8590                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8591
8592         if (!(bp->link_params.speed_cap_mask &
8593                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8594                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8595                                         SUPPORTED_1000baseT_Full);
8596
8597         if (!(bp->link_params.speed_cap_mask &
8598                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8599                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8600
8601         if (!(bp->link_params.speed_cap_mask &
8602                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8603                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8604
8605         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8606 }
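/*
 * Illustrative example of the masking above: if the NVRAM speed_cap_mask
 * sets only the D0_1G and D0_10G bits, every other SUPPORTED_* speed bit
 * is cleared, so a PHY that advertised 10/100/1000/10000 ends up
 * reporting only 1000baseT_Full and 10000baseT_Full (the non-speed bits
 * such as SUPPORTED_TP, SUPPORTED_Autoneg and the pause bits are never
 * masked here).
 */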
8607
8608 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8609 {
8610         bp->link_params.req_duplex = DUPLEX_FULL;
8611
8612         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8613         case PORT_FEATURE_LINK_SPEED_AUTO:
8614                 if (bp->port.supported & SUPPORTED_Autoneg) {
8615                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8616                         bp->port.advertising = bp->port.supported;
8617                 } else {
8618                         u32 ext_phy_type =
8619                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8620
8621                         if ((ext_phy_type ==
8622                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8623                             (ext_phy_type ==
8624                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8625                                 /* force 10G, no AN */
8626                                 bp->link_params.req_line_speed = SPEED_10000;
8627                                 bp->port.advertising =
8628                                                 (ADVERTISED_10000baseT_Full |
8629                                                  ADVERTISED_FIBRE);
8630                                 break;
8631                         }
8632                         BNX2X_ERR("NVRAM config error. "
8633                                   "Invalid link_config 0x%x"
8634                                   "  Autoneg not supported\n",
8635                                   bp->port.link_config);
8636                         return;
8637                 }
8638                 break;
8639
8640         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8641                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8642                         bp->link_params.req_line_speed = SPEED_10;
8643                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8644                                                 ADVERTISED_TP);
8645                 } else {
8646                         BNX2X_ERR("NVRAM config error. "
8647                                   "Invalid link_config 0x%x"
8648                                   "  speed_cap_mask 0x%x\n",
8649                                   bp->port.link_config,
8650                                   bp->link_params.speed_cap_mask);
8651                         return;
8652                 }
8653                 break;
8654
8655         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8656                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8657                         bp->link_params.req_line_speed = SPEED_10;
8658                         bp->link_params.req_duplex = DUPLEX_HALF;
8659                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8660                                                 ADVERTISED_TP);
8661                 } else {
8662                         BNX2X_ERR("NVRAM config error. "
8663                                   "Invalid link_config 0x%x"
8664                                   "  speed_cap_mask 0x%x\n",
8665                                   bp->port.link_config,
8666                                   bp->link_params.speed_cap_mask);
8667                         return;
8668                 }
8669                 break;
8670
8671         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8672                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8673                         bp->link_params.req_line_speed = SPEED_100;
8674                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8675                                                 ADVERTISED_TP);
8676                 } else {
8677                         BNX2X_ERR("NVRAM config error. "
8678                                   "Invalid link_config 0x%x"
8679                                   "  speed_cap_mask 0x%x\n",
8680                                   bp->port.link_config,
8681                                   bp->link_params.speed_cap_mask);
8682                         return;
8683                 }
8684                 break;
8685
8686         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8687                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8688                         bp->link_params.req_line_speed = SPEED_100;
8689                         bp->link_params.req_duplex = DUPLEX_HALF;
8690                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8691                                                 ADVERTISED_TP);
8692                 } else {
8693                         BNX2X_ERR("NVRAM config error. "
8694                                   "Invalid link_config 0x%x"
8695                                   "  speed_cap_mask 0x%x\n",
8696                                   bp->port.link_config,
8697                                   bp->link_params.speed_cap_mask);
8698                         return;
8699                 }
8700                 break;
8701
8702         case PORT_FEATURE_LINK_SPEED_1G:
8703                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8704                         bp->link_params.req_line_speed = SPEED_1000;
8705                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8706                                                 ADVERTISED_TP);
8707                 } else {
8708                         BNX2X_ERR("NVRAM config error. "
8709                                   "Invalid link_config 0x%x"
8710                                   "  speed_cap_mask 0x%x\n",
8711                                   bp->port.link_config,
8712                                   bp->link_params.speed_cap_mask);
8713                         return;
8714                 }
8715                 break;
8716
8717         case PORT_FEATURE_LINK_SPEED_2_5G:
8718                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8719                         bp->link_params.req_line_speed = SPEED_2500;
8720                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8721                                                 ADVERTISED_TP);
8722                 } else {
8723                         BNX2X_ERR("NVRAM config error. "
8724                                   "Invalid link_config 0x%x"
8725                                   "  speed_cap_mask 0x%x\n",
8726                                   bp->port.link_config,
8727                                   bp->link_params.speed_cap_mask);
8728                         return;
8729                 }
8730                 break;
8731
8732         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8733         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8734         case PORT_FEATURE_LINK_SPEED_10G_KR:
8735                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8736                         bp->link_params.req_line_speed = SPEED_10000;
8737                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8738                                                 ADVERTISED_FIBRE);
8739                 } else {
8740                         BNX2X_ERR("NVRAM config error. "
8741                                   "Invalid link_config 0x%x"
8742                                   "  speed_cap_mask 0x%x\n",
8743                                   bp->port.link_config,
8744                                   bp->link_params.speed_cap_mask);
8745                         return;
8746                 }
8747                 break;
8748
8749         default:
8750                 BNX2X_ERR("NVRAM config error. "
8751                           "BAD link speed link_config 0x%x\n",
8752                           bp->port.link_config);
8753                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8754                 bp->port.advertising = bp->port.supported;
8755                 break;
8756         }
8757
8758         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8759                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8760         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8761             !(bp->port.supported & SUPPORTED_Autoneg))
8762                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8763
8764         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8765                        "  advertising 0x%x\n",
8766                        bp->link_params.req_line_speed,
8767                        bp->link_params.req_duplex,
8768                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8769 }
8770
8771 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8772 {
8773         mac_hi = cpu_to_be16(mac_hi);
8774         mac_lo = cpu_to_be32(mac_lo);
8775         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8776         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8777 }
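/*
 * Worked example for bnx2x_set_mac_buf() (values hypothetical): with
 * mac_hi = 0x001a and mac_lo = 0xc5f31b20, the cpu_to_be*() conversions
 * put the bytes in network order, so the two memcpy()s fill
 *   mac_buf[] = { 0x00, 0x1a, 0xc5, 0xf3, 0x1b, 0x20 }
 * i.e. the MAC address 00:1a:c5:f3:1b:20.
 */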
8778
8779 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8780 {
8781         int port = BP_PORT(bp);
8782         u32 val, val2;
8783         u32 config;
8784         u16 i;
8785         u32 ext_phy_type;
8786
8787         bp->link_params.bp = bp;
8788         bp->link_params.port = port;
8789
8790         bp->link_params.lane_config =
8791                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8792         bp->link_params.ext_phy_config =
8793                 SHMEM_RD(bp,
8794                          dev_info.port_hw_config[port].external_phy_config);
8795         /* BCM8727_NOC => BCM8727, no over-current */
8796         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8797             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8798                 bp->link_params.ext_phy_config &=
8799                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8800                 bp->link_params.ext_phy_config |=
8801                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8802                 bp->link_params.feature_config_flags |=
8803                         FEATURE_CONFIG_BCM8727_NOC;
8804         }
8805
8806         bp->link_params.speed_cap_mask =
8807                 SHMEM_RD(bp,
8808                          dev_info.port_hw_config[port].speed_capability_mask);
8809
8810         bp->port.link_config =
8811                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8812
8813         /* Get the XGXS Rx and Tx config for the 4 lanes */
8814         for (i = 0; i < 2; i++) {
8815                 val = SHMEM_RD(bp,
8816                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8817                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8818                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8819
8820                 val = SHMEM_RD(bp,
8821                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8822                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8823                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8824         }
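        /*
         * Worked example of the unpacking above: a 32-bit shmem word of
         * 0x12345678 read for lane pair i = 0 becomes
         *   xgxs_config_rx[0] = 0x1234;     (upper half, even lane)
         *   xgxs_config_rx[1] = 0x5678;     (lower half, odd lane)
         * and likewise for the Tx side.
         */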
8825
8826         /* If the device is capable of WoL, set the default state according
8827          * to the HW
8828          */
8829         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8830         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8831                    (config & PORT_FEATURE_WOL_ENABLED));
8832
8833         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8834                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8835                        bp->link_params.lane_config,
8836                        bp->link_params.ext_phy_config,
8837                        bp->link_params.speed_cap_mask, bp->port.link_config);
8838
8839         bp->link_params.switch_cfg |= (bp->port.link_config &
8840                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8841         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8842
8843         bnx2x_link_settings_requested(bp);
8844
8845         /*
8846          * If connected directly, work with the internal PHY, otherwise, work
8847          * with the external PHY
8848          */
8849         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8850         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8851                 bp->mdio.prtad = bp->link_params.phy_addr;
8852
8853         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8854                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8855                 bp->mdio.prtad =
8856                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8857
8858         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8859         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8860         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8861         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8862         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8863
8864 #ifdef BCM_CNIC
8865         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8866         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8867         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8868 #endif
8869 }
8870
8871 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8872 {
8873         int func = BP_FUNC(bp);
8874         u32 val, val2;
8875         int rc = 0;
8876
8877         bnx2x_get_common_hwinfo(bp);
8878
8879         bp->e1hov = 0;
8880         bp->e1hmf = 0;
8881         if (CHIP_IS_E1H(bp)) {
8882                 bp->mf_config =
8883                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8884
8885                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8886                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8887                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8888                         bp->e1hmf = 1;
8889                 BNX2X_DEV_INFO("%s function mode\n",
8890                                IS_E1HMF(bp) ? "multi" : "single");
8891
8892                 if (IS_E1HMF(bp)) {
8893                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8894                                                                 e1hov_tag) &
8895                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8896                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8897                                 bp->e1hov = val;
8898                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8899                                                "(0x%04x)\n",
8900                                                func, bp->e1hov, bp->e1hov);
8901                         } else {
8902                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8903                                           "  aborting\n", func);
8904                                 rc = -EPERM;
8905                         }
8906                 } else {
8907                         if (BP_E1HVN(bp)) {
8908                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8909                                           "  aborting\n", BP_E1HVN(bp));
8910                                 rc = -EPERM;
8911                         }
8912                 }
8913         }
8914
8915         if (!BP_NOMCP(bp)) {
8916                 bnx2x_get_port_hwinfo(bp);
8917
8918                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8919                               DRV_MSG_SEQ_NUMBER_MASK);
8920                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8921         }
8922
8923         if (IS_E1HMF(bp)) {
8924                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8925                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8926                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8927                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8928                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8929                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8930                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8931                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8932                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8933                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8934                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8935                                ETH_ALEN);
8936                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8937                                ETH_ALEN);
8938                 }
8939
8940                 return rc;
8941         }
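        /*
         * Worked example of the mf_cfg MAC assembly above (values
         * hypothetical): mac_upper = 0x001a and mac_lower = 0xc5f31b20
         * fill dev_addr[] as 00:1a:c5:f3:1b:20, the upper word supplying
         * bytes 0-1 and the lower word bytes 2-5.
         */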
8942
8943         if (BP_NOMCP(bp)) {
8944                 /* only supposed to happen on emulation/FPGA */
8945                 BNX2X_ERR("warning: random MAC workaround active\n");
8946                 random_ether_addr(bp->dev->dev_addr);
8947                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8948         }
8949
8950         return rc;
8951 }
8952
8953 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8954 {
8955         int func = BP_FUNC(bp);
8956         int timer_interval;
8957         int rc;
8958
8959         /* Disable interrupt handling until HW is initialized */
8960         atomic_set(&bp->intr_sem, 1);
8961         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8962
8963         mutex_init(&bp->port.phy_mutex);
8964         mutex_init(&bp->fw_mb_mutex);
8965 #ifdef BCM_CNIC
8966         mutex_init(&bp->cnic_mutex);
8967 #endif
8968
8969         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8970         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8971
8972         rc = bnx2x_get_hwinfo(bp);
8973
8974         /* need to reset the chip if UNDI was active */
8975         if (!BP_NOMCP(bp))
8976                 bnx2x_undi_unload(bp);
8977
8978         if (CHIP_REV_IS_FPGA(bp))
8979                 printk(KERN_ERR PFX "FPGA detected\n");
8980
8981         if (BP_NOMCP(bp) && (func == 0))
8982                 printk(KERN_ERR PFX
8983                        "MCP disabled, must load devices in order!\n");
8984
8985         /* Set multi queue mode */
8986         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8987             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8988                 printk(KERN_ERR PFX
8989                       "Multi disabled since int_mode requested is not MSI-X\n");
8990                 multi_mode = ETH_RSS_MODE_DISABLED;
8991         }
8992         bp->multi_mode = multi_mode;
8993
8994
8995         /* Set TPA flags */
8996         if (disable_tpa) {
8997                 bp->flags &= ~TPA_ENABLE_FLAG;
8998                 bp->dev->features &= ~NETIF_F_LRO;
8999         } else {
9000                 bp->flags |= TPA_ENABLE_FLAG;
9001                 bp->dev->features |= NETIF_F_LRO;
9002         }
9003
9004         if (CHIP_IS_E1(bp))
9005                 bp->dropless_fc = 0;
9006         else
9007                 bp->dropless_fc = dropless_fc;
9008
9009         bp->mrrs = mrrs;
9010
9011         bp->tx_ring_size = MAX_TX_AVAIL;
9012         bp->rx_ring_size = MAX_RX_AVAIL;
9013
9014         bp->rx_csum = 1;
9015
9016         bp->tx_ticks = 50;
9017         bp->rx_ticks = 25;
9018
9019         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9020         bp->current_interval = (poll ? poll : timer_interval);
9021
9022         init_timer(&bp->timer);
9023         bp->timer.expires = jiffies + bp->current_interval;
9024         bp->timer.data = (unsigned long) bp;
9025         bp->timer.function = bnx2x_timer;
9026
9027         return rc;
9028 }
9029
9030 /*
9031  * ethtool service functions
9032  */
9033
9034 /* All ethtool functions are called with rtnl_lock held */
9035
9036 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9037 {
9038         struct bnx2x *bp = netdev_priv(dev);
9039
9040         cmd->supported = bp->port.supported;
9041         cmd->advertising = bp->port.advertising;
9042
9043         if ((bp->state == BNX2X_STATE_OPEN) &&
9044             !(bp->flags & MF_FUNC_DIS) &&
9045             (bp->link_vars.link_up)) {
9046                 cmd->speed = bp->link_vars.line_speed;
9047                 cmd->duplex = bp->link_vars.duplex;
9048                 if (IS_E1HMF(bp)) {
9049                         u16 vn_max_rate;
9050
9051                         vn_max_rate =
9052                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9053                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
9054                         if (vn_max_rate < cmd->speed)
9055                                 cmd->speed = vn_max_rate;
9056                 }
9057         } else {
9058                 cmd->speed = -1;
9059                 cmd->duplex = -1;
9060         }
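        /*
         * Worked example of the E1HMF clamping above: with a physical
         * line_speed of 10000 and a FUNC_MF_CFG_MAX_BW field of 25,
         * vn_max_rate = 25 * 100 = 2500, so ethtool reports 2500, the
         * maximal bandwidth actually available to this function, rather
         * than the raw 10G link speed.
         */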
9061
9062         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9063                 u32 ext_phy_type =
9064                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9065
9066                 switch (ext_phy_type) {
9067                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9068                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9069                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9070                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9071                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9072                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9073                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9074                         cmd->port = PORT_FIBRE;
9075                         break;
9076
9077                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9078                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9079                         cmd->port = PORT_TP;
9080                         break;
9081
9082                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9083                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9084                                   bp->link_params.ext_phy_config);
9085                         break;
9086
9087                 default:
9088                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9089                            bp->link_params.ext_phy_config);
9090                         break;
9091                 }
9092         } else
9093                 cmd->port = PORT_TP;
9094
9095         cmd->phy_address = bp->mdio.prtad;
9096         cmd->transceiver = XCVR_INTERNAL;
9097
9098         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9099                 cmd->autoneg = AUTONEG_ENABLE;
9100         else
9101                 cmd->autoneg = AUTONEG_DISABLE;
9102
9103         cmd->maxtxpkt = 0;
9104         cmd->maxrxpkt = 0;
9105
9106         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9107            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9108            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9109            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9110            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9111            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9112            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9113
9114         return 0;
9115 }
9116
9117 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9118 {
9119         struct bnx2x *bp = netdev_priv(dev);
9120         u32 advertising;
9121
9122         if (IS_E1HMF(bp))
9123                 return 0;
9124
9125         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9126            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9127            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9128            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9129            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9130            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9131            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9132
9133         if (cmd->autoneg == AUTONEG_ENABLE) {
9134                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9135                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9136                         return -EINVAL;
9137                 }
9138
9139                 /* advertise the requested speed and duplex if supported */
9140                 cmd->advertising &= bp->port.supported;
9141
9142                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9143                 bp->link_params.req_duplex = DUPLEX_FULL;
9144                 bp->port.advertising |= (ADVERTISED_Autoneg |
9145                                          cmd->advertising);
9146
9147         } else { /* forced speed */
9148                 /* advertise the requested speed and duplex if supported */
9149                 switch (cmd->speed) {
9150                 case SPEED_10:
9151                         if (cmd->duplex == DUPLEX_FULL) {
9152                                 if (!(bp->port.supported &
9153                                       SUPPORTED_10baseT_Full)) {
9154                                         DP(NETIF_MSG_LINK,
9155                                            "10M full not supported\n");
9156                                         return -EINVAL;
9157                                 }
9158
9159                                 advertising = (ADVERTISED_10baseT_Full |
9160                                                ADVERTISED_TP);
9161                         } else {
9162                                 if (!(bp->port.supported &
9163                                       SUPPORTED_10baseT_Half)) {
9164                                         DP(NETIF_MSG_LINK,
9165                                            "10M half not supported\n");
9166                                         return -EINVAL;
9167                                 }
9168
9169                                 advertising = (ADVERTISED_10baseT_Half |
9170                                                ADVERTISED_TP);
9171                         }
9172                         break;
9173
9174                 case SPEED_100:
9175                         if (cmd->duplex == DUPLEX_FULL) {
9176                                 if (!(bp->port.supported &
9177                                                 SUPPORTED_100baseT_Full)) {
9178                                         DP(NETIF_MSG_LINK,
9179                                            "100M full not supported\n");
9180                                         return -EINVAL;
9181                                 }
9182
9183                                 advertising = (ADVERTISED_100baseT_Full |
9184                                                ADVERTISED_TP);
9185                         } else {
9186                                 if (!(bp->port.supported &
9187                                                 SUPPORTED_100baseT_Half)) {
9188                                         DP(NETIF_MSG_LINK,
9189                                            "100M half not supported\n");
9190                                         return -EINVAL;
9191                                 }
9192
9193                                 advertising = (ADVERTISED_100baseT_Half |
9194                                                ADVERTISED_TP);
9195                         }
9196                         break;
9197
9198                 case SPEED_1000:
9199                         if (cmd->duplex != DUPLEX_FULL) {
9200                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
9201                                 return -EINVAL;
9202                         }
9203
9204                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9205                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
9206                                 return -EINVAL;
9207                         }
9208
9209                         advertising = (ADVERTISED_1000baseT_Full |
9210                                        ADVERTISED_TP);
9211                         break;
9212
9213                 case SPEED_2500:
9214                         if (cmd->duplex != DUPLEX_FULL) {
9215                                 DP(NETIF_MSG_LINK,
9216                                    "2.5G half not supported\n");
9217                                 return -EINVAL;
9218                         }
9219
9220                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9221                                 DP(NETIF_MSG_LINK,
9222                                    "2.5G full not supported\n");
9223                                 return -EINVAL;
9224                         }
9225
9226                         advertising = (ADVERTISED_2500baseX_Full |
9227                                        ADVERTISED_TP);
9228                         break;
9229
9230                 case SPEED_10000:
9231                         if (cmd->duplex != DUPLEX_FULL) {
9232                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
9233                                 return -EINVAL;
9234                         }
9235
9236                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9237                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
9238                                 return -EINVAL;
9239                         }
9240
9241                         advertising = (ADVERTISED_10000baseT_Full |
9242                                        ADVERTISED_FIBRE);
9243                         break;
9244
9245                 default:
9246                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
9247                         return -EINVAL;
9248                 }
9249
9250                 bp->link_params.req_line_speed = cmd->speed;
9251                 bp->link_params.req_duplex = cmd->duplex;
9252                 bp->port.advertising = advertising;
9253         }
9254
9255         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9256            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
9257            bp->link_params.req_line_speed, bp->link_params.req_duplex,
9258            bp->port.advertising);
9259
9260         if (netif_running(dev)) {
9261                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9262                 bnx2x_link_set(bp);
9263         }
9264
9265         return 0;
9266 }
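/*
 * Typical usage reaching bnx2x_set_settings() (a hypothetical session):
 *
 *   # autonegotiate everything the port supports
 *   ethtool -s eth0 autoneg on
 *
 *   # force 1G full duplex (must be present in bp->port.supported)
 *   ethtool -s eth0 speed 1000 duplex full autoneg off
 *
 * Both paths end in bnx2x_link_set() when the interface is running.
 */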
9267
9268 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9269 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9270
9271 static int bnx2x_get_regs_len(struct net_device *dev)
9272 {
9273         struct bnx2x *bp = netdev_priv(dev);
9274         int regdump_len = 0;
9275         int i;
9276
9277         if (CHIP_IS_E1(bp)) {
9278                 for (i = 0; i < REGS_COUNT; i++)
9279                         if (IS_E1_ONLINE(reg_addrs[i].info))
9280                                 regdump_len += reg_addrs[i].size;
9281
9282                 for (i = 0; i < WREGS_COUNT_E1; i++)
9283                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9284                                 regdump_len += wreg_addrs_e1[i].size *
9285                                         (1 + wreg_addrs_e1[i].read_regs_count);
9286
9287         } else { /* E1H */
9288                 for (i = 0; i < REGS_COUNT; i++)
9289                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9290                                 regdump_len += reg_addrs[i].size;
9291
9292                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9293                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9294                                 regdump_len += wreg_addrs_e1h[i].size *
9295                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9296         }
9297         regdump_len *= 4;
9298         regdump_len += sizeof(struct dump_hdr);
9299
9300         return regdump_len;
9301 }
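/*
 * Worked example of the length math above (counts hypothetical): if the
 * online E1 registers sum to 1000 dwords and the wide-bus reads add
 * another 200, regdump_len = (1000 + 200) * 4 + sizeof(struct dump_hdr)
 * bytes; the per-register sizes are in 32-bit words, hence the "* 4".
 */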
9302
9303 static void bnx2x_get_regs(struct net_device *dev,
9304                            struct ethtool_regs *regs, void *_p)
9305 {
9306         u32 *p = _p, i, j;
9307         struct bnx2x *bp = netdev_priv(dev);
9308         struct dump_hdr dump_hdr = {0};
9309
9310         regs->version = 0;
9311         memset(p, 0, regs->len);
9312
9313         if (!netif_running(bp->dev))
9314                 return;
9315
9316         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9317         dump_hdr.dump_sign = dump_sign_all;
9318         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9319         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9320         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9321         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9322         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9323
9324         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9325         p += dump_hdr.hdr_size + 1;
9326
9327         if (CHIP_IS_E1(bp)) {
9328                 for (i = 0; i < REGS_COUNT; i++)
9329                         if (IS_E1_ONLINE(reg_addrs[i].info))
9330                                 for (j = 0; j < reg_addrs[i].size; j++)
9331                                         *p++ = REG_RD(bp,
9332                                                       reg_addrs[i].addr + j*4);
9333
9334         } else { /* E1H */
9335                 for (i = 0; i < REGS_COUNT; i++)
9336                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9337                                 for (j = 0; j < reg_addrs[i].size; j++)
9338                                         *p++ = REG_RD(bp,
9339                                                       reg_addrs[i].addr + j*4);
9340         }
9341 }
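/*
 * Note on the header arithmetic above: hdr_size is stored in dwords and
 * excludes itself (sizeof(struct dump_hdr) / 4 - 1), which is why the
 * output pointer advances by hdr_size + 1 dwords before the raw register
 * values are appended.
 */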
9342
9343 #define PHY_FW_VER_LEN                  10
9344
9345 static void bnx2x_get_drvinfo(struct net_device *dev,
9346                               struct ethtool_drvinfo *info)
9347 {
9348         struct bnx2x *bp = netdev_priv(dev);
9349         u8 phy_fw_ver[PHY_FW_VER_LEN];
9350
9351         strcpy(info->driver, DRV_MODULE_NAME);
9352         strcpy(info->version, DRV_MODULE_VERSION);
9353
9354         phy_fw_ver[0] = '\0';
9355         if (bp->port.pmf) {
9356                 bnx2x_acquire_phy_lock(bp);
9357                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9358                                              (bp->state != BNX2X_STATE_CLOSED),
9359                                              phy_fw_ver, PHY_FW_VER_LEN);
9360                 bnx2x_release_phy_lock(bp);
9361         }
9362
9363         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9364                  (bp->common.bc_ver & 0xff0000) >> 16,
9365                  (bp->common.bc_ver & 0xff00) >> 8,
9366                  (bp->common.bc_ver & 0xff),
9367                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9368         strcpy(info->bus_info, pci_name(bp->pdev));
9369         info->n_stats = BNX2X_NUM_STATS;
9370         info->testinfo_len = BNX2X_NUM_TESTS;
9371         info->eedump_len = bp->common.flash_size;
9372         info->regdump_len = bnx2x_get_regs_len(dev);
9373 }
9374
9375 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9376 {
9377         struct bnx2x *bp = netdev_priv(dev);
9378
9379         if (bp->flags & NO_WOL_FLAG) {
9380                 wol->supported = 0;
9381                 wol->wolopts = 0;
9382         } else {
9383                 wol->supported = WAKE_MAGIC;
9384                 if (bp->wol)
9385                         wol->wolopts = WAKE_MAGIC;
9386                 else
9387                         wol->wolopts = 0;
9388         }
9389         memset(&wol->sopass, 0, sizeof(wol->sopass));
9390 }
9391
9392 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9393 {
9394         struct bnx2x *bp = netdev_priv(dev);
9395
9396         if (wol->wolopts & ~WAKE_MAGIC)
9397                 return -EINVAL;
9398
9399         if (wol->wolopts & WAKE_MAGIC) {
9400                 if (bp->flags & NO_WOL_FLAG)
9401                         return -EINVAL;
9402
9403                 bp->wol = 1;
9404         } else
9405                 bp->wol = 0;
9406
9407         return 0;
9408 }
9409
9410 static u32 bnx2x_get_msglevel(struct net_device *dev)
9411 {
9412         struct bnx2x *bp = netdev_priv(dev);
9413
9414         return bp->msglevel;
9415 }
9416
9417 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9418 {
9419         struct bnx2x *bp = netdev_priv(dev);
9420
9421         if (capable(CAP_NET_ADMIN))
9422                 bp->msglevel = level;
9423 }
9424
9425 static int bnx2x_nway_reset(struct net_device *dev)
9426 {
9427         struct bnx2x *bp = netdev_priv(dev);
9428
9429         if (!bp->port.pmf)
9430                 return 0;
9431
9432         if (netif_running(dev)) {
9433                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9434                 bnx2x_link_set(bp);
9435         }
9436
9437         return 0;
9438 }
9439
9440 static u32 bnx2x_get_link(struct net_device *dev)
9441 {
9442         struct bnx2x *bp = netdev_priv(dev);
9443
9444         if (bp->flags & MF_FUNC_DIS)
9445                 return 0;
9446
9447         return bp->link_vars.link_up;
9448 }
9449
9450 static int bnx2x_get_eeprom_len(struct net_device *dev)
9451 {
9452         struct bnx2x *bp = netdev_priv(dev);
9453
9454         return bp->common.flash_size;
9455 }
9456
9457 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9458 {
9459         int port = BP_PORT(bp);
9460         int count, i;
9461         u32 val = 0;
9462
9463         /* adjust timeout for emulation/FPGA */
9464         count = NVRAM_TIMEOUT_COUNT;
9465         if (CHIP_REV_IS_SLOW(bp))
9466                 count *= 100;
9467
9468         /* request access to nvram interface */
9469         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9470                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9471
9472         for (i = 0; i < count*10; i++) {
9473                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9474                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9475                         break;
9476
9477                 udelay(5);
9478         }
9479
9480         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9481                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9482                 return -EBUSY;
9483         }
9484
9485         return 0;
9486 }
9487
9488 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9489 {
9490         int port = BP_PORT(bp);
9491         int count, i;
9492         u32 val = 0;
9493
9494         /* adjust timeout for emulation/FPGA */
9495         count = NVRAM_TIMEOUT_COUNT;
9496         if (CHIP_REV_IS_SLOW(bp))
9497                 count *= 100;
9498
9499         /* relinquish nvram interface */
9500         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9501                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9502
9503         for (i = 0; i < count*10; i++) {
9504                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9505                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9506                         break;
9507
9508                 udelay(5);
9509         }
9510
9511         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9512                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9513                 return -EBUSY;
9514         }
9515
9516         return 0;
9517 }
9518
9519 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9520 {
9521         u32 val;
9522
9523         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9524
9525         /* enable both bits, even on read */
9526         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9527                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9528                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9529 }
9530
9531 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9532 {
9533         u32 val;
9534
9535         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9536
9537         /* disable both bits, even after read */
9538         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9539                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9540                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9541 }
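
/* Editor's note: every NVRAM transfer in this file uses the same bracket:
 * take the per-port SW arbitration lock, set the access-enable bits, run
 * the transfer, then undo both in reverse order.  A sketch of that
 * skeleton, with the actual read/write loop abstracted behind a caller
 * supplied xfer() callback (the helper name is hypothetical): */
static int bnx2x_nvram_bracket_example(struct bnx2x *bp,
				       int (*xfer)(struct bnx2x *bp))
{
	int rc;

	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	bnx2x_enable_nvram_access(bp);
	rc = xfer(bp);			/* e.g. the dword read/write loop */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}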
9542
9543 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9544                                   u32 cmd_flags)
9545 {
9546         int count, i, rc;
9547         u32 val;
9548
9549         /* build the command word */
9550         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9551
9552         /* need to clear DONE bit separately */
9553         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9554
9555         /* address of the NVRAM to read from */
9556         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9557                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9558
9559         /* issue a read command */
9560         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9561
9562         /* adjust timeout for emulation/FPGA */
9563         count = NVRAM_TIMEOUT_COUNT;
9564         if (CHIP_REV_IS_SLOW(bp))
9565                 count *= 100;
9566
9567         /* wait for completion */
9568         *ret_val = 0;
9569         rc = -EBUSY;
9570         for (i = 0; i < count; i++) {
9571                 udelay(5);
9572                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9573
9574                 if (val & MCPR_NVM_COMMAND_DONE) {
9575                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9576                         /* we read nvram data in cpu order,
9577                          * but ethtool sees it as an array of bytes;
9578                          * converting to big-endian does the work */
9579                         *ret_val = cpu_to_be32(val);
9580                         rc = 0;
9581                         break;
9582                 }
9583         }
9584
9585         return rc;
9586 }
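
/* Editor's note: the completion wait above is the polling idiom shared by
 * all NVRAM commands here: clear DONE, program the command, then spin in
 * udelay(5) steps until the controller latches DONE or the (emulation
 * adjusted) count runs out.  Factored out, it would look roughly like
 * this sketch (bnx2x_nvram_wait_done() is a hypothetical name): */
static int bnx2x_nvram_wait_done(struct bnx2x *bp, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		udelay(5);
		if (REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND) &
		    MCPR_NVM_COMMAND_DONE)
			return 0;
	}

	return -EBUSY;
}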
9587
9588 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9589                             int buf_size)
9590 {
9591         int rc;
9592         u32 cmd_flags;
9593         __be32 val;
9594
9595         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9596                 DP(BNX2X_MSG_NVM,
9597                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9598                    offset, buf_size);
9599                 return -EINVAL;
9600         }
9601
9602         if (offset + buf_size > bp->common.flash_size) {
9603                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9604                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9605                    offset, buf_size, bp->common.flash_size);
9606                 return -EINVAL;
9607         }
9608
9609         /* request access to nvram interface */
9610         rc = bnx2x_acquire_nvram_lock(bp);
9611         if (rc)
9612                 return rc;
9613
9614         /* enable access to nvram interface */
9615         bnx2x_enable_nvram_access(bp);
9616
9617         /* read the first word(s) */
9618         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9619         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9620                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9621                 memcpy(ret_buf, &val, 4);
9622
9623                 /* advance to the next dword */
9624                 offset += sizeof(u32);
9625                 ret_buf += sizeof(u32);
9626                 buf_size -= sizeof(u32);
9627                 cmd_flags = 0;
9628         }
9629
9630         if (rc == 0) {
9631                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9632                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9633                 memcpy(ret_buf, &val, 4);
9634         }
9635
9636         /* disable access to nvram interface */
9637         bnx2x_disable_nvram_access(bp);
9638         bnx2x_release_nvram_lock(bp);
9639
9640         return rc;
9641 }
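
/* Editor's note: a minimal caller sketch for the helper above -- reading
 * the 4-byte bootstrap magic that bnx2x_test_nvram() checks further down.
 * Offset and length must both be dword aligned; the example function name
 * is the editor's, not part of the driver. */
static int bnx2x_nvram_magic_example(struct bnx2x *bp)
{
	__be32 magic;
	int rc;

	rc = bnx2x_nvram_read(bp, 0, (u8 *)&magic, sizeof(magic));
	if (rc)
		return rc;

	return (be32_to_cpu(magic) == 0x669955aa) ? 0 : -ENODEV;
}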
9642
9643 static int bnx2x_get_eeprom(struct net_device *dev,
9644                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9645 {
9646         struct bnx2x *bp = netdev_priv(dev);
9647         int rc;
9648
9649         if (!netif_running(dev))
9650                 return -EAGAIN;
9651
9652         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9653            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9654            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9655            eeprom->len, eeprom->len);
9656
9657         /* parameters already validated in ethtool_get_eeprom */
9658
9659         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9660
9661         return rc;
9662 }
9663
9664 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9665                                    u32 cmd_flags)
9666 {
9667         int count, i, rc;
9668
9669         /* build the command word */
9670         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9671
9672         /* need to clear DONE bit separately */
9673         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9674
9675         /* write the data */
9676         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9677
9678         /* address of the NVRAM to write to */
9679         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9680                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9681
9682         /* issue the write command */
9683         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9684
9685         /* adjust timeout for emulation/FPGA */
9686         count = NVRAM_TIMEOUT_COUNT;
9687         if (CHIP_REV_IS_SLOW(bp))
9688                 count *= 100;
9689
9690         /* wait for completion */
9691         rc = -EBUSY;
9692         for (i = 0; i < count; i++) {
9693                 udelay(5);
9694                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9695                 if (val & MCPR_NVM_COMMAND_DONE) {
9696                         rc = 0;
9697                         break;
9698                 }
9699         }
9700
9701         return rc;
9702 }
9703
9704 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9705
9706 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9707                               int buf_size)
9708 {
9709         int rc;
9710         u32 cmd_flags;
9711         u32 align_offset;
9712         __be32 val;
9713
9714         if (offset + buf_size > bp->common.flash_size) {
9715                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9716                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9717                    offset, buf_size, bp->common.flash_size);
9718                 return -EINVAL;
9719         }
9720
9721         /* request access to nvram interface */
9722         rc = bnx2x_acquire_nvram_lock(bp);
9723         if (rc)
9724                 return rc;
9725
9726         /* enable access to nvram interface */
9727         bnx2x_enable_nvram_access(bp);
9728
9729         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9730         align_offset = (offset & ~0x03);
9731         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9732
9733         if (rc == 0) {
9734                 val &= ~(0xff << BYTE_OFFSET(offset));
9735                 val |= (*data_buf << BYTE_OFFSET(offset));
9736
9737                 /* nvram data is returned as an array of bytes;
9738                  * convert it back to cpu order */
9739                 val = be32_to_cpu(val);
9740
9741                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9742                                              cmd_flags);
9743         }
9744
9745         /* disable access to nvram interface */
9746         bnx2x_disable_nvram_access(bp);
9747         bnx2x_release_nvram_lock(bp);
9748
9749         return rc;
9750 }
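
/* Editor's note: a worked example of the single-byte path above.  Writing
 * one byte at offset 0x102 reads the dword at align_offset 0x100, masks
 * byte lane BYTE_OFFSET(0x102) = 8 * (0x102 & 3) = 16 (bits 23:16), ORs
 * in the new byte and writes the whole dword back, so the other three
 * bytes of that dword are preserved. */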
9751
9752 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9753                              int buf_size)
9754 {
9755         int rc;
9756         u32 cmd_flags;
9757         u32 val;
9758         u32 written_so_far;
9759
9760         if (buf_size == 1)      /* ethtool */
9761                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9762
9763         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9764                 DP(BNX2X_MSG_NVM,
9765                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9766                    offset, buf_size);
9767                 return -EINVAL;
9768         }
9769
9770         if (offset + buf_size > bp->common.flash_size) {
9771                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9772                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9773                    offset, buf_size, bp->common.flash_size);
9774                 return -EINVAL;
9775         }
9776
9777         /* request access to nvram interface */
9778         rc = bnx2x_acquire_nvram_lock(bp);
9779         if (rc)
9780                 return rc;
9781
9782         /* enable access to nvram interface */
9783         bnx2x_enable_nvram_access(bp);
9784
9785         written_so_far = 0;
9786         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9787         while ((written_so_far < buf_size) && (rc == 0)) {
9788                 if (written_so_far == (buf_size - sizeof(u32)))
9789                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9790                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9791                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9792                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9793                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9794
9795                 memcpy(&val, data_buf, 4);
9796
9797                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9798
9799                 /* advance to the next dword */
9800                 offset += sizeof(u32);
9801                 data_buf += sizeof(u32);
9802                 written_so_far += sizeof(u32);
9803                 cmd_flags = 0;
9804         }
9805
9806         /* disable access to nvram interface */
9807         bnx2x_disable_nvram_access(bp);
9808         bnx2x_release_nvram_lock(bp);
9809
9810         return rc;
9811 }
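
/* Editor's note: how the FIRST/LAST flags fall out of the loop above for,
 * say, a 16-byte write at offset 0xf8 with NVRAM_PAGE_SIZE assumed to be
 * 256: the dword at 0xf8 gets FIRST, 0xfc gets LAST because
 * (0xfc + 4) % 256 == 0, 0x100 opens the next page and gets FIRST again,
 * and 0x104 is the final dword so it gets LAST.  The flash therefore sees
 * two page-bounded bursts. */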
9812
9813 static int bnx2x_set_eeprom(struct net_device *dev,
9814                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9815 {
9816         struct bnx2x *bp = netdev_priv(dev);
9817         int port = BP_PORT(bp);
9818         int rc = 0;
9819
9820         if (!netif_running(dev))
9821                 return -EAGAIN;
9822
9823         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9824            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9825            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9826            eeprom->len, eeprom->len);
9827
9828         /* parameters already validated in ethtool_set_eeprom */
9829
9830         /* PHY eeprom can be accessed only by the PMF */
9831         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9832             !bp->port.pmf)
9833                 return -EINVAL;
9834
9835         if (eeprom->magic == 0x50485950) {
9836                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9837                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9838
9839                 bnx2x_acquire_phy_lock(bp);
9840                 rc |= bnx2x_link_reset(&bp->link_params,
9841                                        &bp->link_vars, 0);
9842                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9843                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9844                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9845                                        MISC_REGISTERS_GPIO_HIGH, port);
9846                 bnx2x_release_phy_lock(bp);
9847                 bnx2x_link_report(bp);
9848
9849         } else if (eeprom->magic == 0x50485952) {
9850                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9851                 if (bp->state == BNX2X_STATE_OPEN) {
9852                         bnx2x_acquire_phy_lock(bp);
9853                         rc |= bnx2x_link_reset(&bp->link_params,
9854                                                &bp->link_vars, 1);
9855
9856                         rc |= bnx2x_phy_init(&bp->link_params,
9857                                              &bp->link_vars);
9858                         bnx2x_release_phy_lock(bp);
9859                         bnx2x_calc_fc_adv(bp);
9860                 }
9861         } else if (eeprom->magic == 0x53985943) {
9862                 /* 'PHYC' (0x50485943): PHY FW upgrade completed */
9863                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9864                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9865                         u8 ext_phy_addr =
9866                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9867
9868                         /* take the DSP out of download mode */
9869                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9870                                        MISC_REGISTERS_GPIO_LOW, port);
9871
9872                         bnx2x_acquire_phy_lock(bp);
9873
9874                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9875
9876                         /* wait 0.5 sec to allow it to run */
9877                         msleep(500);
9878                         bnx2x_ext_phy_hw_reset(bp, port);
9879                         msleep(500);
9880                         bnx2x_release_phy_lock(bp);
9881                 }
9882         } else
9883                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9884
9885         return rc;
9886 }
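
/* Editor's note: the three magics above implement a PHY firmware upgrade
 * handshake driven from user space through the EEPROM write path (e.g.
 * ethtool -E with the matching magic): 'PHYP' quiesces the link and, for
 * the SFX7101, raises GPIO 0; ordinary writes then stream the image into
 * NVRAM; 'PHYR' re-initialises the link; and the completion magic drops
 * GPIO 0 and reset-cycles the PHY.  Only the PMF function may run it. */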
9887
9888 static int bnx2x_get_coalesce(struct net_device *dev,
9889                               struct ethtool_coalesce *coal)
9890 {
9891         struct bnx2x *bp = netdev_priv(dev);
9892
9893         memset(coal, 0, sizeof(struct ethtool_coalesce));
9894
9895         coal->rx_coalesce_usecs = bp->rx_ticks;
9896         coal->tx_coalesce_usecs = bp->tx_ticks;
9897
9898         return 0;
9899 }
9900
9901 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
9902 static int bnx2x_set_coalesce(struct net_device *dev,
9903                               struct ethtool_coalesce *coal)
9904 {
9905         struct bnx2x *bp = netdev_priv(dev);
9906
9907         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9908         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9909                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9910
9911         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9912         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9913                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9914
9915         if (netif_running(dev))
9916                 bnx2x_update_coalesce(bp);
9917
9918         return 0;
9919 }
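
/* Editor's note: a worked example of the clamp above.  BNX2X_MAX_COALES_TOUT
 * is 0xf0 * 12 = 240 * 12 = 2880 us, so a request such as
 * "ethtool -C ethX rx-usecs 5000" is silently capped at 2880, while
 * "rx-usecs 1000" is taken as-is. */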
9920
9921 static void bnx2x_get_ringparam(struct net_device *dev,
9922                                 struct ethtool_ringparam *ering)
9923 {
9924         struct bnx2x *bp = netdev_priv(dev);
9925
9926         ering->rx_max_pending = MAX_RX_AVAIL;
9927         ering->rx_mini_max_pending = 0;
9928         ering->rx_jumbo_max_pending = 0;
9929
9930         ering->rx_pending = bp->rx_ring_size;
9931         ering->rx_mini_pending = 0;
9932         ering->rx_jumbo_pending = 0;
9933
9934         ering->tx_max_pending = MAX_TX_AVAIL;
9935         ering->tx_pending = bp->tx_ring_size;
9936 }
9937
9938 static int bnx2x_set_ringparam(struct net_device *dev,
9939                                struct ethtool_ringparam *ering)
9940 {
9941         struct bnx2x *bp = netdev_priv(dev);
9942         int rc = 0;
9943
9944         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9945             (ering->tx_pending > MAX_TX_AVAIL) ||
9946             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9947                 return -EINVAL;
9948
9949         bp->rx_ring_size = ering->rx_pending;
9950         bp->tx_ring_size = ering->tx_pending;
9951
9952         if (netif_running(dev)) {
9953                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9954                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9955         }
9956
9957         return rc;
9958 }
9959
9960 static void bnx2x_get_pauseparam(struct net_device *dev,
9961                                  struct ethtool_pauseparam *epause)
9962 {
9963         struct bnx2x *bp = netdev_priv(dev);
9964
9965         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9966                            BNX2X_FLOW_CTRL_AUTO) &&
9967                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9968
9969         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9970                             BNX2X_FLOW_CTRL_RX);
9971         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9972                             BNX2X_FLOW_CTRL_TX);
9973
9974         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9975            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9976            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9977 }
9978
9979 static int bnx2x_set_pauseparam(struct net_device *dev,
9980                                 struct ethtool_pauseparam *epause)
9981 {
9982         struct bnx2x *bp = netdev_priv(dev);
9983
9984         if (IS_E1HMF(bp))
9985                 return 0;
9986
9987         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9988            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9989            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9990
9991         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9992
9993         if (epause->rx_pause)
9994                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9995
9996         if (epause->tx_pause)
9997                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9998
9999         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10000                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10001
10002         if (epause->autoneg) {
10003                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10004                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
10005                         return -EINVAL;
10006                 }
10007
10008                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10009                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10010         }
10011
10012         DP(NETIF_MSG_LINK,
10013            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10014
10015         if (netif_running(dev)) {
10016                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10017                 bnx2x_link_set(bp);
10018         }
10019
10020         return 0;
10021 }
10022
10023 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10024 {
10025         struct bnx2x *bp = netdev_priv(dev);
10026         int changed = 0;
10027         int rc = 0;
10028
10029         /* TPA requires Rx CSUM offloading */
10030         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10031                 if (!(dev->features & NETIF_F_LRO)) {
10032                         dev->features |= NETIF_F_LRO;
10033                         bp->flags |= TPA_ENABLE_FLAG;
10034                         changed = 1;
10035                 }
10036
10037         } else if (dev->features & NETIF_F_LRO) {
10038                 dev->features &= ~NETIF_F_LRO;
10039                 bp->flags &= ~TPA_ENABLE_FLAG;
10040                 changed = 1;
10041         }
10042
10043         if (changed && netif_running(dev)) {
10044                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10045                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10046         }
10047
10048         return rc;
10049 }
10050
10051 static u32 bnx2x_get_rx_csum(struct net_device *dev)
10052 {
10053         struct bnx2x *bp = netdev_priv(dev);
10054
10055         return bp->rx_csum;
10056 }
10057
10058 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10059 {
10060         struct bnx2x *bp = netdev_priv(dev);
10061         int rc = 0;
10062
10063         bp->rx_csum = data;
10064
10065         /* Disable TPA when Rx CSUM is disabled; otherwise all
10066            TPA'ed packets will be discarded due to a wrong TCP CSUM */
10067         if (!data) {
10068                 u32 flags = ethtool_op_get_flags(dev);
10069
10070                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10071         }
10072
10073         return rc;
10074 }
10075
10076 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10077 {
10078         if (data) {
10079                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10080                 dev->features |= NETIF_F_TSO6;
10081         } else {
10082                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10083                 dev->features &= ~NETIF_F_TSO6;
10084         }
10085
10086         return 0;
10087 }
10088
10089 static const struct {
10090         char string[ETH_GSTRING_LEN];
10091 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10092         { "register_test (offline)" },
10093         { "memory_test (offline)" },
10094         { "loopback_test (offline)" },
10095         { "nvram_test (online)" },
10096         { "interrupt_test (online)" },
10097         { "link_test (online)" },
10098         { "idle check (online)" }
10099 };
10100
10101 static int bnx2x_test_registers(struct bnx2x *bp)
10102 {
10103         int idx, i, rc = -ENODEV;
10104         u32 wr_val = 0;
10105         int port = BP_PORT(bp);
10106         static const struct {
10107                 u32  offset0;
10108                 u32  offset1;
10109                 u32  mask;
10110         } reg_tbl[] = {
10111 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
10112                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
10113                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
10114                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
10115                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
10116                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
10117                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
10118                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
10119                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
10120                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
10121 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
10122                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
10123                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
10124                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
10125                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
10126                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10127                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
10128                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
10129                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
10130                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
10131 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
10132                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
10133                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
10134                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
10135                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
10136                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
10137                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
10138                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
10139                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
10140                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
10141 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
10142                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
10143                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
10144                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10145                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
10146                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10147                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
10148
10149                 { 0xffffffff, 0, 0x00000000 }
10150         };
10151
10152         if (!netif_running(bp->dev))
10153                 return rc;
10154
10155         /* Repeat the test twice:
10156            First by writing 0x00000000, second by writing 0xffffffff */
10157         for (idx = 0; idx < 2; idx++) {
10158
10159                 switch (idx) {
10160                 case 0:
10161                         wr_val = 0;
10162                         break;
10163                 case 1:
10164                         wr_val = 0xffffffff;
10165                         break;
10166                 }
10167
10168                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10169                         u32 offset, mask, save_val, val;
10170
10171                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10172                         mask = reg_tbl[i].mask;
10173
10174                         save_val = REG_RD(bp, offset);
10175
10176                         REG_WR(bp, offset, wr_val);
10177                         val = REG_RD(bp, offset);
10178
10179                         /* Restore the original register's value */
10180                         REG_WR(bp, offset, save_val);
10181
10182                         /* verify that the value is as expected */
10183                         if ((val & mask) != (wr_val & mask))
10184                                 goto test_reg_exit;
10185                 }
10186         }
10187
10188         rc = 0;
10189
10190 test_reg_exit:
10191         return rc;
10192 }
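
/* Editor's note: the heart of the register test above, shown as a
 * stand-alone sketch.  Each register is saved, written with the test
 * pattern, read back and compared under its writable-bits mask, then
 * restored, so the walk is non-destructive even on failure
 * (bnx2x_probe_one_reg() is a hypothetical name): */
static int bnx2x_probe_one_reg(struct bnx2x *bp, u32 offset, u32 mask,
			       u32 wr_val)
{
	u32 save_val = REG_RD(bp, offset);
	u32 val;

	REG_WR(bp, offset, wr_val);
	val = REG_RD(bp, offset);
	REG_WR(bp, offset, save_val);	/* always restore the old value */

	return ((val & mask) == (wr_val & mask)) ? 0 : -ENODEV;
}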
10193
10194 static int bnx2x_test_memory(struct bnx2x *bp)
10195 {
10196         int i, j, rc = -ENODEV;
10197         u32 val;
10198         static const struct {
10199                 u32 offset;
10200                 int size;
10201         } mem_tbl[] = {
10202                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
10203                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10204                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
10205                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
10206                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
10207                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
10208                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
10209
10210                 { 0xffffffff, 0 }
10211         };
10212         static const struct {
10213                 char *name;
10214                 u32 offset;
10215                 u32 e1_mask;
10216                 u32 e1h_mask;
10217         } prty_tbl[] = {
10218                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
10219                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
10220                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
10221                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
10222                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
10223                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
10224
10225                 { NULL, 0xffffffff, 0, 0 }
10226         };
10227
10228         if (!netif_running(bp->dev))
10229                 return rc;
10230
10231         /* Go through all the memories */
10232         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10233                 for (j = 0; j < mem_tbl[i].size; j++)
10234                         REG_RD(bp, mem_tbl[i].offset + j*4);
10235
10236         /* Check the parity status */
10237         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10238                 val = REG_RD(bp, prty_tbl[i].offset);
10239                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10240                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10241                         DP(NETIF_MSG_HW,
10242                            "%s is 0x%x\n", prty_tbl[i].name, val);
10243                         goto test_mem_exit;
10244                 }
10245         }
10246
10247         rc = 0;
10248
10249 test_mem_exit:
10250         return rc;
10251 }
10252
10253 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10254 {
10255         int cnt = 1000;
10256
10257         if (link_up)
10258                 while (bnx2x_link_test(bp) && cnt--)
10259                         msleep(10);
10260 }
10261
10262 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10263 {
10264         unsigned int pkt_size, num_pkts, i;
10265         struct sk_buff *skb;
10266         unsigned char *packet;
10267         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10268         struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
10269         u16 tx_start_idx, tx_idx;
10270         u16 rx_start_idx, rx_idx;
10271         u16 pkt_prod, bd_prod;
10272         struct sw_tx_bd *tx_buf;
10273         struct eth_tx_start_bd *tx_start_bd;
10274         struct eth_tx_parse_bd *pbd = NULL;
10275         dma_addr_t mapping;
10276         union eth_rx_cqe *cqe;
10277         u8 cqe_fp_flags;
10278         struct sw_rx_bd *rx_buf;
10279         u16 len;
10280         int rc = -ENODEV;
10281
10282         /* check the loopback mode */
10283         switch (loopback_mode) {
10284         case BNX2X_PHY_LOOPBACK:
10285                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10286                         return -EINVAL;
10287                 break;
10288         case BNX2X_MAC_LOOPBACK:
10289                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10290                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10291                 break;
10292         default:
10293                 return -EINVAL;
10294         }
10295
10296         /* prepare the loopback packet */
10297         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10298                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10299         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10300         if (!skb) {
10301                 rc = -ENOMEM;
10302                 goto test_loopback_exit;
10303         }
10304         packet = skb_put(skb, pkt_size);
10305         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10306         memset(packet + ETH_ALEN, 0, ETH_ALEN);
10307         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10308         for (i = ETH_HLEN; i < pkt_size; i++)
10309                 packet[i] = (unsigned char) (i & 0xff);
10310
10311         /* send the loopback packet */
10312         num_pkts = 0;
10313         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10314         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10315
10316         pkt_prod = fp_tx->tx_pkt_prod++;
10317         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10318         tx_buf->first_bd = fp_tx->tx_bd_prod;
10319         tx_buf->skb = skb;
10320         tx_buf->flags = 0;
10321
10322         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10323         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10324         mapping = pci_map_single(bp->pdev, skb->data,
10325                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10326         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10327         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10328         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10329         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10330         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10331         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10332         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10333                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10334
10335         /* turn on parsing and get a BD */
10336         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10337         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10338
10339         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10340
10341         wmb();
10342
10343         fp_tx->tx_db.data.prod += 2;
10344         barrier();
10345         DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10346
10347         mmiowb();
10348
10349         num_pkts++;
10350         fp_tx->tx_bd_prod += 2; /* start + pbd */
10351         bp->dev->trans_start = jiffies;
10352
10353         udelay(100);
10354
10355         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10356         if (tx_idx != tx_start_idx + num_pkts)
10357                 goto test_loopback_exit;
10358
10359         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10360         if (rx_idx != rx_start_idx + num_pkts)
10361                 goto test_loopback_exit;
10362
10363         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10364         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10365         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10366                 goto test_loopback_rx_exit;
10367
10368         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10369         if (len != pkt_size)
10370                 goto test_loopback_rx_exit;
10371
10372         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10373         skb = rx_buf->skb;
10374         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10375         for (i = ETH_HLEN; i < pkt_size; i++)
10376                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10377                         goto test_loopback_rx_exit;
10378
10379         rc = 0;
10380
10381 test_loopback_rx_exit:
10382
10383         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10384         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10385         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10386         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10387
10388         /* Update producers */
10389         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10390                              fp_rx->rx_sge_prod);
10391
10392 test_loopback_exit:
10393         bp->link_params.loopback_mode = LOOPBACK_NONE;
10394
10395         return rc;
10396 }
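
/* Editor's note: the loopback frame built above is fully deterministic,
 * which is what lets the Rx side verify it byte-for-byte: destination MAC
 * = own dev_addr, source MAC = all zeroes, the remainder of the Ethernet
 * header = 0x77 filler, and payload byte i = (i & 0xff).  Any corruption
 * on the MAC or PHY loop shows up as the length or pattern mismatch
 * checked just before test_loopback_rx_exit. */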
10397
10398 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10399 {
10400         int rc = 0, res;
10401
10402         if (!netif_running(bp->dev))
10403                 return BNX2X_LOOPBACK_FAILED;
10404
10405         bnx2x_netif_stop(bp, 1);
10406         bnx2x_acquire_phy_lock(bp);
10407
10408         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10409         if (res) {
10410                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10411                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10412         }
10413
10414         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10415         if (res) {
10416                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10417                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10418         }
10419
10420         bnx2x_release_phy_lock(bp);
10421         bnx2x_netif_start(bp);
10422
10423         return rc;
10424 }
10425
10426 #define CRC32_RESIDUAL                  0xdebb20e3
10427
10428 static int bnx2x_test_nvram(struct bnx2x *bp)
10429 {
10430         static const struct {
10431                 int offset;
10432                 int size;
10433         } nvram_tbl[] = {
10434                 {     0,  0x14 }, /* bootstrap */
10435                 {  0x14,  0xec }, /* dir */
10436                 { 0x100, 0x350 }, /* manuf_info */
10437                 { 0x450,  0xf0 }, /* feature_info */
10438                 { 0x640,  0x64 }, /* upgrade_key_info */
10439                 { 0x6a4,  0x64 },
10440                 { 0x708,  0x70 }, /* manuf_key_info */
10441                 { 0x778,  0x70 },
10442                 {     0,     0 }
10443         };
10444         __be32 buf[0x350 / 4];
10445         u8 *data = (u8 *)buf;
10446         int i, rc;
10447         u32 magic, crc;
10448
10449         rc = bnx2x_nvram_read(bp, 0, data, 4);
10450         if (rc) {
10451                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10452                 goto test_nvram_exit;
10453         }
10454
10455         magic = be32_to_cpu(buf[0]);
10456         if (magic != 0x669955aa) {
10457                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10458                 rc = -ENODEV;
10459                 goto test_nvram_exit;
10460         }
10461
10462         for (i = 0; nvram_tbl[i].size; i++) {
10463
10464                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10465                                       nvram_tbl[i].size);
10466                 if (rc) {
10467                         DP(NETIF_MSG_PROBE,
10468                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10469                         goto test_nvram_exit;
10470                 }
10471
10472                 crc = ether_crc_le(nvram_tbl[i].size, data);
10473                 if (crc != CRC32_RESIDUAL) {
10474                         DP(NETIF_MSG_PROBE,
10475                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10476                         rc = -ENODEV;
10477                         goto test_nvram_exit;
10478                 }
10479         }
10480
10481 test_nvram_exit:
10482         return rc;
10483 }
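
/* Editor's note: on CRC32_RESIDUAL.  Each region listed in nvram_tbl
 * evidently carries its little-endian CRC32 inside the checked bytes, and
 * CRC32 has the property that checksumming a block together with its
 * appended CRC yields a fixed residual, 0xdebb20e3, regardless of the
 * data.  That is why one constant validates every region without locating
 * each stored CRC individually. */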
10484
10485 static int bnx2x_test_intr(struct bnx2x *bp)
10486 {
10487         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10488         int i, rc;
10489
10490         if (!netif_running(bp->dev))
10491                 return -ENODEV;
10492
10493         config->hdr.length = 0;
10494         if (CHIP_IS_E1(bp))
10495                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10496         else
10497                 config->hdr.offset = BP_FUNC(bp);
10498         config->hdr.client_id = bp->fp->cl_id;
10499         config->hdr.reserved1 = 0;
10500
10501         bp->set_mac_pending++;
10502         smp_wmb();
10503         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10504                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10505                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10506         if (rc == 0) {
10507                 for (i = 0; i < 10; i++) {
10508                         if (!bp->set_mac_pending)
10509                                 break;
10510                         smp_rmb();
10511                         msleep_interruptible(10);
10512                 }
10513                 if (i == 10)
10514                         rc = -ENODEV;
10515         }
10516
10517         return rc;
10518 }
10519
10520 static void bnx2x_self_test(struct net_device *dev,
10521                             struct ethtool_test *etest, u64 *buf)
10522 {
10523         struct bnx2x *bp = netdev_priv(dev);
10524
10525         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10526
10527         if (!netif_running(dev))
10528                 return;
10529
10530         /* offline tests are not supported in MF mode */
10531         if (IS_E1HMF(bp))
10532                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10533
10534         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10535                 int port = BP_PORT(bp);
10536                 u32 val;
10537                 u8 link_up;
10538
10539                 /* save current value of input enable for TX port IF */
10540                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10541                 /* disable input for TX port IF */
10542                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10543
10544                 link_up = (bnx2x_link_test(bp) == 0);
10545                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10546                 bnx2x_nic_load(bp, LOAD_DIAG);
10547                 /* wait until link state is restored */
10548                 bnx2x_wait_for_link(bp, link_up);
10549
10550                 if (bnx2x_test_registers(bp) != 0) {
10551                         buf[0] = 1;
10552                         etest->flags |= ETH_TEST_FL_FAILED;
10553                 }
10554                 if (bnx2x_test_memory(bp) != 0) {
10555                         buf[1] = 1;
10556                         etest->flags |= ETH_TEST_FL_FAILED;
10557                 }
10558                 buf[2] = bnx2x_test_loopback(bp, link_up);
10559                 if (buf[2] != 0)
10560                         etest->flags |= ETH_TEST_FL_FAILED;
10561
10562                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10563
10564                 /* restore input for TX port IF */
10565                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10566
10567                 bnx2x_nic_load(bp, LOAD_NORMAL);
10568                 /* wait until link state is restored */
10569                 bnx2x_wait_for_link(bp, link_up);
10570         }
10571         if (bnx2x_test_nvram(bp) != 0) {
10572                 buf[3] = 1;
10573                 etest->flags |= ETH_TEST_FL_FAILED;
10574         }
10575         if (bnx2x_test_intr(bp) != 0) {
10576                 buf[4] = 1;
10577                 etest->flags |= ETH_TEST_FL_FAILED;
10578         }
10579         if (bp->port.pmf)
10580                 if (bnx2x_link_test(bp) != 0) {
10581                         buf[5] = 1;
10582                         etest->flags |= ETH_TEST_FL_FAILED;
10583                 }
10584
10585 #ifdef BNX2X_EXTRA_DEBUG
10586         bnx2x_panic_dump(bp);
10587 #endif
10588 }
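
/* Editor's note: the buf[] slots filled in above line up one-to-one with
 * bnx2x_tests_str_arr[]: buf[0] register_test, buf[1] memory_test,
 * buf[2] loopback_test (a bitmask of BNX2X_PHY/MAC_LOOPBACK_FAILED rather
 * than a 0/1 flag), buf[3] nvram_test, buf[4] interrupt_test,
 * buf[5] link_test; buf[6] ("idle check") stays at the memset zero. */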
10589
10590 static const struct {
10591         long offset;
10592         int size;
10593         u8 string[ETH_GSTRING_LEN];
10594 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10595 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10596         { Q_STATS_OFFSET32(error_bytes_received_hi),
10597                                                 8, "[%d]: rx_error_bytes" },
10598         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10599                                                 8, "[%d]: rx_ucast_packets" },
10600         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10601                                                 8, "[%d]: rx_mcast_packets" },
10602         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10603                                                 8, "[%d]: rx_bcast_packets" },
10604         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10605         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10606                                          4, "[%d]: rx_phy_ip_err_discards"},
10607         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10608                                          4, "[%d]: rx_skb_alloc_discard" },
10609         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10610
10611 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10612         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10613                                                         8, "[%d]: tx_packets" }
10614 };
10615
10616 static const struct {
10617         long offset;
10618         int size;
10619         u32 flags;
10620 #define STATS_FLAGS_PORT                1
10621 #define STATS_FLAGS_FUNC                2
10622 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10623         u8 string[ETH_GSTRING_LEN];
10624 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10625 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10626                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10627         { STATS_OFFSET32(error_bytes_received_hi),
10628                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10629         { STATS_OFFSET32(total_unicast_packets_received_hi),
10630                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10631         { STATS_OFFSET32(total_multicast_packets_received_hi),
10632                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10633         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10634                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10635         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10636                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10637         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10638                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10639         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10640                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10641         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10642                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10643 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10644                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10645         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10646                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10647         { STATS_OFFSET32(no_buff_discard_hi),
10648                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10649         { STATS_OFFSET32(mac_filter_discard),
10650                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10651         { STATS_OFFSET32(xxoverflow_discard),
10652                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10653         { STATS_OFFSET32(brb_drop_hi),
10654                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10655         { STATS_OFFSET32(brb_truncate_hi),
10656                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10657         { STATS_OFFSET32(pause_frames_received_hi),
10658                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10659         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10660                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10661         { STATS_OFFSET32(nig_timer_max),
10662                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10663 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10664                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10665         { STATS_OFFSET32(rx_skb_alloc_failed),
10666                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10667         { STATS_OFFSET32(hw_csum_err),
10668                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10669
10670         { STATS_OFFSET32(total_bytes_transmitted_hi),
10671                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10672         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10673                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10674         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10675                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10676         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10677                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10678         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10679                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10680         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10681                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10682         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10683                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10684 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10685                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10686         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10687                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10688         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10689                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10690         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10691                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10692         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10693                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10694         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10695                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10696         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10697                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10698         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10699                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10700         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10701                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10702         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10703                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10704 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10705                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10706         { STATS_OFFSET32(pause_frames_sent_hi),
10707                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10708 };
10709
10710 #define IS_PORT_STAT(i) \
10711         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10712 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10713 #define IS_E1HMF_MODE_STAT(bp) \
10714                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10715
10716 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10717 {
10718         struct bnx2x *bp = netdev_priv(dev);
10719         int i, num_stats;
10720
10721         switch (stringset) {
10722         case ETH_SS_STATS:
10723                 if (is_multi(bp)) {
10724                         num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10725                         if (!IS_E1HMF_MODE_STAT(bp))
10726                                 num_stats += BNX2X_NUM_STATS;
10727                 } else {
10728                         if (IS_E1HMF_MODE_STAT(bp)) {
10729                                 num_stats = 0;
10730                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
10731                                         if (IS_FUNC_STAT(i))
10732                                                 num_stats++;
10733                         } else
10734                                 num_stats = BNX2X_NUM_STATS;
10735                 }
10736                 return num_stats;
10737
10738         case ETH_SS_TEST:
10739                 return BNX2X_NUM_TESTS;
10740
10741         default:
10742                 return -EINVAL;
10743         }
10744 }
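
/* Editor's note: a worked example of the count above.  With multi_mode
 * enabled and, say, four Rx queues, ETH_SS_STATS reports
 * 4 * BNX2X_NUM_Q_STATS per-queue counters plus the BNX2X_NUM_STATS
 * globals (the globals are dropped in E1H multi-function mode unless the
 * BNX2X_MSG_STATS debug bit is set); bnx2x_get_strings() and
 * bnx2x_get_ethtool_stats() below emit their output in exactly that
 * order. */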
10745
10746 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10747 {
10748         struct bnx2x *bp = netdev_priv(dev);
10749         int i, j, k;
10750
10751         switch (stringset) {
10752         case ETH_SS_STATS:
10753                 if (is_multi(bp)) {
10754                         k = 0;
10755                         for_each_rx_queue(bp, i) {
10756                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10757                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10758                                                 bnx2x_q_stats_arr[j].string, i);
10759                                 k += BNX2X_NUM_Q_STATS;
10760                         }
10761                         if (IS_E1HMF_MODE_STAT(bp))
10762                                 break;
10763                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10764                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10765                                        bnx2x_stats_arr[j].string);
10766                 } else {
10767                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10768                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10769                                         continue;
10770                                 strcpy(buf + j*ETH_GSTRING_LEN,
10771                                        bnx2x_stats_arr[i].string);
10772                                 j++;
10773                         }
10774                 }
10775                 break;
10776
10777         case ETH_SS_TEST:
10778                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10779                 break;
10780         }
10781 }
10782
10783 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10784                                     struct ethtool_stats *stats, u64 *buf)
10785 {
10786         struct bnx2x *bp = netdev_priv(dev);
10787         u32 *hw_stats, *offset;
10788         int i, j, k;
10789
10790         if (is_multi(bp)) {
10791                 k = 0;
10792                 for_each_rx_queue(bp, i) {
10793                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10794                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10795                                 if (bnx2x_q_stats_arr[j].size == 0) {
10796                                         /* skip this counter */
10797                                         buf[k + j] = 0;
10798                                         continue;
10799                                 }
10800                                 offset = (hw_stats +
10801                                           bnx2x_q_stats_arr[j].offset);
10802                                 if (bnx2x_q_stats_arr[j].size == 4) {
10803                                         /* 4-byte counter */
10804                                         buf[k + j] = (u64) *offset;
10805                                         continue;
10806                                 }
10807                                 /* 8-byte counter */
10808                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10809                         }
10810                         k += BNX2X_NUM_Q_STATS;
10811                 }
10812                 if (IS_E1HMF_MODE_STAT(bp))
10813                         return;
10814                 hw_stats = (u32 *)&bp->eth_stats;
10815                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10816                         if (bnx2x_stats_arr[j].size == 0) {
10817                                 /* skip this counter */
10818                                 buf[k + j] = 0;
10819                                 continue;
10820                         }
10821                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10822                         if (bnx2x_stats_arr[j].size == 4) {
10823                                 /* 4-byte counter */
10824                                 buf[k + j] = (u64) *offset;
10825                                 continue;
10826                         }
10827                         /* 8-byte counter */
10828                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10829                 }
10830         } else {
10831                 hw_stats = (u32 *)&bp->eth_stats;
10832                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10833                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10834                                 continue;
10835                         if (bnx2x_stats_arr[i].size == 0) {
10836                                 /* skip this counter */
10837                                 buf[j] = 0;
10838                                 j++;
10839                                 continue;
10840                         }
10841                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10842                         if (bnx2x_stats_arr[i].size == 4) {
10843                                 /* 4-byte counter */
10844                                 buf[j] = (u64) *offset;
10845                                 j++;
10846                                 continue;
10847                         }
10848                         /* 8-byte counter */
10849                         buf[j] = HILO_U64(*offset, *(offset + 1));
10850                         j++;
10851                 }
10852         }
10853 }
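
/* Editor's note: the 8-byte counters above are stored as two 32-bit
 * words, high word first, and HILO_U64 is assumed to combine them as
 *
 *	#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
 *
 * so a counter stored as { 0x00000001, 0x00000002 } is reported as
 * 0x0000000100000002.
 */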
10854
10855 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10856 {
10857         struct bnx2x *bp = netdev_priv(dev);
10858         int port = BP_PORT(bp);
10859         int i;
10860
10861         if (!netif_running(dev))
10862                 return 0;
10863
10864         if (!bp->port.pmf)
10865                 return 0;
10866
10867         if (data == 0)
10868                 data = 2;
10869
10870         for (i = 0; i < (data * 2); i++) {
10871                 if ((i % 2) == 0)
10872                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10873                                       bp->link_params.hw_led_mode,
10874                                       bp->link_params.chip_id);
10875                 else
10876                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10877                                       bp->link_params.hw_led_mode,
10878                                       bp->link_params.chip_id);
10879
10880                 msleep_interruptible(500);
10881                 if (signal_pending(current))
10882                         break;
10883         }
10884
10885         if (bp->link_vars.link_up)
10886                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10887                               bp->link_vars.line_speed,
10888                               bp->link_params.hw_led_mode,
10889                               bp->link_params.chip_id);
10890
10891         return 0;
10892 }
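
/* Usage note (editor's sketch): this is the ethtool LED-blink hook;
 * "data" is the blink duration in seconds and the loop above toggles the
 * LED every 500ms, so e.g.
 *
 *	# ethtool -p eth0 4	(assumed interface name)
 *
 * blinks the port LED for roughly four seconds, then restores it.
 */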
10893
10894 static const struct ethtool_ops bnx2x_ethtool_ops = {
10895         .get_settings           = bnx2x_get_settings,
10896         .set_settings           = bnx2x_set_settings,
10897         .get_drvinfo            = bnx2x_get_drvinfo,
10898         .get_regs_len           = bnx2x_get_regs_len,
10899         .get_regs               = bnx2x_get_regs,
10900         .get_wol                = bnx2x_get_wol,
10901         .set_wol                = bnx2x_set_wol,
10902         .get_msglevel           = bnx2x_get_msglevel,
10903         .set_msglevel           = bnx2x_set_msglevel,
10904         .nway_reset             = bnx2x_nway_reset,
10905         .get_link               = bnx2x_get_link,
10906         .get_eeprom_len         = bnx2x_get_eeprom_len,
10907         .get_eeprom             = bnx2x_get_eeprom,
10908         .set_eeprom             = bnx2x_set_eeprom,
10909         .get_coalesce           = bnx2x_get_coalesce,
10910         .set_coalesce           = bnx2x_set_coalesce,
10911         .get_ringparam          = bnx2x_get_ringparam,
10912         .set_ringparam          = bnx2x_set_ringparam,
10913         .get_pauseparam         = bnx2x_get_pauseparam,
10914         .set_pauseparam         = bnx2x_set_pauseparam,
10915         .get_rx_csum            = bnx2x_get_rx_csum,
10916         .set_rx_csum            = bnx2x_set_rx_csum,
10917         .get_tx_csum            = ethtool_op_get_tx_csum,
10918         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10919         .set_flags              = bnx2x_set_flags,
10920         .get_flags              = ethtool_op_get_flags,
10921         .get_sg                 = ethtool_op_get_sg,
10922         .set_sg                 = ethtool_op_set_sg,
10923         .get_tso                = ethtool_op_get_tso,
10924         .set_tso                = bnx2x_set_tso,
10925         .self_test              = bnx2x_self_test,
10926         .get_sset_count         = bnx2x_get_sset_count,
10927         .get_strings            = bnx2x_get_strings,
10928         .phys_id                = bnx2x_phys_id,
10929         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10930 };
10931
10932 /* end of ethtool_ops */
10933
10934 /****************************************************************************
10935 * General service functions
10936 ****************************************************************************/
10937
10938 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10939 {
10940         u16 pmcsr;
10941
10942         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10943
10944         switch (state) {
10945         case PCI_D0:
10946                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10947                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10948                                        PCI_PM_CTRL_PME_STATUS));
10949
10950                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10951                         /* delay required during transition out of D3hot */
10952                         msleep(20);
10953                 break;
10954
10955         case PCI_D3hot:
10956                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10957                 pmcsr |= 3;
10958
10959                 if (bp->wol)
10960                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10961
10962                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10963                                       pmcsr);
10964
10965                 /* No more memory access after this point until
10966                  * the device is brought back to D0.
10967                  */
10968                 break;
10969
10970         default:
10971                 return -EINVAL;
10972         }
10973         return 0;
10974 }
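
/* Editor's sketch: the low two bits of PMCSR select the power state and
 * PCI_D3hot is defined as 3, so the D3hot branch above is equivalent to
 *
 *	pmcsr = (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | PCI_D3hot;
 *
 * with PME additionally enabled when wake-on-LAN is configured.
 */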
10975
10976 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10977 {
10978         u16 rx_cons_sb;
10979
10980         /* Tell compiler that status block fields can change */
10981         barrier();
10982         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10983         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10984                 rx_cons_sb++;
10985         return (fp->rx_comp_cons != rx_cons_sb);
10986 }
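
/* Editor's note: the MAX_RCQ_DESC_CNT test above accounts for the last
 * descriptor of each completion-queue page, which is a "next page"
 * pointer rather than a real completion; conceptually:
 *
 *	if (rx_cons_sb_landed_on_next_page_bd)	(illustration only)
 *		rx_cons_sb++;
 *
 * keeping the consumer index comparable to fp->rx_comp_cons.
 */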
10987
10988 /*
10989  * net_device service functions
10990  */
10991
10992 static int bnx2x_poll(struct napi_struct *napi, int budget)
10993 {
10994         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10995                                                  napi);
10996         struct bnx2x *bp = fp->bp;
10997         int work_done = 0;
10998
10999 #ifdef BNX2X_STOP_ON_ERROR
11000         if (unlikely(bp->panic))
11001                 goto poll_panic;
11002 #endif
11003
11004         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
11005         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
11006
11007         bnx2x_update_fpsb_idx(fp);
11008
11009         if (bnx2x_has_rx_work(fp)) {
11010                 work_done = bnx2x_rx_int(fp, budget);
11011
11012                 /* must not complete if we consumed full budget */
11013                 if (work_done >= budget)
11014                         goto poll_again;
11015         }
11016
11017         /* bnx2x_has_rx_work() reads the status block, thus we need to
11018          * ensure that the status block indices have actually been read
11019          * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work),
11020          * so that we won't write a "newer" value of the status block to IGU
11021          * (if there was a DMA right after bnx2x_has_rx_work and there is
11022          * no rmb, the memory read (bnx2x_update_fpsb_idx) may be postponed
11023          * to right before bnx2x_ack_sb). In that case there would never be
11024          * another interrupt until there is another update of the status
11025          * block, while there is still unhandled work.
11026          */
11027         rmb();
11028
11029         if (!bnx2x_has_rx_work(fp)) {
11030 #ifdef BNX2X_STOP_ON_ERROR
11031 poll_panic:
11032 #endif
11033                 napi_complete(napi);
11034
11035                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
11036                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
11037                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11038                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
11039         }
11040
11041 poll_again:
11042         return work_done;
11043 }
11044
11045
11046 /* We split the first BD into a headers BD and a data BD
11047  * to ease the pain of our fellow microcode engineers;
11048  * we use one DMA mapping for both BDs.
11049  * So far this has only been observed to happen
11050  * in Other Operating Systems(TM).
11051  */
11052 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11053                                    struct bnx2x_fastpath *fp,
11054                                    struct sw_tx_bd *tx_buf,
11055                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
11056                                    u16 bd_prod, int nbd)
11057 {
11058         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11059         struct eth_tx_bd *d_tx_bd;
11060         dma_addr_t mapping;
11061         int old_len = le16_to_cpu(h_tx_bd->nbytes);
11062
11063         /* first fix first BD */
11064         h_tx_bd->nbd = cpu_to_le16(nbd);
11065         h_tx_bd->nbytes = cpu_to_le16(hlen);
11066
11067         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11068            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11069            h_tx_bd->addr_lo, h_tx_bd->nbd);
11070
11071         /* now get a new data BD
11072          * (after the pbd) and fill it */
11073         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11074         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11075
11076         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11077                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11078
11079         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11080         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11081         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11082
11083         /* this marks the BD as one that has no individual mapping */
11084         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11085
11086         DP(NETIF_MSG_TX_QUEUED,
11087            "TSO split data size is %d (%x:%x)\n",
11088            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11089
11090         /* update tx_bd */
11091         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11092
11093         return bd_prod;
11094 }
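
/* Illustrative arithmetic (editor's sketch): if the first BD originally
 * mapped old_len bytes at DMA address A and the headers take hlen bytes,
 * the split above leaves
 *
 *	headers BD: addr = A,        nbytes = hlen
 *	data BD:    addr = A + hlen, nbytes = old_len - hlen
 *
 * which is what the U64_HI()/U64_LO() juggling computes on the split
 * 32-bit address words.
 */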
11095
11096 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11097 {
11098         if (fix > 0)
11099                 csum = (u16) ~csum_fold(csum_sub(csum,
11100                                 csum_partial(t_header - fix, fix, 0)));
11101
11102         else if (fix < 0)
11103                 csum = (u16) ~csum_fold(csum_add(csum,
11104                                 csum_partial(t_header, -fix, 0)));
11105
11106         return swab16(csum);
11107 }
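
/* Editor's note: "fix" is the distance between where the stack started
 * its partial checksum and where the HW expects it to start (the
 * transport header); the csum_sub()/csum_add() above re-bases the sum
 * accordingly. It is used further down along the lines of
 *
 *	pbd->tcp_pseudo_csum = bnx2x_csum_fix(skb_transport_header(skb),
 *					      SKB_CS(skb), SKB_CS_OFF(skb));
 */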
11108
11109 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11110 {
11111         u32 rc;
11112
11113         if (skb->ip_summed != CHECKSUM_PARTIAL)
11114                 rc = XMIT_PLAIN;
11115
11116         else {
11117                 if (skb->protocol == htons(ETH_P_IPV6)) {
11118                         rc = XMIT_CSUM_V6;
11119                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11120                                 rc |= XMIT_CSUM_TCP;
11121
11122                 } else {
11123                         rc = XMIT_CSUM_V4;
11124                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11125                                 rc |= XMIT_CSUM_TCP;
11126                 }
11127         }
11128
11129         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11130                 rc |= XMIT_GSO_V4;
11131
11132         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11133                 rc |= XMIT_GSO_V6;
11134
11135         return rc;
11136 }
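
/* Example classification (editor's sketch): a CHECKSUM_PARTIAL TCPv4 skb
 * with GSO enabled yields
 *
 *	xmit_type = XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4;
 *
 * while a skb without CHECKSUM_PARTIAL maps to XMIT_PLAIN (the GSO bits
 * are still OR-ed in whenever gso_type is set).
 */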
11137
11138 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11139 /* check if the packet requires linearization (the packet is too fragmented);
11140    no need to check fragmentation if page size > 8K (there will be no
11141    violation of FW restrictions) */
11142 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11143                              u32 xmit_type)
11144 {
11145         int to_copy = 0;
11146         int hlen = 0;
11147         int first_bd_sz = 0;
11148
11149         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11150         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11151
11152                 if (xmit_type & XMIT_GSO) {
11153                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11154                         /* Check if LSO packet needs to be copied:
11155                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11156                         int wnd_size = MAX_FETCH_BD - 3;
11157                         /* Number of windows to check */
11158                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11159                         int wnd_idx = 0;
11160                         int frag_idx = 0;
11161                         u32 wnd_sum = 0;
11162
11163                         /* Headers length */
11164                         hlen = (int)(skb_transport_header(skb) - skb->data) +
11165                                 tcp_hdrlen(skb);
11166
11167                         /* Amount of data (w/o headers) in the linear part of the SKB */
11168                         first_bd_sz = skb_headlen(skb) - hlen;
11169
11170                         wnd_sum  = first_bd_sz;
11171
11172                         /* Calculate the first sum - it's special */
11173                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11174                                 wnd_sum +=
11175                                         skb_shinfo(skb)->frags[frag_idx].size;
11176
11177                         /* If there is data in the linear part of the skb - check it */
11178                         if (first_bd_sz > 0) {
11179                                 if (unlikely(wnd_sum < lso_mss)) {
11180                                         to_copy = 1;
11181                                         goto exit_lbl;
11182                                 }
11183
11184                                 wnd_sum -= first_bd_sz;
11185                         }
11186
11187                         /* Others are easier: run through the frag list and
11188                            check all windows */
11189                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11190                                 wnd_sum +=
11191                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11192
11193                                 if (unlikely(wnd_sum < lso_mss)) {
11194                                         to_copy = 1;
11195                                         break;
11196                                 }
11197                                 wnd_sum -=
11198                                         skb_shinfo(skb)->frags[wnd_idx].size;
11199                         }
11200                 } else {
11201                         /* a non-LSO packet that is too fragmented
11202                            should always be linearized */
11203                         to_copy = 1;
11204                 }
11205         }
11206
11207 exit_lbl:
11208         if (unlikely(to_copy))
11209                 DP(NETIF_MSG_TX_QUEUED,
11210                    "Linearization IS REQUIRED for %s packet. "
11211                    "num_frags %d  hlen %d  first_bd_sz %d\n",
11212                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11213                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11214
11215         return to_copy;
11216 }
11217 #endif
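
/* Worked example for the window check above (editor's sketch; assumes
 * MAX_FETCH_BD is 13, so wnd_size = 10): the FW must be able to fetch
 * every MSS-sized chunk of an LSO packet from at most wnd_size BDs.
 * The code slides a window of wnd_size fragments over the frag list and
 * flags the packet for linearization if any window sums below lso_mss,
 * e.g.:
 *
 *	ten 100-byte frags against an lso_mss of 1460 give wnd_sum = 1000,
 *	wnd_sum < lso_mss, so to_copy = 1 and the skb gets linearized.
 */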
11218
11219 /* called with netif_tx_lock
11220  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11221  * netif_wake_queue()
11222  */
11223 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11224 {
11225         struct bnx2x *bp = netdev_priv(dev);
11226         struct bnx2x_fastpath *fp, *fp_stat;
11227         struct netdev_queue *txq;
11228         struct sw_tx_bd *tx_buf;
11229         struct eth_tx_start_bd *tx_start_bd;
11230         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11231         struct eth_tx_parse_bd *pbd = NULL;
11232         u16 pkt_prod, bd_prod;
11233         int nbd, fp_index;
11234         dma_addr_t mapping;
11235         u32 xmit_type = bnx2x_xmit_type(bp, skb);
11236         int i;
11237         u8 hlen = 0;
11238         __le16 pkt_size = 0;
11239
11240 #ifdef BNX2X_STOP_ON_ERROR
11241         if (unlikely(bp->panic))
11242                 return NETDEV_TX_BUSY;
11243 #endif
11244
11245         fp_index = skb_get_queue_mapping(skb);
11246         txq = netdev_get_tx_queue(dev, fp_index);
11247
11248         fp = &bp->fp[fp_index + bp->num_rx_queues];
11249         fp_stat = &bp->fp[fp_index];
11250
11251         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11252                 fp_stat->eth_q_stats.driver_xoff++;
11253                 netif_tx_stop_queue(txq);
11254                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11255                 return NETDEV_TX_BUSY;
11256         }
11257
11258         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
11259            "  gso type %x  xmit_type %x\n",
11260            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11261            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11262
11263 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11264         /* First, check if we need to linearize the skb (due to FW
11265            restrictions). No need to check fragmentation if page size > 8K
11266            (there will be no violation of FW restrictions) */
11267         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11268                 /* Statistics of linearization */
11269                 bp->lin_cnt++;
11270                 if (skb_linearize(skb) != 0) {
11271                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11272                            "silently dropping this SKB\n");
11273                         dev_kfree_skb_any(skb);
11274                         return NETDEV_TX_OK;
11275                 }
11276         }
11277 #endif
11278
11279         /*
11280         Please read carefully. First we use one BD which we mark as start,
11281         then we have a parsing info BD (used for TSO or xsum),
11282         and only then we have the rest of the TSO BDs.
11283         (don't forget to mark the last one as last,
11284         and to unmap only AFTER you write to the BD ...)
11285         And above all, all pbd sizes are in words - NOT DWORDS!
11286         */
11287
11288         pkt_prod = fp->tx_pkt_prod++;
11289         bd_prod = TX_BD(fp->tx_bd_prod);
11290
11291         /* get a tx_buf and first BD */
11292         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11293         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11294
11295         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11296         tx_start_bd->general_data = (UNICAST_ADDRESS <<
11297                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11298         /* header nbd */
11299         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11300
11301         /* remember the first BD of the packet */
11302         tx_buf->first_bd = fp->tx_bd_prod;
11303         tx_buf->skb = skb;
11304         tx_buf->flags = 0;
11305
11306         DP(NETIF_MSG_TX_QUEUED,
11307            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
11308            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11309
11310 #ifdef BCM_VLAN
11311         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11312             (bp->flags & HW_VLAN_TX_FLAG)) {
11313                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11314                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11315         } else
11316 #endif
11317                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11318
11319         /* turn on parsing and get a BD */
11320         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11321         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11322
11323         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11324
11325         if (xmit_type & XMIT_CSUM) {
11326                 hlen = (skb_network_header(skb) - skb->data) / 2;
11327
11328                 /* for now NS flag is not used in Linux */
11329                 pbd->global_data =
11330                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11331                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11332
11333                 pbd->ip_hlen = (skb_transport_header(skb) -
11334                                 skb_network_header(skb)) / 2;
11335
11336                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11337
11338                 pbd->total_hlen = cpu_to_le16(hlen);
11339                 hlen = hlen*2;
11340
11341                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11342
11343                 if (xmit_type & XMIT_CSUM_V4)
11344                         tx_start_bd->bd_flags.as_bitfield |=
11345                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11346                 else
11347                         tx_start_bd->bd_flags.as_bitfield |=
11348                                                 ETH_TX_BD_FLAGS_IPV6;
11349
11350                 if (xmit_type & XMIT_CSUM_TCP) {
11351                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11352
11353                 } else {
11354                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11355
11356                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11357
11358                         DP(NETIF_MSG_TX_QUEUED,
11359                            "hlen %d  fix %d  csum before fix %x\n",
11360                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11361
11362                         /* HW bug: fixup the CSUM */
11363                         pbd->tcp_pseudo_csum =
11364                                 bnx2x_csum_fix(skb_transport_header(skb),
11365                                                SKB_CS(skb), fix);
11366
11367                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11368                            pbd->tcp_pseudo_csum);
11369                 }
11370         }
11371
11372         mapping = pci_map_single(bp->pdev, skb->data,
11373                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11374
11375         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11376         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11377         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11378         tx_start_bd->nbd = cpu_to_le16(nbd);
11379         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11380         pkt_size = tx_start_bd->nbytes;
11381
11382         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11383            "  nbytes %d  flags %x  vlan %x\n",
11384            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11385            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11386            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11387
11388         if (xmit_type & XMIT_GSO) {
11389
11390                 DP(NETIF_MSG_TX_QUEUED,
11391                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11392                    skb->len, hlen, skb_headlen(skb),
11393                    skb_shinfo(skb)->gso_size);
11394
11395                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11396
11397                 if (unlikely(skb_headlen(skb) > hlen))
11398                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11399                                                  hlen, bd_prod, ++nbd);
11400
11401                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11402                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11403                 pbd->tcp_flags = pbd_tcp_flags(skb);
11404
11405                 if (xmit_type & XMIT_GSO_V4) {
11406                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11407                         pbd->tcp_pseudo_csum =
11408                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11409                                                           ip_hdr(skb)->daddr,
11410                                                           0, IPPROTO_TCP, 0));
11411
11412                 } else
11413                         pbd->tcp_pseudo_csum =
11414                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11415                                                         &ipv6_hdr(skb)->daddr,
11416                                                         0, IPPROTO_TCP, 0));
11417
11418                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11419         }
11420         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11421
11422         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11423                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11424
11425                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11426                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11427                 if (total_pkt_bd == NULL)
11428                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11429
11430                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11431                                        frag->size, PCI_DMA_TODEVICE);
11432
11433                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11434                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11435                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11436                 le16_add_cpu(&pkt_size, frag->size);
11437
11438                 DP(NETIF_MSG_TX_QUEUED,
11439                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11440                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11441                    le16_to_cpu(tx_data_bd->nbytes));
11442         }
11443
11444         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11445
11446         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11447
11448         /* now send a tx doorbell, also counting the next-page BD
11449          * if the packet contains or ends with it
11450          */
11451         if (TX_BD_POFF(bd_prod) < nbd)
11452                 nbd++;
11453
11454         if (total_pkt_bd != NULL)
11455                 total_pkt_bd->total_pkt_bytes = pkt_size;
11456
11457         if (pbd)
11458                 DP(NETIF_MSG_TX_QUEUED,
11459                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11460                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11461                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11462                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11463                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11464
11465         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11466
11467         /*
11468          * Make sure that the BD data is updated before updating the producer
11469          * since FW might read the BD right after the producer is updated.
11470          * This is only applicable for weak-ordered memory model archs such
11471          * as IA-64. The following barrier is also mandatory since the FW
11472          * will assume packets always have BDs.
11473          */
11474         wmb();
11475
11476         fp->tx_db.data.prod += nbd;
11477         barrier();
11478         DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11479
11480         mmiowb();
11481
11482         fp->tx_bd_prod += nbd;
11483
11484         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11485                 netif_tx_stop_queue(txq);
11486                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11487                    if we put Tx into XOFF state. */
11488                 smp_mb();
11489                 fp_stat->eth_q_stats.driver_xoff++;
11490                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11491                         netif_tx_wake_queue(txq);
11492         }
11493         fp_stat->tx_pkt++;
11494
11495         return NETDEV_TX_OK;
11496 }
11497
11498 /* called with rtnl_lock */
11499 static int bnx2x_open(struct net_device *dev)
11500 {
11501         struct bnx2x *bp = netdev_priv(dev);
11502
11503         netif_carrier_off(dev);
11504
11505         bnx2x_set_power_state(bp, PCI_D0);
11506
11507         return bnx2x_nic_load(bp, LOAD_OPEN);
11508 }
11509
11510 /* called with rtnl_lock */
11511 static int bnx2x_close(struct net_device *dev)
11512 {
11513         struct bnx2x *bp = netdev_priv(dev);
11514
11515         /* Unload the driver, release IRQs */
11516         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11517         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11518                 if (!CHIP_REV_IS_SLOW(bp))
11519                         bnx2x_set_power_state(bp, PCI_D3hot);
11520
11521         return 0;
11522 }
11523
11524 /* called with netif_tx_lock from dev_mcast.c */
11525 static void bnx2x_set_rx_mode(struct net_device *dev)
11526 {
11527         struct bnx2x *bp = netdev_priv(dev);
11528         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11529         int port = BP_PORT(bp);
11530
11531         if (bp->state != BNX2X_STATE_OPEN) {
11532                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11533                 return;
11534         }
11535
11536         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11537
11538         if (dev->flags & IFF_PROMISC)
11539                 rx_mode = BNX2X_RX_MODE_PROMISC;
11540
11541         else if ((dev->flags & IFF_ALLMULTI) ||
11542                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11543                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11544
11545         else { /* some multicasts */
11546                 if (CHIP_IS_E1(bp)) {
11547                         int i, old, offset;
11548                         struct dev_mc_list *mclist;
11549                         struct mac_configuration_cmd *config =
11550                                                 bnx2x_sp(bp, mcast_config);
11551
11552                         for (i = 0, mclist = dev->mc_list;
11553                              mclist && (i < dev->mc_count);
11554                              i++, mclist = mclist->next) {
11555
11556                                 config->config_table[i].
11557                                         cam_entry.msb_mac_addr =
11558                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11559                                 config->config_table[i].
11560                                         cam_entry.middle_mac_addr =
11561                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11562                                 config->config_table[i].
11563                                         cam_entry.lsb_mac_addr =
11564                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11565                                 config->config_table[i].cam_entry.flags =
11566                                                         cpu_to_le16(port);
11567                                 config->config_table[i].
11568                                         target_table_entry.flags = 0;
11569                                 config->config_table[i].target_table_entry.
11570                                         clients_bit_vector =
11571                                                 cpu_to_le32(1 << BP_L_ID(bp));
11572                                 config->config_table[i].
11573                                         target_table_entry.vlan_id = 0;
11574
11575                                 DP(NETIF_MSG_IFUP,
11576                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11577                                    config->config_table[i].
11578                                                 cam_entry.msb_mac_addr,
11579                                    config->config_table[i].
11580                                                 cam_entry.middle_mac_addr,
11581                                    config->config_table[i].
11582                                                 cam_entry.lsb_mac_addr);
11583                         }
11584                         old = config->hdr.length;
11585                         if (old > i) {
11586                                 for (; i < old; i++) {
11587                                         if (CAM_IS_INVALID(config->
11588                                                            config_table[i])) {
11589                                                 /* already invalidated */
11590                                                 break;
11591                                         }
11592                                         /* invalidate */
11593                                         CAM_INVALIDATE(config->
11594                                                        config_table[i]);
11595                                 }
11596                         }
11597
11598                         if (CHIP_REV_IS_SLOW(bp))
11599                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11600                         else
11601                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11602
11603                         config->hdr.length = i;
11604                         config->hdr.offset = offset;
11605                         config->hdr.client_id = bp->fp->cl_id;
11606                         config->hdr.reserved1 = 0;
11607
11608                         bp->set_mac_pending++;
11609                         smp_wmb();
11610
11611                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11612                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11613                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11614                                       0);
11615                 } else { /* E1H */
11616                         /* Accept one or more multicasts */
11617                         struct dev_mc_list *mclist;
11618                         u32 mc_filter[MC_HASH_SIZE];
11619                         u32 crc, bit, regidx;
11620                         int i;
11621
11622                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11623
11624                         for (i = 0, mclist = dev->mc_list;
11625                              mclist && (i < dev->mc_count);
11626                              i++, mclist = mclist->next) {
11627
11628                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11629                                    mclist->dmi_addr);
11630
11631                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11632                                 bit = (crc >> 24) & 0xff;
11633                                 regidx = bit >> 5;
11634                                 bit &= 0x1f;
11635                                 mc_filter[regidx] |= (1 << bit);
11636                         }
11637
11638                         for (i = 0; i < MC_HASH_SIZE; i++)
11639                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11640                                        mc_filter[i]);
11641                 }
11642         }
11643
11644         bp->rx_mode = rx_mode;
11645         bnx2x_set_storm_rx_mode(bp);
11646 }
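
/* Editor's sketch of the E1H multicast hash above: the top byte of the
 * crc32c of the MAC address selects one bit out of a filter spread over
 * MC_HASH_SIZE 32-bit registers (assumed to be 8, i.e. 256 filter bits):
 *
 *	crc    = crc32c_le(0, mac, ETH_ALEN);
 *	bit    = (crc >> 24) & 0xff;	(0..255)
 *	regidx = bit >> 5;		(which 32-bit word)
 *	bit   &= 0x1f;			(which bit in that word)
 */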
11647
11648 /* called with rtnl_lock */
11649 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11650 {
11651         struct sockaddr *addr = p;
11652         struct bnx2x *bp = netdev_priv(dev);
11653
11654         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11655                 return -EINVAL;
11656
11657         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11658         if (netif_running(dev)) {
11659                 if (CHIP_IS_E1(bp))
11660                         bnx2x_set_eth_mac_addr_e1(bp, 1);
11661                 else
11662                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
11663         }
11664
11665         return 0;
11666 }
11667
11668 /* called with rtnl_lock */
11669 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11670                            int devad, u16 addr)
11671 {
11672         struct bnx2x *bp = netdev_priv(netdev);
11673         u16 value;
11674         int rc;
11675         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11676
11677         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11678            prtad, devad, addr);
11679
11680         if (prtad != bp->mdio.prtad) {
11681                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11682                    prtad, bp->mdio.prtad);
11683                 return -EINVAL;
11684         }
11685
11686         /* The HW expects different devad if CL22 is used */
11687         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11688
11689         bnx2x_acquire_phy_lock(bp);
11690         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11691                              devad, addr, &value);
11692         bnx2x_release_phy_lock(bp);
11693         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11694
11695         if (!rc)
11696                 rc = value;
11697         return rc;
11698 }
11699
11700 /* called with rtnl_lock */
11701 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11702                             u16 addr, u16 value)
11703 {
11704         struct bnx2x *bp = netdev_priv(netdev);
11705         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11706         int rc;
11707
11708         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11709                            " value 0x%x\n", prtad, devad, addr, value);
11710
11711         if (prtad != bp->mdio.prtad) {
11712                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11713                    prtad, bp->mdio.prtad);
11714                 return -EINVAL;
11715         }
11716
11717         /* The HW expects different devad if CL22 is used */
11718         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11719
11720         bnx2x_acquire_phy_lock(bp);
11721         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11722                               devad, addr, value);
11723         bnx2x_release_phy_lock(bp);
11724         return rc;
11725 }
11726
11727 /* called with rtnl_lock */
11728 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11729 {
11730         struct bnx2x *bp = netdev_priv(dev);
11731         struct mii_ioctl_data *mdio = if_mii(ifr);
11732
11733         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11734            mdio->phy_id, mdio->reg_num, mdio->val_in);
11735
11736         if (!netif_running(dev))
11737                 return -EAGAIN;
11738
11739         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11740 }
11741
11742 /* called with rtnl_lock */
11743 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11744 {
11745         struct bnx2x *bp = netdev_priv(dev);
11746         int rc = 0;
11747
11748         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11749             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11750                 return -EINVAL;
11751
11752         /* This does not race with packet allocation
11753          * because the actual alloc size is
11754          * only updated as part of load
11755          */
11756         dev->mtu = new_mtu;
11757
11758         if (netif_running(dev)) {
11759                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11760                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11761         }
11762
11763         return rc;
11764 }
11765
11766 static void bnx2x_tx_timeout(struct net_device *dev)
11767 {
11768         struct bnx2x *bp = netdev_priv(dev);
11769
11770 #ifdef BNX2X_STOP_ON_ERROR
11771         if (!bp->panic)
11772                 bnx2x_panic();
11773 #endif
11774         /* This allows the netif to be shut down gracefully before resetting */
11775         schedule_work(&bp->reset_task);
11776 }
11777
11778 #ifdef BCM_VLAN
11779 /* called with rtnl_lock */
11780 static void bnx2x_vlan_rx_register(struct net_device *dev,
11781                                    struct vlan_group *vlgrp)
11782 {
11783         struct bnx2x *bp = netdev_priv(dev);
11784
11785         bp->vlgrp = vlgrp;
11786
11787         /* Set flags according to the required capabilities */
11788         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11789
11790         if (dev->features & NETIF_F_HW_VLAN_TX)
11791                 bp->flags |= HW_VLAN_TX_FLAG;
11792
11793         if (dev->features & NETIF_F_HW_VLAN_RX)
11794                 bp->flags |= HW_VLAN_RX_FLAG;
11795
11796         if (netif_running(dev))
11797                 bnx2x_set_client_config(bp);
11798 }
11799
11800 #endif
11801
11802 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11803 static void poll_bnx2x(struct net_device *dev)
11804 {
11805         struct bnx2x *bp = netdev_priv(dev);
11806
11807         disable_irq(bp->pdev->irq);
11808         bnx2x_interrupt(bp->pdev->irq, dev);
11809         enable_irq(bp->pdev->irq);
11810 }
11811 #endif
11812
11813 static const struct net_device_ops bnx2x_netdev_ops = {
11814         .ndo_open               = bnx2x_open,
11815         .ndo_stop               = bnx2x_close,
11816         .ndo_start_xmit         = bnx2x_start_xmit,
11817         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11818         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11819         .ndo_validate_addr      = eth_validate_addr,
11820         .ndo_do_ioctl           = bnx2x_ioctl,
11821         .ndo_change_mtu         = bnx2x_change_mtu,
11822         .ndo_tx_timeout         = bnx2x_tx_timeout,
11823 #ifdef BCM_VLAN
11824         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11825 #endif
11826 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11827         .ndo_poll_controller    = poll_bnx2x,
11828 #endif
11829 };
11830
11831 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11832                                     struct net_device *dev)
11833 {
11834         struct bnx2x *bp;
11835         int rc;
11836
11837         SET_NETDEV_DEV(dev, &pdev->dev);
11838         bp = netdev_priv(dev);
11839
11840         bp->dev = dev;
11841         bp->pdev = pdev;
11842         bp->flags = 0;
11843         bp->func = PCI_FUNC(pdev->devfn);
11844
11845         rc = pci_enable_device(pdev);
11846         if (rc) {
11847                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11848                 goto err_out;
11849         }
11850
11851         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11852                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11853                        " aborting\n");
11854                 rc = -ENODEV;
11855                 goto err_out_disable;
11856         }
11857
11858         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11859                 printk(KERN_ERR PFX "Cannot find second PCI device"
11860                        " base address, aborting\n");
11861                 rc = -ENODEV;
11862                 goto err_out_disable;
11863         }
11864
11865         if (atomic_read(&pdev->enable_cnt) == 1) {
11866                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11867                 if (rc) {
11868                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11869                                " aborting\n");
11870                         goto err_out_disable;
11871                 }
11872
11873                 pci_set_master(pdev);
11874                 pci_save_state(pdev);
11875         }
11876
11877         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11878         if (bp->pm_cap == 0) {
11879                 printk(KERN_ERR PFX "Cannot find power management"
11880                        " capability, aborting\n");
11881                 rc = -EIO;
11882                 goto err_out_release;
11883         }
11884
11885         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11886         if (bp->pcie_cap == 0) {
11887                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11888                        " aborting\n");
11889                 rc = -EIO;
11890                 goto err_out_release;
11891         }
11892
11893         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11894                 bp->flags |= USING_DAC_FLAG;
11895                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11896                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11897                                " failed, aborting\n");
11898                         rc = -EIO;
11899                         goto err_out_release;
11900                 }
11901
11902         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11903                 printk(KERN_ERR PFX "System does not support DMA,"
11904                        " aborting\n");
11905                 rc = -EIO;
11906                 goto err_out_release;
11907         }
11908
11909         dev->mem_start = pci_resource_start(pdev, 0);
11910         dev->base_addr = dev->mem_start;
11911         dev->mem_end = pci_resource_end(pdev, 0);
11912
11913         dev->irq = pdev->irq;
11914
11915         bp->regview = pci_ioremap_bar(pdev, 0);
11916         if (!bp->regview) {
11917                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11918                 rc = -ENOMEM;
11919                 goto err_out_release;
11920         }
11921
11922         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11923                                         min_t(u64, BNX2X_DB_SIZE,
11924                                               pci_resource_len(pdev, 2)));
11925         if (!bp->doorbells) {
11926                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11927                 rc = -ENOMEM;
11928                 goto err_out_unmap;
11929         }
11930
11931         bnx2x_set_power_state(bp, PCI_D0);
11932
11933         /* clean indirect addresses */
11934         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11935                                PCICFG_VENDOR_ID_OFFSET);
11936         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11937         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11938         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11939         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11940
11941         dev->watchdog_timeo = TX_TIMEOUT;
11942
11943         dev->netdev_ops = &bnx2x_netdev_ops;
11944         dev->ethtool_ops = &bnx2x_ethtool_ops;
11945         dev->features |= NETIF_F_SG;
11946         dev->features |= NETIF_F_HW_CSUM;
11947         if (bp->flags & USING_DAC_FLAG)
11948                 dev->features |= NETIF_F_HIGHDMA;
11949         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11950         dev->features |= NETIF_F_TSO6;
11951 #ifdef BCM_VLAN
11952         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11953         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11954
11955         dev->vlan_features |= NETIF_F_SG;
11956         dev->vlan_features |= NETIF_F_HW_CSUM;
11957         if (bp->flags & USING_DAC_FLAG)
11958                 dev->vlan_features |= NETIF_F_HIGHDMA;
11959         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11960         dev->vlan_features |= NETIF_F_TSO6;
11961 #endif
11962
11963         /* get_port_hwinfo() will set prtad and mmds properly */
11964         bp->mdio.prtad = MDIO_PRTAD_NONE;
11965         bp->mdio.mmds = 0;
11966         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11967         bp->mdio.dev = dev;
11968         bp->mdio.mdio_read = bnx2x_mdio_read;
11969         bp->mdio.mdio_write = bnx2x_mdio_write;
11970
11971         return 0;
11972
11973 err_out_unmap:
11974         if (bp->regview) {
11975                 iounmap(bp->regview);
11976                 bp->regview = NULL;
11977         }
11978         if (bp->doorbells) {
11979                 iounmap(bp->doorbells);
11980                 bp->doorbells = NULL;
11981         }
11982
11983 err_out_release:
11984         if (atomic_read(&pdev->enable_cnt) == 1)
11985                 pci_release_regions(pdev);
11986
11987 err_out_disable:
11988         pci_disable_device(pdev);
11989         pci_set_drvdata(pdev, NULL);
11990
11991 err_out:
11992         return rc;
11993 }
11994
11995 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11996                                                  int *width, int *speed)
11997 {
11998         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11999
12000         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
12001
12002         /* returned speed value: 1 = 2.5 GT/s (Gen1), 2 = 5 GT/s (Gen2) */
12003         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
12004 }
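
/* Illustrative decode (editor's sketch): with a raw link-control value
 * whose width field holds 8 and whose speed field holds 1, the function
 * above reports
 *
 *	width = 8;	speed = 1;	(1 = 2.5 GT/s, 2 = 5 GT/s)
 *
 * i.e. a PCIe x8 Gen1 link.
 */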
12005
12006 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
12007 {
12008         const struct firmware *firmware = bp->firmware;
12009         struct bnx2x_fw_file_hdr *fw_hdr;
12010         struct bnx2x_fw_file_section *sections;
12011         u32 offset, len, num_ops;
12012         u16 *ops_offsets;
12013         int i;
12014         const u8 *fw_ver;
12015
12016         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12017                 return -EINVAL;
12018
12019         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12020         sections = (struct bnx2x_fw_file_section *)fw_hdr;
12021
12022         /* Make sure none of the offsets and sizes make us read beyond
12023          * the end of the firmware data */
12024         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12025                 offset = be32_to_cpu(sections[i].offset);
12026                 len = be32_to_cpu(sections[i].len);
12027                 if (offset + len > firmware->size) {
12028                         printk(KERN_ERR PFX "Section %d length is out of "
12029                                             "bounds\n", i);
12030                         return -EINVAL;
12031                 }
12032         }
12033
12034         /* Likewise for the init_ops offsets */
12035         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12036         ops_offsets = (u16 *)(firmware->data + offset);
12037         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12038
12039         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12040                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12041                         printk(KERN_ERR PFX "Section offset %d is out of "
12042                                             "bounds\n", i);
12043                         return -EINVAL;
12044                 }
12045         }
12046
12047         /* Check FW version */
12048         offset = be32_to_cpu(fw_hdr->fw_version.offset);
12049         fw_ver = firmware->data + offset;
12050         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12051             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12052             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12053             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12054                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
12055                                     " Should be %d.%d.%d.%d\n",
12056                        fw_ver[0], fw_ver[1], fw_ver[2],
12057                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12058                        BCM_5710_FW_MINOR_VERSION,
12059                        BCM_5710_FW_REVISION_VERSION,
12060                        BCM_5710_FW_ENGINEERING_VERSION);
12061                 return -EINVAL;
12062         }
12063
12064         return 0;
12065 }
12066
12067 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12068 {
12069         const __be32 *source = (const __be32 *)_source;
12070         u32 *target = (u32 *)_target;
12071         u32 i;
12072
12073         for (i = 0; i < n/4; i++)
12074                 target[i] = be32_to_cpu(source[i]);
12075 }
12076
12077 /*
12078  * Ops array is stored in the following format:
12079  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12080  */
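      /*
       * Worked example (bytes are illustrative): the big-endian words
       * 0x02001040 0x00000001 unpack to op = 0x02, offset = 0x001040 and
       * raw_data = 0x00000001; each entry thus consumes 8 bytes, which is
       * why bnx2x_prep_ops() below walks two __be32 words per raw_op.
       */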
12081 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12082 {
12083         const __be32 *source = (const __be32 *)_source;
12084         struct raw_op *target = (struct raw_op *)_target;
12085         u32 i, j, tmp;
12086
12087         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12088                 tmp = be32_to_cpu(source[j]);
12089                 target[i].op = (tmp >> 24) & 0xff;
12090                 target[i].offset = tmp & 0xffffff;
12091                 target[i].raw_data = be32_to_cpu(source[j+1]);
12092         }
12093 }
12094
12095 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12096 {
12097         const __be16 *source = (const __be16 *)_source;
12098         u16 *target = (u16 *)_target;
12099         u32 i;
12100
12101         for (i = 0; i < n/2; i++)
12102                 target[i] = be16_to_cpu(source[i]);
12103 }
12104
12105 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12106         do { \
12107                 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12108                 bp->arr = kmalloc(len, GFP_KERNEL); \
12109                 if (!bp->arr) { \
12110                         printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12111                                             "for "#arr"\n", len); \
12112                         rc = -ENOMEM; goto lbl; \
12113                 } \
12114                 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12115                      (u8 *)bp->arr, len); \
12116         } while (0)
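
      /* For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
       * be32_to_cpu_n) expands (roughly, error printk omitted) to:
       *
       *        u32 len = be32_to_cpu(fw_hdr->init_data.len);
       *        bp->init_data = kmalloc(len, GFP_KERNEL);
       *        if (!bp->init_data) {
       *                rc = -ENOMEM;
       *                goto request_firmware_exit;
       *        }
       *        be32_to_cpu_n(bp->firmware->data +
       *                      be32_to_cpu(fw_hdr->init_data.offset),
       *                      (u8 *)bp->init_data, len);
       *
       * i.e. allocate a native-endian copy of one firmware section and
       * byte-swap it in place with the supplied conversion helper.
       */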
12117
12118 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12119 {
12120         char fw_file_name[40] = {0};
12121         struct bnx2x_fw_file_hdr *fw_hdr;
12122         int rc, offset;
12123
12124         /* Create a FW file name */
12125         if (CHIP_IS_E1(bp))
12126                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
12127         else
12128                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
12129
12130         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
12131                 BCM_5710_FW_MAJOR_VERSION,
12132                 BCM_5710_FW_MINOR_VERSION,
12133                 BCM_5710_FW_REVISION_VERSION,
12134                 BCM_5710_FW_ENGINEERING_VERSION);
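              /* e.g. "bnx2x-e1h-5.2.7.0.fw" on an E1H device (the digits
               * here are illustrative; the real ones come from the
               * BCM_5710_FW_* constants above) */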
12135
12136         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12137
12138         rc = request_firmware(&bp->firmware, fw_file_name, dev);
12139         if (rc) {
12140                 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12141                        fw_file_name);
12142                 goto request_firmware_exit;
12143         }
12144
12145         rc = bnx2x_check_firmware(bp);
12146         if (rc) {
12147                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12148                 goto request_firmware_exit;
12149         }
12150
12151         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12152
12153         /* Initialize the pointers to the init arrays */
12154         /* Blob */
12155         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12156
12157         /* Opcodes */
12158         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12159
12160         /* Offsets */
12161         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12162                             be16_to_cpu_n);
12163
12164         /* STORMs firmware */
12165         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12166                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12167         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
12168                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12169         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12170                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12171         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
12172                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
12173         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12174                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12175         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
12176                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12177         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12178                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12179         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
12180                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
12181
12182         return 0;
12183
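      /* the error labels unwind in reverse order of the allocations above:
       * each one frees everything that was set up before the failing step */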
12184 init_offsets_alloc_err:
12185         kfree(bp->init_ops);
12186 init_ops_alloc_err:
12187         kfree(bp->init_data);
12188 request_firmware_exit:
12189         release_firmware(bp->firmware);
12190
12191         return rc;
12192 }
12193
12194
12195 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12196                                     const struct pci_device_id *ent)
12197 {
12198         struct net_device *dev = NULL;
12199         struct bnx2x *bp;
12200         int pcie_width, pcie_speed;
12201         int rc;
12202
12203         /* dev zeroed in alloc_etherdev_mq() */
12204         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12205         if (!dev) {
12206                 printk(KERN_ERR PFX "Cannot allocate net device\n");
12207                 return -ENOMEM;
12208         }
12209
12210         bp = netdev_priv(dev);
12211         bp->msglevel = debug;
12212
12213         pci_set_drvdata(pdev, dev);
12214
12215         rc = bnx2x_init_dev(pdev, dev);
12216         if (rc < 0) {
12217                 free_netdev(dev);
12218                 return rc;
12219         }
12220
12221         rc = bnx2x_init_bp(bp);
12222         if (rc)
12223                 goto init_one_exit;
12224
12225         /* Set init arrays */
12226         rc = bnx2x_init_firmware(bp, &pdev->dev);
12227         if (rc) {
12228                 printk(KERN_ERR PFX "Error loading firmware\n");
12229                 goto init_one_exit;
12230         }
12231
12232         rc = register_netdev(dev);
12233         if (rc) {
12234                 dev_err(&pdev->dev, "Cannot register net device\n");
12235                 goto init_one_exit;
12236         }
12237
12238         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12239         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
12240                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
12241                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12242                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12243                dev->base_addr, bp->pdev->irq);
12244         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12245
12246         return 0;
12247
12248 init_one_exit:
12249         if (bp->regview)
12250                 iounmap(bp->regview);
12251
12252         if (bp->doorbells)
12253                 iounmap(bp->doorbells);
12254
12255         free_netdev(dev);
12256
12257         if (atomic_read(&pdev->enable_cnt) == 1)
12258                 pci_release_regions(pdev);
12259
12260         pci_disable_device(pdev);
12261         pci_set_drvdata(pdev, NULL);
12262
12263         return rc;
12264 }
12265
12266 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12267 {
12268         struct net_device *dev = pci_get_drvdata(pdev);
12269         struct bnx2x *bp;
12270
12271         if (!dev) {
12272                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12273                 return;
12274         }
12275         bp = netdev_priv(dev);
12276
12277         unregister_netdev(dev);
12278
12279         kfree(bp->init_ops_offsets);
12280         kfree(bp->init_ops);
12281         kfree(bp->init_data);
12282         release_firmware(bp->firmware);
12283
12284         if (bp->regview)
12285                 iounmap(bp->regview);
12286
12287         if (bp->doorbells)
12288                 iounmap(bp->doorbells);
12289
12290         free_netdev(dev);
12291
12292         if (atomic_read(&pdev->enable_cnt) == 1)
12293                 pci_release_regions(pdev);
12294
12295         pci_disable_device(pdev);
12296         pci_set_drvdata(pdev, NULL);
12297 }
12298
12299 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12300 {
12301         struct net_device *dev = pci_get_drvdata(pdev);
12302         struct bnx2x *bp;
12303
12304         if (!dev) {
12305                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12306                 return -ENODEV;
12307         }
12308         bp = netdev_priv(dev);
12309
12310         rtnl_lock();
12311
12312         pci_save_state(pdev);
12313
12314         if (!netif_running(dev)) {
12315                 rtnl_unlock();
12316                 return 0;
12317         }
12318
12319         netif_device_detach(dev);
12320
12321         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12322
12323         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12324
12325         rtnl_unlock();
12326
12327         return 0;
12328 }
12329
12330 static int bnx2x_resume(struct pci_dev *pdev)
12331 {
12332         struct net_device *dev = pci_get_drvdata(pdev);
12333         struct bnx2x *bp;
12334         int rc;
12335
12336         if (!dev) {
12337                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12338                 return -ENODEV;
12339         }
12340         bp = netdev_priv(dev);
12341
12342         rtnl_lock();
12343
12344         pci_restore_state(pdev);
12345
12346         if (!netif_running(dev)) {
12347                 rtnl_unlock();
12348                 return 0;
12349         }
12350
12351         bnx2x_set_power_state(bp, PCI_D0);
12352         netif_device_attach(dev);
12353
12354         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12355
12356         rtnl_unlock();
12357
12358         return rc;
12359 }
12360
12361 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12362 {
12363         int i;
12364
12365         bp->state = BNX2X_STATE_ERROR;
12366
12367         bp->rx_mode = BNX2X_RX_MODE_NONE;
12368
12369         bnx2x_netif_stop(bp, 0);
12370
12371         del_timer_sync(&bp->timer);
12372         bp->stats_state = STATS_STATE_DISABLED;
12373         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12374
12375         /* Release IRQs */
12376         bnx2x_free_irq(bp);
12377
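              /* E1 keeps its MAC filters in the CAM; invalidate every entry
               * of the multicast table so a later load starts clean */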
12378         if (CHIP_IS_E1(bp)) {
12379                 struct mac_configuration_cmd *config =
12380                                                 bnx2x_sp(bp, mcast_config);
12381
12382                 for (i = 0; i < config->hdr.length; i++)
12383                         CAM_INVALIDATE(config->config_table[i]);
12384         }
12385
12386         /* Free SKBs, SGEs, TPA pool and driver internals */
12387         bnx2x_free_skbs(bp);
12388         for_each_rx_queue(bp, i)
12389                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12390         for_each_rx_queue(bp, i)
12391                 netif_napi_del(&bnx2x_fp(bp, i, napi));
12392         bnx2x_free_mem(bp);
12393
12394         bp->state = BNX2X_STATE_CLOSED;
12395
12396         netif_carrier_off(bp->dev);
12397
12398         return 0;
12399 }
12400
12401 static void bnx2x_eeh_recover(struct bnx2x *bp)
12402 {
12403         u32 val;
12404
12405         mutex_init(&bp->port.phy_mutex);
12406
12407         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12408         bp->link_params.shmem_base = bp->common.shmem_base;
12409         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12410
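              /* shmem is expected inside a fixed window; a base outside
               * 0xA0000-0xBFFFF means the bootcode never initialized it */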
12411         if (!bp->common.shmem_base ||
12412             (bp->common.shmem_base < 0xA0000) ||
12413             (bp->common.shmem_base >= 0xC0000)) {
12414                 BNX2X_DEV_INFO("MCP not active\n");
12415                 bp->flags |= NO_MCP_FLAG;
12416                 return;
12417         }
12418
12419         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12420         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12421                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12422                 BNX2X_ERR("BAD MCP validity signature\n");
12423
12424         if (!BP_NOMCP(bp)) {
12425                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12426                               & DRV_MSG_SEQ_NUMBER_MASK);
12427                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12428         }
12429 }
12430
12431 /**
12432  * bnx2x_io_error_detected - called when a PCI error is detected
12433  * @pdev: Pointer to PCI device
12434  * @state: The current pci connection state
12435  *
12436  * This function is called after a PCI bus error affecting
12437  * this device has been detected.
12438  */
12439 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12440                                                 pci_channel_state_t state)
12441 {
12442         struct net_device *dev = pci_get_drvdata(pdev);
12443         struct bnx2x *bp = netdev_priv(dev);
12444
12445         rtnl_lock();
12446
12447         netif_device_detach(dev);
12448
12449         if (state == pci_channel_io_perm_failure) {
12450                 rtnl_unlock();
12451                 return PCI_ERS_RESULT_DISCONNECT;
12452         }
12453
12454         if (netif_running(dev))
12455                 bnx2x_eeh_nic_unload(bp);
12456
12457         pci_disable_device(pdev);
12458
12459         rtnl_unlock();
12460
12461         /* Request a slot reset */
12462         return PCI_ERS_RESULT_NEED_RESET;
12463 }
12464
12465 /**
12466  * bnx2x_io_slot_reset - called after the PCI bus has been reset
12467  * @pdev: Pointer to PCI device
12468  *
12469  * Restart the card from scratch, as if from a cold boot.
12470  */
12471 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12472 {
12473         struct net_device *dev = pci_get_drvdata(pdev);
12474         struct bnx2x *bp = netdev_priv(dev);
12475
12476         rtnl_lock();
12477
12478         if (pci_enable_device(pdev)) {
12479                 dev_err(&pdev->dev,
12480                         "Cannot re-enable PCI device after reset\n");
12481                 rtnl_unlock();
12482                 return PCI_ERS_RESULT_DISCONNECT;
12483         }
12484
12485         pci_set_master(pdev);
12486         pci_restore_state(pdev);
12487
12488         if (netif_running(dev))
12489                 bnx2x_set_power_state(bp, PCI_D0);
12490
12491         rtnl_unlock();
12492
12493         return PCI_ERS_RESULT_RECOVERED;
12494 }
12495
12496 /**
12497  * bnx2x_io_resume - called when traffic can start flowing again
12498  * @pdev: Pointer to PCI device
12499  *
12500  * This callback is called when the error recovery driver tells us that
12501  * it's OK to resume normal operation.
12502  */
12503 static void bnx2x_io_resume(struct pci_dev *pdev)
12504 {
12505         struct net_device *dev = pci_get_drvdata(pdev);
12506         struct bnx2x *bp = netdev_priv(dev);
12507
12508         rtnl_lock();
12509
12510         bnx2x_eeh_recover(bp);
12511
12512         if (netif_running(dev))
12513                 bnx2x_nic_load(bp, LOAD_NORMAL);
12514
12515         netif_device_attach(dev);
12516
12517         rtnl_unlock();
12518 }
12519
12520 static struct pci_error_handlers bnx2x_err_handler = {
12521         .error_detected = bnx2x_io_error_detected,
12522         .slot_reset     = bnx2x_io_slot_reset,
12523         .resume         = bnx2x_io_resume,
12524 };
12525
12526 static struct pci_driver bnx2x_pci_driver = {
12527         .name        = DRV_MODULE_NAME,
12528         .id_table    = bnx2x_pci_tbl,
12529         .probe       = bnx2x_init_one,
12530         .remove      = __devexit_p(bnx2x_remove_one),
12531         .suspend     = bnx2x_suspend,
12532         .resume      = bnx2x_resume,
12533         .err_handler = &bnx2x_err_handler,
12534 };
12535
12536 static int __init bnx2x_init(void)
12537 {
12538         int ret;
12539
12540         printk(KERN_INFO "%s", version);
12541
12542         bnx2x_wq = create_singlethread_workqueue("bnx2x");
12543         if (bnx2x_wq == NULL) {
12544                 printk(KERN_ERR PFX "Cannot create workqueue\n");
12545                 return -ENOMEM;
12546         }
12547
12548         ret = pci_register_driver(&bnx2x_pci_driver);
12549         if (ret) {
12550                 printk(KERN_ERR PFX "Cannot register driver\n");
12551                 destroy_workqueue(bnx2x_wq);
12552         }
12553         return ret;
12554 }
12555
12556 static void __exit bnx2x_cleanup(void)
12557 {
12558         pci_unregister_driver(&bnx2x_pci_driver);
12559
12560         destroy_workqueue(bnx2x_wq);
12561 }
12562
12563 module_init(bnx2x_init);
12564 module_exit(bnx2x_cleanup);
12565
12566 #ifdef BCM_CNIC
12567
12568 /* count denotes the number of new completions we have seen */
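      /* Drain the driver-private CNIC kwqe ring into the HW slow-path queue:
       * 'count' credits newly freed SPQ slots, and queued kwqes are copied in
       * until either the SPQ budget (max_kwqe_pending) or the backlog runs
       * out. */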
12569 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12570 {
12571         struct eth_spe *spe;
12572
12573 #ifdef BNX2X_STOP_ON_ERROR
12574         if (unlikely(bp->panic))
12575                 return;
12576 #endif
12577
12578         spin_lock_bh(&bp->spq_lock);
12579         bp->cnic_spq_pending -= count;
12580
12581         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12582              bp->cnic_spq_pending++) {
12583
12584                 if (!bp->cnic_kwq_pending)
12585                         break;
12586
12587                 spe = bnx2x_sp_get_next(bp);
12588                 *spe = *bp->cnic_kwq_cons;
12589
12590                 bp->cnic_kwq_pending--;
12591
12592                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12593                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12594
12595                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12596                         bp->cnic_kwq_cons = bp->cnic_kwq;
12597                 else
12598                         bp->cnic_kwq_cons++;
12599         }
12600         bnx2x_sp_prod_update(bp);
12601         spin_unlock_bh(&bp->spq_lock);
12602 }
12603
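      /* CNIC entry point for submitting kwqes: returns how many of 'count'
       * were actually queued, which may be fewer once the staging ring
       * (MAX_SP_DESC_CNT entries) fills up; whatever fits is then posted */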
12604 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12605                                struct kwqe_16 *kwqes[], u32 count)
12606 {
12607         struct bnx2x *bp = netdev_priv(dev);
12608         int i;
12609
12610 #ifdef BNX2X_STOP_ON_ERROR
12611         if (unlikely(bp->panic))
12612                 return -EIO;
12613 #endif
12614
12615         spin_lock_bh(&bp->spq_lock);
12616
12617         for (i = 0; i < count; i++) {
12618                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12619
12620                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12621                         break;
12622
12623                 *bp->cnic_kwq_prod = *spe;
12624
12625                 bp->cnic_kwq_pending++;
12626
12627                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12628                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
12629                    spe->data.mac_config_addr.hi,
12630                    spe->data.mac_config_addr.lo,
12631                    bp->cnic_kwq_pending);
12632
12633                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12634                         bp->cnic_kwq_prod = bp->cnic_kwq;
12635                 else
12636                         bp->cnic_kwq_prod++;
12637         }
12638
12639         spin_unlock_bh(&bp->spq_lock);
12640
12641         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12642                 bnx2x_cnic_sp_post(bp, 0);
12643
12644         return i;
12645 }
12646
12647 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12648 {
12649         struct cnic_ops *c_ops;
12650         int rc = 0;
12651
12652         mutex_lock(&bp->cnic_mutex);
12653         c_ops = bp->cnic_ops;
12654         if (c_ops)
12655                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12656         mutex_unlock(&bp->cnic_mutex);
12657
12658         return rc;
12659 }
12660
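      /* like bnx2x_cnic_ctl_send(), but safe in BH context: cnic_ops is
       * sampled under rcu_read_lock() instead of taking cnic_mutex */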
12661 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12662 {
12663         struct cnic_ops *c_ops;
12664         int rc = 0;
12665
12666         rcu_read_lock();
12667         c_ops = rcu_dereference(bp->cnic_ops);
12668         if (c_ops)
12669                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12670         rcu_read_unlock();
12671
12672         return rc;
12673 }
12674
12675 /*
12676  * for commands that have no data
12677  */
12678 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12679 {
12680         struct cnic_ctl_info ctl = {0};
12681
12682         ctl.cmd = cmd;
12683
12684         return bnx2x_cnic_ctl_send(bp, &ctl);
12685 }
12686
12687 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12688 {
12689         struct cnic_ctl_info ctl;
12690
12691         /* first we tell CNIC and only then we count this as a completion */
12692         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12693         ctl.data.comp.cid = cid;
12694
12695         bnx2x_cnic_ctl_send_bh(bp, &ctl);
12696         bnx2x_cnic_sp_post(bp, 1);
12697 }
12698
12699 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12700 {
12701         struct bnx2x *bp = netdev_priv(dev);
12702         int rc = 0;
12703
12704         switch (ctl->cmd) {
12705         case DRV_CTL_CTXTBL_WR_CMD: {
12706                 u32 index = ctl->data.io.offset;
12707                 dma_addr_t addr = ctl->data.io.dma_addr;
12708
12709                 bnx2x_ilt_wr(bp, index, addr);
12710                 break;
12711         }
12712
12713         case DRV_CTL_COMPLETION_CMD: {
12714                 int count = ctl->data.comp.comp_count;
12715
12716                 bnx2x_cnic_sp_post(bp, count);
12717                 break;
12718         }
12719
12720         /* rtnl_lock is held.  */
12721         case DRV_CTL_START_L2_CMD: {
12722                 u32 cli = ctl->data.ring.client_id;
12723
12724                 bp->rx_mode_cl_mask |= (1 << cli);
12725                 bnx2x_set_storm_rx_mode(bp);
12726                 break;
12727         }
12728
12729         /* rtnl_lock is held.  */
12730         case DRV_CTL_STOP_L2_CMD: {
12731                 u32 cli = ctl->data.ring.client_id;
12732
12733                 bp->rx_mode_cl_mask &= ~(1 << cli);
12734                 bnx2x_set_storm_rx_mode(bp);
12735                 break;
12736         }
12737
12738         default:
12739                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12740                 rc = -EINVAL;
12741         }
12742
12743         return rc;
12744 }
12745
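      /* describe the IRQ/status-block layout to CNIC: slot 0 carries the
       * dedicated CNIC status block (MSI-X vector 1 when enabled), slot 1
       * the default status block */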
12746 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12747 {
12748         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12749
12750         if (bp->flags & USING_MSIX_FLAG) {
12751                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12752                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12753                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12754         } else {
12755                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12756                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12757         }
12758         cp->irq_arr[0].status_blk = bp->cnic_sb;
12759         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12760         cp->irq_arr[1].status_blk = bp->def_status_blk;
12761         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12762
12763         cp->num_irq = 2;
12764 }
12765
12766 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12767                                void *data)
12768 {
12769         struct bnx2x *bp = netdev_priv(dev);
12770         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12771
12772         if (ops == NULL)
12773                 return -EINVAL;
12774
12775         if (atomic_read(&bp->intr_sem) != 0)
12776                 return -EBUSY;
12777
12778         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12779         if (!bp->cnic_kwq)
12780                 return -ENOMEM;
12781
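              /* one page of eth_spe slots backs the staging ring;
               * cnic_kwq_last marks the wrap point for the prod/cons
               * pointers used in bnx2x_cnic_sp_queue()/_sp_post() */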
12782         bp->cnic_kwq_cons = bp->cnic_kwq;
12783         bp->cnic_kwq_prod = bp->cnic_kwq;
12784         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12785
12786         bp->cnic_spq_pending = 0;
12787         bp->cnic_kwq_pending = 0;
12788
12789         bp->cnic_data = data;
12790
12791         cp->num_irq = 0;
12792         cp->drv_state = CNIC_DRV_STATE_REGD;
12793
12794         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12795
12796         bnx2x_setup_cnic_irq_info(bp);
12797         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12798         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12799         rcu_assign_pointer(bp->cnic_ops, ops);
12800
12801         return 0;
12802 }
12803
12804 static int bnx2x_unregister_cnic(struct net_device *dev)
12805 {
12806         struct bnx2x *bp = netdev_priv(dev);
12807         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12808
12809         mutex_lock(&bp->cnic_mutex);
12810         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12811                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12812                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12813         }
12814         cp->drv_state = 0;
12815         rcu_assign_pointer(bp->cnic_ops, NULL);
12816         mutex_unlock(&bp->cnic_mutex);
12817         synchronize_rcu();
12818         kfree(bp->cnic_kwq);
12819         bp->cnic_kwq = NULL;
12820
12821         return 0;
12822 }
12823
12824 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12825 {
12826         struct bnx2x *bp = netdev_priv(dev);
12827         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12828
12829         cp->drv_owner = THIS_MODULE;
12830         cp->chip_id = CHIP_ID(bp);
12831         cp->pdev = bp->pdev;
12832         cp->io_base = bp->regview;
12833         cp->io_base2 = bp->doorbells;
12834         cp->max_kwqe_pending = 8;
12835         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12836         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12837         cp->ctx_tbl_len = CNIC_ILT_LINES;
12838         cp->starting_cid = BCM_CNIC_CID_START;
12839         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12840         cp->drv_ctl = bnx2x_drv_ctl;
12841         cp->drv_register_cnic = bnx2x_register_cnic;
12842         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12843
12844         return cp;
12845 }
12846 EXPORT_SYMBOL(bnx2x_cnic_probe);
12847
12848 #endif /* BCM_CNIC */
12849