bnx2x: Allowing 0 as initial fairness value
drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1"
#define DRV_MODULE_RELDATE      "2009/08/12"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1       "bnx2x-e1-"
#define FW_FILE_PREFIX_E1H      "bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

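/* Indirect register access goes through the GRC window in PCI config
 * space: program the window with the target GRC address, access the
 * data register, then park the window back at PCICFG_VENDOR_ID_OFFSET
 * (presumably so a stray config cycle cannot land on an arbitrary
 * internal address).
 */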
/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

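/* DMAE copy from host memory (PCI) to device memory (GRC).  The command
 * is posted to the per-function DMAE channel; completion is detected by
 * polling the wb_comp word in the slowpath buffer, which the DMAE block
 * writes with DMAE_COMP_VAL when the copy is done.  Note that dst_addr
 * is a GRC byte offset (stored as a dword address) and len32 is in
 * dwords.
 */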
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

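/* Reverse direction of bnx2x_write_dmae(): copy len32 dwords from a GRC
 * offset into the slowpath wb_data[] scratch area, with the same
 * wb_comp polling scheme signalling completion.
 */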
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

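/* Writes longer than the DMAE hardware limit are issued in chunks.
 * Illustration (assuming, for the sake of the example, that
 * DMAE_LEN32_WR_MAX were 0x400): a 0x500-dword write would go out as
 * 0x400 + 0x100 dwords, with both the bus address and the GRC offset
 * advanced by 0x400*4 bytes between the two transfers.
 */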
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int offset = 0;

        while (len > DMAE_LEN32_WR_MAX) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, DMAE_LEN32_WR_MAX);
                offset += DMAE_LEN32_WR_MAX * 4;
                len -= DMAE_LEN32_WR_MAX;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

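/* Walk the assert lists of the four storm processors (X/T/C/U).  Each
 * list entry is four consecutive dwords; an entry whose row0 still
 * holds COMMON_ASM_INVALID_ASSERT_OPCODE terminates the scan.  Returns
 * the number of asserts found so the caller can tell whether FW
 * tripped.
 */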
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

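/* Dump the MCP scratchpad trace.  The "mark" word at offset 0xf104
 * points into a cyclic text buffer, so the dump is printed in two
 * passes: from mark to the end of the buffer, then from the start of
 * the buffer back up to mark.
 */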
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        printk(KERN_ERR PFX);
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

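/* Program the HC block for the current interrupt mode: MSI-X gets
 * per-vector delivery only, MSI keeps single-ISR mode, and INTx enables
 * everything including the attention bit.  On E1H the leading/trailing
 * edge masks are also narrowed so that, in multi-function mode, only
 * this function's VN bit (plus NIG and GPIO3 attention when this
 * function is the PMF) can generate attentions.
 */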
static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

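/* Quiesce interrupts in a fixed order: raise intr_sem first so the ISRs
 * back off, optionally mask the HW via bnx2x_int_disable(), then
 * synchronize_irq() every vector in use and flush the slowpath work so
 * nothing is left running when the caller proceeds with the teardown.
 */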
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

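/* Pick up new status block indices written by the chip.  The return
 * value is a bitmask: bit 0 - the CSTORM (Tx) index changed, bit 1 -
 * the USTORM (Rx) index changed; 0 means no new work was indicated.
 */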
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

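/* Free Tx BD accounting.  The producer/consumer difference includes the
 * NUM_TX_RINGS "next page" BDs that never carry data, so they are added
 * to "used" as a safety margin.  E.g. (illustrative numbers) with
 * tx_ring_size 4096, prod 100, cons 80 and NUM_TX_RINGS 16:
 * used = 20 + 16 = 36, so 4060 BDs are reported available.
 */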
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);
        }
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

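/* Dispatch a slowpath (ramrod) completion taken from the RCQ.  The
 * switch is keyed on (command | state) so a completion is only accepted
 * in the state that is actually waiting for it; anything else is
 * flagged as an unexpected MC reply.  Completions on queue 0 drive
 * bp->state, the other fastpath queues only track their own fp->state.
 */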
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

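/* Allocate one SGE entry: a PAGES_PER_SGE compound page is mapped for
 * DMA and its bus address is written into the SGE ring in hi/lo
 * little-endian halves.  On mapping failure the page is freed and
 * -ENOMEM is returned so the caller can stop refilling.
 */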
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

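/* Reclaim SGE ring entries consumed by an aggregated packet.  The pages
 * named in the CQE's SGL are cleared in sge_mask; the producer is then
 * pushed forward over every fully-cleared 64-bit mask element,
 * RX_SGE_MASK_ELEM_SZ entries at a time, re-arming each element to all
 * ones as it goes.
 */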
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

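/* Start of a TPA aggregation: the empty skb parked in tpa_pool[queue]
 * takes over the producer slot, while the cons buffer (holding the
 * packet start) moves into the pool - still mapped - until
 * bnx2x_tpa_stop() either completes or drops the aggregation.
 */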
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

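/* End of a TPA aggregation: the pooled skb is unmapped unconditionally,
 * the IP header checksum is recomputed (it is stale after FW coalesced
 * the frames), the SGL pages are attached as frags and a fresh skb is
 * allocated back into the bin.  Any allocation failure drops the
 * aggregated packet but keeps the ring state consistent.
 */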
1362 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1363                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1364                            u16 cqe_idx)
1365 {
1366         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1367         struct sk_buff *skb = rx_buf->skb;
1368         /* alloc new skb */
1369         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1370
1371         /* Unmap skb in the pool anyway, as we are going to change
1372            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1373            fails. */
1374         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1375                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1376
1377         if (likely(new_skb)) {
1378                 /* fix ip xsum and give it to the stack */
1379                 /* (no need to map the new skb) */
1380 #ifdef BCM_VLAN
1381                 int is_vlan_cqe =
1382                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1383                          PARSING_FLAGS_VLAN);
1384                 int is_not_hwaccel_vlan_cqe =
1385                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1386 #endif
1387
1388                 prefetch(skb);
1389                 prefetch(((char *)(skb)) + 128);
1390
1391 #ifdef BNX2X_STOP_ON_ERROR
1392                 if (pad + len > bp->rx_buf_size) {
1393                         BNX2X_ERR("skb_put is about to fail...  "
1394                                   "pad %d  len %d  rx_buf_size %d\n",
1395                                   pad, len, bp->rx_buf_size);
1396                         bnx2x_panic();
1397                         return;
1398                 }
1399 #endif
1400
1401                 skb_reserve(skb, pad);
1402                 skb_put(skb, len);
1403
1404                 skb->protocol = eth_type_trans(skb, bp->dev);
1405                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1406
1407                 {
1408                         struct iphdr *iph;
1409
1410                         iph = (struct iphdr *)skb->data;
1411 #ifdef BCM_VLAN
1412                         /* If there is no Rx VLAN offloading -
1413                            take VLAN tag into an account */
1414                         if (unlikely(is_not_hwaccel_vlan_cqe))
1415                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1416 #endif
1417                         iph->check = 0;
1418                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1419                 }
1420
1421                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1422                                          &cqe->fast_path_cqe, cqe_idx)) {
1423 #ifdef BCM_VLAN
1424                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1425                             (!is_not_hwaccel_vlan_cqe))
1426                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1427                                                 le16_to_cpu(cqe->fast_path_cqe.
1428                                                             vlan_tag));
1429                         else
1430 #endif
1431                                 netif_receive_skb(skb);
1432                 } else {
1433                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1434                            " - dropping packet!\n");
1435                         dev_kfree_skb(skb);
1436                 }
1437
1438
1439                 /* put new skb in bin */
1440                 fp->tpa_pool[queue].skb = new_skb;
1441
1442         } else {
1443                 /* else drop the packet and keep the buffer in the bin */
1444                 DP(NETIF_MSG_RX_STATUS,
1445                    "Failed to allocate new skb - dropping packet!\n");
1446                 fp->eth_q_stats.rx_skb_alloc_failed++;
1447         }
1448
1449         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1450 }
1451
1452 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1453                                         struct bnx2x_fastpath *fp,
1454                                         u16 bd_prod, u16 rx_comp_prod,
1455                                         u16 rx_sge_prod)
1456 {
1457         struct ustorm_eth_rx_producers rx_prods = {0};
1458         int i;
1459
1460         /* Update producers */
1461         rx_prods.bd_prod = bd_prod;
1462         rx_prods.cqe_prod = rx_comp_prod;
1463         rx_prods.sge_prod = rx_sge_prod;
1464
1465         /*
1466          * Make sure that the BD and SGE data is updated before updating the
1467          * producers since FW might read the BD/SGE right after the producer
1468          * is updated.
1469          * This is only applicable for weak-ordered memory model archs such
1470          * as IA-64. The following barrier is also mandatory since the FW
1471          * assumes BDs must have buffers.
1472          */
1473         wmb();
1474
1475         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1476                 REG_WR(bp, BAR_USTRORM_INTMEM +
1477                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1478                        ((u32 *)&rx_prods)[i]);
1479
1480         mmiowb(); /* keep prod updates ordered */
1481
1482         DP(NETIF_MSG_RX_STATUS,
1483            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1484            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1485 }
1486
1487 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1488 {
1489         struct bnx2x *bp = fp->bp;
1490         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1491         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1492         int rx_pkt = 0;
1493
1494 #ifdef BNX2X_STOP_ON_ERROR
1495         if (unlikely(bp->panic))
1496                 return 0;
1497 #endif
1498
1499         /* The CQ "next element" is the same size as a regular element,
1500            so it is safe to treat it like one here */
1501         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1502         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1503                 hw_comp_cons++;
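        /* For illustration: the low bits of the consumer index address a
         * CQE within a page, and the last slot of each page is a "next
         * page" element rather than a real CQE.  Assuming MAX_RCQ_DESC_CNT
         * were 127, consumer values of 127, 255, 383, ... would land on
         * that element, which is what the increment above skips over.
         */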
1504
1505         bd_cons = fp->rx_bd_cons;
1506         bd_prod = fp->rx_bd_prod;
1507         bd_prod_fw = bd_prod;
1508         sw_comp_cons = fp->rx_comp_cons;
1509         sw_comp_prod = fp->rx_comp_prod;
1510
1511         /* Memory barrier necessary as speculative reads of the rx
1512          * buffer can be ahead of the index in the status block
1513          */
1514         rmb();
1515
1516         DP(NETIF_MSG_RX_STATUS,
1517            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1518            fp->index, hw_comp_cons, sw_comp_cons);
1519
1520         while (sw_comp_cons != hw_comp_cons) {
1521                 struct sw_rx_bd *rx_buf = NULL;
1522                 struct sk_buff *skb;
1523                 union eth_rx_cqe *cqe;
1524                 u8 cqe_fp_flags;
1525                 u16 len, pad;
1526
1527                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1528                 bd_prod = RX_BD(bd_prod);
1529                 bd_cons = RX_BD(bd_cons);
1530
1531                 /* Prefetch the page containing the BD descriptor
1532                    at the producer's index. It will be needed when a new
1533                    skb is allocated */
1534                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1535                                              (&fp->rx_desc_ring[bd_prod])) -
1536                                   PAGE_SIZE + 1));
1537
1538                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1539                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1540
1541                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1542                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1543                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1544                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1545                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1546                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1547
1548                 /* is this a slowpath msg? */
1549                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1550                         bnx2x_sp_event(fp, cqe);
1551                         goto next_cqe;
1552
1553                 /* this is an rx packet */
1554                 } else {
1555                         rx_buf = &fp->rx_buf_ring[bd_cons];
1556                         skb = rx_buf->skb;
1557                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1558                         pad = cqe->fast_path_cqe.placement_offset;
1559
1560                         /* If CQE is marked both TPA_START and TPA_END
1561                            it is a non-TPA CQE */
1562                         if ((!fp->disable_tpa) &&
1563                             (TPA_TYPE(cqe_fp_flags) !=
1564                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1565                                 u16 queue = cqe->fast_path_cqe.queue_index;
1566
1567                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1568                                         DP(NETIF_MSG_RX_STATUS,
1569                                            "calling tpa_start on queue %d\n",
1570                                            queue);
1571
1572                                         bnx2x_tpa_start(fp, queue, skb,
1573                                                         bd_cons, bd_prod);
1574                                         goto next_rx;
1575                                 }
1576
1577                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1578                                         DP(NETIF_MSG_RX_STATUS,
1579                                            "calling tpa_stop on queue %d\n",
1580                                            queue);
1581
1582                                         if (!BNX2X_RX_SUM_FIX(cqe))
1583                                                 BNX2X_ERR("STOP on non-TCP "
1584                                                           "data\n");
1585
1586                                         /* This is the size of the linear
1587                                            data on this skb */
1588                                         len = le16_to_cpu(cqe->fast_path_cqe.
1589                                                                 len_on_bd);
1590                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1591                                                     len, cqe, comp_ring_cons);
1592 #ifdef BNX2X_STOP_ON_ERROR
1593                                         if (bp->panic)
1594                                                 return 0;
1595 #endif
1596
1597                                         bnx2x_update_sge_prod(fp,
1598                                                         &cqe->fast_path_cqe);
1599                                         goto next_cqe;
1600                                 }
1601                         }
1602
1603                         pci_dma_sync_single_for_device(bp->pdev,
1604                                         pci_unmap_addr(rx_buf, mapping),
1605                                                        pad + RX_COPY_THRESH,
1606                                                        PCI_DMA_FROMDEVICE);
1607                         prefetch(skb);
1608                         prefetch(((char *)(skb)) + 128);
1609
1610                         /* is this an error packet? */
1611                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1612                                 DP(NETIF_MSG_RX_ERR,
1613                                    "ERROR  flags %x  rx packet %u\n",
1614                                    cqe_fp_flags, sw_comp_cons);
1615                                 fp->eth_q_stats.rx_err_discard_pkt++;
1616                                 goto reuse_rx;
1617                         }
1618
1619                         /* Since we don't have a jumbo ring,
1620                          * copy small packets if the MTU is above 1500
1621                          */
1622                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1623                             (len <= RX_COPY_THRESH)) {
1624                                 struct sk_buff *new_skb;
1625
1626                                 new_skb = netdev_alloc_skb(bp->dev,
1627                                                            len + pad);
1628                                 if (new_skb == NULL) {
1629                                         DP(NETIF_MSG_RX_ERR,
1630                                            "ERROR  packet dropped "
1631                                            "because of alloc failure\n");
1632                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1633                                         goto reuse_rx;
1634                                 }
1635
1636                                 /* aligned copy */
1637                                 skb_copy_from_linear_data_offset(skb, pad,
1638                                                     new_skb->data + pad, len);
1639                                 skb_reserve(new_skb, pad);
1640                                 skb_put(new_skb, len);
1641
1642                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1643
1644                                 skb = new_skb;
1645
1646                         } else
1647                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1648                                 pci_unmap_single(bp->pdev,
1649                                         pci_unmap_addr(rx_buf, mapping),
1650                                                  bp->rx_buf_size,
1651                                                  PCI_DMA_FROMDEVICE);
1652                                 skb_reserve(skb, pad);
1653                                 skb_put(skb, len);
1654
1655                         } else {
1656                                 DP(NETIF_MSG_RX_ERR,
1657                                    "ERROR  packet dropped because "
1658                                    "of alloc failure\n");
1659                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1660 reuse_rx:
1661                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1662                                 goto next_rx;
1663                         }
1664
1665                         skb->protocol = eth_type_trans(skb, bp->dev);
1666
1667                         skb->ip_summed = CHECKSUM_NONE;
1668                         if (bp->rx_csum) {
1669                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1670                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1671                                 else
1672                                         fp->eth_q_stats.hw_csum_err++;
1673                         }
1674                 }
1675
1676                 skb_record_rx_queue(skb, fp->index);
1677
1678 #ifdef BCM_VLAN
1679                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1680                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1681                      PARSING_FLAGS_VLAN))
1682                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1683                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1684                 else
1685 #endif
1686                         netif_receive_skb(skb);
1687
1688
1689 next_rx:
1690                 rx_buf->skb = NULL;
1691
1692                 bd_cons = NEXT_RX_IDX(bd_cons);
1693                 bd_prod = NEXT_RX_IDX(bd_prod);
1694                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1695                 rx_pkt++;
1696 next_cqe:
1697                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1698                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1699
1700                 if (rx_pkt == budget)
1701                         break;
1702         } /* while */
1703
1704         fp->rx_bd_cons = bd_cons;
1705         fp->rx_bd_prod = bd_prod_fw;
1706         fp->rx_comp_cons = sw_comp_cons;
1707         fp->rx_comp_prod = sw_comp_prod;
1708
1709         /* Update producers */
1710         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1711                              fp->rx_sge_prod);
1712
1713         fp->rx_pkt += rx_pkt;
1714         fp->rx_calls++;
1715
1716         return rx_pkt;
1717 }
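/* Minimal usage sketch (illustrative, not taken verbatim from this file):
 * bnx2x_rx_int() is budget-driven, so a NAPI poll handler would call
 *
 *      work_done = bnx2x_rx_int(fp, budget);
 *
 * and only complete NAPI when work_done < budget, i.e. when the ring was
 * fully drained within the allowed budget.
 */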
1718
1719 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1720 {
1721         struct bnx2x_fastpath *fp = fp_cookie;
1722         struct bnx2x *bp = fp->bp;
1723
1724         /* Return here if interrupt is disabled */
1725         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1726                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1727                 return IRQ_HANDLED;
1728         }
1729
1730         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1731            fp->index, fp->sb_id);
1732         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1733
1734 #ifdef BNX2X_STOP_ON_ERROR
1735         if (unlikely(bp->panic))
1736                 return IRQ_HANDLED;
1737 #endif
1738         /* Handle Rx or Tx according to MSI-X vector */
1739         if (fp->is_rx_queue) {
1740                 prefetch(fp->rx_cons_sb);
1741                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1742
1743                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1744
1745         } else {
1746                 prefetch(fp->tx_cons_sb);
1747                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1748
1749                 bnx2x_update_fpsb_idx(fp);
1750                 rmb();
1751                 bnx2x_tx_int(fp);
1752
1753                 /* Re-enable interrupts */
1754                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1755                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1756                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1757                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1758         }
1759
1760         return IRQ_HANDLED;
1761 }
1762
1763 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1764 {
1765         struct bnx2x *bp = netdev_priv(dev_instance);
1766         u16 status = bnx2x_ack_int(bp);
1767         u16 mask;
1768         int i;
1769
1770         /* Return here if interrupt is shared and it's not for us */
1771         if (unlikely(status == 0)) {
1772                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1773                 return IRQ_NONE;
1774         }
1775         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1776
1777         /* Return here if interrupt is disabled */
1778         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1779                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1780                 return IRQ_HANDLED;
1781         }
1782
1783 #ifdef BNX2X_STOP_ON_ERROR
1784         if (unlikely(bp->panic))
1785                 return IRQ_HANDLED;
1786 #endif
1787
1788         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1789                 struct bnx2x_fastpath *fp = &bp->fp[i];
1790
1791                 mask = 0x2 << fp->sb_id;
1792                 if (status & mask) {
1793                         /* Handle Rx or Tx according to SB id */
1794                         if (fp->is_rx_queue) {
1795                                 prefetch(fp->rx_cons_sb);
1796                                 prefetch(&fp->status_blk->u_status_block.
1797                                                         status_block_index);
1798
1799                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1800
1801                         } else {
1802                                 prefetch(fp->tx_cons_sb);
1803                                 prefetch(&fp->status_blk->c_status_block.
1804                                                         status_block_index);
1805
1806                                 bnx2x_update_fpsb_idx(fp);
1807                                 rmb();
1808                                 bnx2x_tx_int(fp);
1809
1810                                 /* Re-enable interrupts */
1811                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1812                                              le16_to_cpu(fp->fp_u_idx),
1813                                              IGU_INT_NOP, 1);
1814                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1815                                              le16_to_cpu(fp->fp_c_idx),
1816                                              IGU_INT_ENABLE, 1);
1817                         }
1818                         status &= ~mask;
1819                 }
1820         }
1821
1822 #ifdef BCM_CNIC
1823         mask = 0x2 << CNIC_SB_ID(bp);
1824         if (status & (mask | 0x1)) {
1825                 struct cnic_ops *c_ops = NULL;
1826
1827                 rcu_read_lock();
1828                 c_ops = rcu_dereference(bp->cnic_ops);
1829                 if (c_ops)
1830                         c_ops->cnic_handler(bp->cnic_data, NULL);
1831                 rcu_read_unlock();
1832
1833                 status &= ~mask;
1834         }
1835 #endif
1836
1837         if (unlikely(status & 0x1)) {
1838                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1839
1840                 status &= ~0x1;
1841                 if (!status)
1842                         return IRQ_HANDLED;
1843         }
1844
1845         if (status)
1846                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1847                    status);
1848
1849         return IRQ_HANDLED;
1850 }
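/* Worked example of the status decoding above: each fastpath status block
 * owns bit (0x2 << sb_id) and bit 0x1 belongs to the slowpath.  With
 * sb_id 0 and 1 in use, a status of 0x7 (illustrative) means "SB 0 (0x2),
 * SB 1 (0x4) plus a slowpath event (0x1)", and each bit is cleared from
 * the mask as it is serviced.
 */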
1851
1852 /* end of fast path */
1853
1854 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1855
1856 /* Link */
1857
1858 /*
1859  * General service functions
1860  */
1861
1862 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1863 {
1864         u32 lock_status;
1865         u32 resource_bit = (1 << resource);
1866         int func = BP_FUNC(bp);
1867         u32 hw_lock_control_reg;
1868         int cnt;
1869
1870         /* Validating that the resource is within range */
1871         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1872                 DP(NETIF_MSG_HW,
1873                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1874                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1875                 return -EINVAL;
1876         }
1877
1878         if (func <= 5) {
1879                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1880         } else {
1881                 hw_lock_control_reg =
1882                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1883         }
1884
1885         /* Validating that the resource is not already taken */
1886         lock_status = REG_RD(bp, hw_lock_control_reg);
1887         if (lock_status & resource_bit) {
1888                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1889                    lock_status, resource_bit);
1890                 return -EEXIST;
1891         }
1892
1893         /* Try for 5 seconds, polling every 5ms */
1894         for (cnt = 0; cnt < 1000; cnt++) {
1895                 /* Try to acquire the lock */
1896                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1897                 lock_status = REG_RD(bp, hw_lock_control_reg);
1898                 if (lock_status & resource_bit)
1899                         return 0;
1900
1901                 msleep(5);
1902         }
1903         DP(NETIF_MSG_HW, "Timeout\n");
1904         return -EAGAIN;
1905 }
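/* Pairing sketch (illustrative): callers bracket the protected hardware
 * access with acquire/release, e.g.
 *
 *      if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *              ... access the shared GPIO registers ...
 *              bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *      }
 *
 * Writing resource_bit to hw_lock_control_reg + 4 requests the lock;
 * reading hw_lock_control_reg back confirms ownership.
 */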
1906
1907 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1908 {
1909         u32 lock_status;
1910         u32 resource_bit = (1 << resource);
1911         int func = BP_FUNC(bp);
1912         u32 hw_lock_control_reg;
1913
1914         /* Validating that the resource is within range */
1915         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1916                 DP(NETIF_MSG_HW,
1917                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1918                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1919                 return -EINVAL;
1920         }
1921
1922         if (func <= 5) {
1923                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1924         } else {
1925                 hw_lock_control_reg =
1926                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1927         }
1928
1929         /* Validating that the resource is currently taken */
1930         lock_status = REG_RD(bp, hw_lock_control_reg);
1931         if (!(lock_status & resource_bit)) {
1932                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1933                    lock_status, resource_bit);
1934                 return -EFAULT;
1935         }
1936
1937         REG_WR(bp, hw_lock_control_reg, resource_bit);
1938         return 0;
1939 }
1940
1941 /* HW Lock for shared dual port PHYs */
1942 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1943 {
1944         mutex_lock(&bp->port.phy_mutex);
1945
1946         if (bp->port.need_hw_lock)
1947                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1948 }
1949
1950 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1951 {
1952         if (bp->port.need_hw_lock)
1953                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1954
1955         mutex_unlock(&bp->port.phy_mutex);
1956 }
1957
1958 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1959 {
1960         /* The GPIO should be swapped if swap register is set and active */
1961         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1962                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1963         int gpio_shift = gpio_num +
1964                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1965         u32 gpio_mask = (1 << gpio_shift);
1966         u32 gpio_reg;
1967         int value;
1968
1969         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1970                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1971                 return -EINVAL;
1972         }
1973
1974         /* read GPIO value */
1975         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1976
1977         /* get the requested pin value */
1978         if ((gpio_reg & gpio_mask) == gpio_mask)
1979                 value = 1;
1980         else
1981                 value = 0;
1982
1983         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1984
1985         return value;
1986 }
1987
1988 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989 {
1990         /* The GPIO should be swapped if swap register is set and active */
1991         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993         int gpio_shift = gpio_num +
1994                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995         u32 gpio_mask = (1 << gpio_shift);
1996         u32 gpio_reg;
1997
1998         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000                 return -EINVAL;
2001         }
2002
2003         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004         /* read GPIO and mask except the float bits */
2005         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2006
2007         switch (mode) {
2008         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2009                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2010                    gpio_num, gpio_shift);
2011                 /* clear FLOAT and set CLR */
2012                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2013                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2014                 break;
2015
2016         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2017                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2018                    gpio_num, gpio_shift);
2019                 /* clear FLOAT and set SET */
2020                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2021                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2022                 break;
2023
2024         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2025                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2026                    gpio_num, gpio_shift);
2027                 /* set FLOAT */
2028                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2029                 break;
2030
2031         default:
2032                 break;
2033         }
2034
2035         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2036         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2037
2038         return 0;
2039 }
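/* Worked example (illustrative numbers): for gpio_num 1 on the swapped
 * port, gpio_shift is 1 + MISC_REGISTERS_GPIO_PORT_SHIFT; assuming a port
 * shift of 4, gpio_mask = (1 << 5) = 0x20, and OUTPUT_LOW then clears
 * 0x20 in the FLOAT field and sets it in the CLR field of MISC_REG_GPIO.
 */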
2040
2041 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2042 {
2043         /* The GPIO should be swapped if swap register is set and active */
2044         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2045                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2046         int gpio_shift = gpio_num +
2047                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2048         u32 gpio_mask = (1 << gpio_shift);
2049         u32 gpio_reg;
2050
2051         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2052                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2053                 return -EINVAL;
2054         }
2055
2056         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2057         /* read GPIO int */
2058         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2059
2060         switch (mode) {
2061         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2062                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2063                                    "output low\n", gpio_num, gpio_shift);
2064                 /* clear SET and set CLR */
2065                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2066                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2067                 break;
2068
2069         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2070                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2071                                    "output high\n", gpio_num, gpio_shift);
2072                 /* clear CLR and set SET */
2073                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2074                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2075                 break;
2076
2077         default:
2078                 break;
2079         }
2080
2081         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2082         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2083
2084         return 0;
2085 }
2086
2087 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2088 {
2089         u32 spio_mask = (1 << spio_num);
2090         u32 spio_reg;
2091
2092         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2093             (spio_num > MISC_REGISTERS_SPIO_7)) {
2094                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2095                 return -EINVAL;
2096         }
2097
2098         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2099         /* read SPIO and mask except the float bits */
2100         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2101
2102         switch (mode) {
2103         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2104                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2105                 /* clear FLOAT and set CLR */
2106                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2107                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2108                 break;
2109
2110         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2111                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2112                 /* clear FLOAT and set SET */
2113                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2114                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2115                 break;
2116
2117         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2118                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2119                 /* set FLOAT */
2120                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2121                 break;
2122
2123         default:
2124                 break;
2125         }
2126
2127         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2128         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2129
2130         return 0;
2131 }
2132
2133 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2134 {
2135         switch (bp->link_vars.ieee_fc &
2136                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2137         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2138                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2139                                           ADVERTISED_Pause);
2140                 break;
2141
2142         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2143                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2144                                          ADVERTISED_Pause);
2145                 break;
2146
2147         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2148                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2149                 break;
2150
2151         default:
2152                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2153                                           ADVERTISED_Pause);
2154                 break;
2155         }
2156 }
2157
2158 static void bnx2x_link_report(struct bnx2x *bp)
2159 {
2160         if (bp->state == BNX2X_STATE_DISABLED) {
2161                 netif_carrier_off(bp->dev);
2162                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2163                 return;
2164         }
2165
2166         if (bp->link_vars.link_up) {
2167                 if (bp->state == BNX2X_STATE_OPEN)
2168                         netif_carrier_on(bp->dev);
2169                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2170
2171                 printk("%d Mbps ", bp->link_vars.line_speed);
2172
2173                 if (bp->link_vars.duplex == DUPLEX_FULL)
2174                         printk("full duplex");
2175                 else
2176                         printk("half duplex");
2177
2178                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2179                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2180                                 printk(", receive ");
2181                                 if (bp->link_vars.flow_ctrl &
2182                                     BNX2X_FLOW_CTRL_TX)
2183                                         printk("& transmit ");
2184                         } else {
2185                                 printk(", transmit ");
2186                         }
2187                         printk("flow control ON");
2188                 }
2189                 printk("\n");
2190
2191         } else { /* link_down */
2192                 netif_carrier_off(bp->dev);
2193                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2194         }
2195 }
2196
2197 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2198 {
2199         if (!BP_NOMCP(bp)) {
2200                 u8 rc;
2201
2202                 /* Initialize link parameters structure variables */
2203                 /* It is recommended to turn off RX FC for jumbo frames
2204                    for better performance */
2205                 if (bp->dev->mtu > 5000)
2206                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2207                 else
2208                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2209
2210                 bnx2x_acquire_phy_lock(bp);
2211
2212                 if (load_mode == LOAD_DIAG)
2213                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2214
2215                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2216
2217                 bnx2x_release_phy_lock(bp);
2218
2219                 bnx2x_calc_fc_adv(bp);
2220
2221                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2222                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2223                         bnx2x_link_report(bp);
2224                 }
2225
2226                 return rc;
2227         }
2228         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2229         return -EINVAL;
2230 }
2231
2232 static void bnx2x_link_set(struct bnx2x *bp)
2233 {
2234         if (!BP_NOMCP(bp)) {
2235                 bnx2x_acquire_phy_lock(bp);
2236                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2237                 bnx2x_release_phy_lock(bp);
2238
2239                 bnx2x_calc_fc_adv(bp);
2240         } else
2241                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2242 }
2243
2244 static void bnx2x__link_reset(struct bnx2x *bp)
2245 {
2246         if (!BP_NOMCP(bp)) {
2247                 bnx2x_acquire_phy_lock(bp);
2248                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2249                 bnx2x_release_phy_lock(bp);
2250         } else
2251                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2252 }
2253
2254 static u8 bnx2x_link_test(struct bnx2x *bp)
2255 {
2256         u8 rc;
2257
2258         bnx2x_acquire_phy_lock(bp);
2259         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2260         bnx2x_release_phy_lock(bp);
2261
2262         return rc;
2263 }
2264
2265 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2266 {
2267         u32 r_param = bp->link_vars.line_speed / 8;
2268         u32 fair_periodic_timeout_usec;
2269         u32 t_fair;
2270
2271         memset(&(bp->cmng.rs_vars), 0,
2272                sizeof(struct rate_shaping_vars_per_port));
2273         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2274
2275         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2276         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2277
2278         /* this is the threshold below which no timer arming will occur;
2279            the 1.25 coefficient makes the threshold a little bigger
2280            than the real time, to compensate for timer inaccuracy */
2281         bp->cmng.rs_vars.rs_threshold =
2282                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2283
2284         /* resolution of fairness timer */
2285         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2286         /* for 10G it is 1000 usec; for 1G it is 10000 usec */
2287         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2288
2289         /* this is the threshold below which we won't arm the timer anymore */
2290         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2291
2292         /* r_param is in bytes/usec and t_fair in usec, so this caps the
2293            accumulated credit at r_param * t_fair * FAIR_MEM bytes
2294            (the resolution of the fairness algorithm) */
2295         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2296         /* since each tick is 4 usec */
2297         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2298 }
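/* Worked example (illustrative, using the 4 usec SDM tick noted above):
 * at 10G, r_param = 10000/8 = 1250 bytes/usec, so
 *
 *      rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes
 *      t_fair       = T_FAIR_COEF / 10000  = 1000 usec
 *
 * and fairness_timeout = (QM_ARB_BYTES / 1250) / 4 SDM ticks.
 */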
2299
2300 /* Calculates the sum of vn_min_rates.
2301    It's needed for further normalizing of the min_rates.
2302    Returns:
2303      sum of vn_min_rates.
2304        or
2305      0 - if all the min_rates are 0.
2306      In the latter case the fairness algorithm should be deactivated.
2307      If not all min_rates are zero, then those that are zero will be set to 1.
2308  */
2309 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2310 {
2311         int all_zero = 1;
2312         int port = BP_PORT(bp);
2313         int vn;
2314
2315         bp->vn_weight_sum = 0;
2316         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2317                 int func = 2*vn + port;
2318                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2319                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2320                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2321
2322                 /* Skip hidden vns */
2323                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2324                         continue;
2325
2326                 /* If min rate is zero - set it to 1 */
2327                 if (!vn_min_rate)
2328                         vn_min_rate = DEF_MIN_RATE;
2329                 else
2330                         all_zero = 0;
2331
2332                 bp->vn_weight_sum += vn_min_rate;
2333         }
2334
2335         /* ... only if all min rates are zero - disable fairness */
2336         if (all_zero) {
2337                 bp->cmng.flags.cmng_enables &=
2338                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2339                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2340                    "  fairness will be disabled\n");
2341         } else
2342                 bp->cmng.flags.cmng_enables |=
2343                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2344 }
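/* Worked example (illustrative rates): with configured min rates of
 * {0, 2500, 0, 7500} after the *100 scaling, the zero entries are raised
 * to DEF_MIN_RATE, giving vn_weight_sum = 2500 + 7500 + 2 * DEF_MIN_RATE
 * with fairness left enabled; only an all-zero configuration clears
 * CMNG_FLAGS_PER_PORT_FAIRNESS_VN.
 */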
2345
2346 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2347 {
2348         struct rate_shaping_vars_per_vn m_rs_vn;
2349         struct fairness_vars_per_vn m_fair_vn;
2350         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2351         u16 vn_min_rate, vn_max_rate;
2352         int i;
2353
2354         /* If the function is hidden - set min and max to zero */
2355         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2356                 vn_min_rate = 0;
2357                 vn_max_rate = 0;
2358
2359         } else {
2360                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2361                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2362                 /* If min rate is zero - set it to 1 */
2363                 if (!vn_min_rate)
2364                         vn_min_rate = DEF_MIN_RATE;
2365                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2366                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2367         }
2368         DP(NETIF_MSG_IFUP,
2369            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2370            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2371
2372         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2373         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2374
2375         /* global vn counter - maximal Mbps for this vn */
2376         m_rs_vn.vn_counter.rate = vn_max_rate;
2377
2378         /* quota - number of bytes transmitted in this period */
2379         m_rs_vn.vn_counter.quota =
2380                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2381
2382         if (bp->vn_weight_sum) {
2383                 /* credit for each period of the fairness algorithm:
2384                    number of bytes in T_FAIR (the vns share the port rate).
2385                    vn_weight_sum should not be larger than 10000, thus
2386                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2387                    than zero */
2388                 m_fair_vn.vn_credit_delta =
2389                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2390                                                  (8 * bp->vn_weight_sum))),
2391                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2392                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2393                    m_fair_vn.vn_credit_delta);
2394         }
2395
2396         /* Store it to internal memory */
2397         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2398                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2399                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2400                        ((u32 *)(&m_rs_vn))[i]);
2401
2402         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2403                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2404                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2405                        ((u32 *)(&m_fair_vn))[i]);
2406 }
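/* Worked example (illustrative): a VN with vn_max_rate of 10000 Mbps gets
 * quota = (10000 * RS_PERIODIC_TIMEOUT_USEC) / 8 bytes per rate shaping
 * period, while its fairness credit is
 *
 *      max(vn_min_rate * T_FAIR_COEF / (8 * vn_weight_sum),
 *          2 * fair_threshold)
 *
 * so even a tiny min rate earns at least two thresholds of credit per
 * fairness period.
 */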
2407
2408
2409 /* This function is called upon link interrupt */
2410 static void bnx2x_link_attn(struct bnx2x *bp)
2411 {
2412         /* Make sure that we are synced with the current statistics */
2413         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2414
2415         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2416
2417         if (bp->link_vars.link_up) {
2418
2419                 /* dropless flow control */
2420                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2421                         int port = BP_PORT(bp);
2422                         u32 pause_enabled = 0;
2423
2424                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2425                                 pause_enabled = 1;
2426
2427                         REG_WR(bp, BAR_USTRORM_INTMEM +
2428                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2429                                pause_enabled);
2430                 }
2431
2432                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2433                         struct host_port_stats *pstats;
2434
2435                         pstats = bnx2x_sp(bp, port_stats);
2436                         /* reset old bmac stats */
2437                         memset(&(pstats->mac_stx[0]), 0,
2438                                sizeof(struct mac_stx));
2439                 }
2440                 if ((bp->state == BNX2X_STATE_OPEN) ||
2441                     (bp->state == BNX2X_STATE_DISABLED))
2442                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2443         }
2444
2445         /* indicate link status */
2446         bnx2x_link_report(bp);
2447
2448         if (IS_E1HMF(bp)) {
2449                 int port = BP_PORT(bp);
2450                 int func;
2451                 int vn;
2452
2453                 /* Set the attention towards other drivers on the same port */
2454                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2455                         if (vn == BP_E1HVN(bp))
2456                                 continue;
2457
2458                         func = ((vn << 1) | port);
2459                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2460                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2461                 }
2462
2463                 if (bp->link_vars.link_up) {
2464                         int i;
2465
2466                         /* Init rate shaping and fairness contexts */
2467                         bnx2x_init_port_minmax(bp);
2468
2469                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2470                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2471
2472                         /* Store it to internal memory */
2473                         for (i = 0;
2474                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2475                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2476                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2477                                        ((u32 *)(&bp->cmng))[i]);
2478                 }
2479         }
2480 }
2481
2482 static void bnx2x__link_status_update(struct bnx2x *bp)
2483 {
2484         int func = BP_FUNC(bp);
2485
2486         if (bp->state != BNX2X_STATE_OPEN)
2487                 return;
2488
2489         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2490
2491         if (bp->link_vars.link_up)
2492                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2493         else
2494                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2495
2496         bnx2x_calc_vn_weight_sum(bp);
2497
2498         /* indicate link status */
2499         bnx2x_link_report(bp);
2500 }
2501
2502 static void bnx2x_pmf_update(struct bnx2x *bp)
2503 {
2504         int port = BP_PORT(bp);
2505         u32 val;
2506
2507         bp->port.pmf = 1;
2508         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2509
2510         /* enable nig attention */
2511         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2512         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2513         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2514
2515         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2516 }
2517
2518 /* end of Link */
2519
2520 /* slow path */
2521
2522 /*
2523  * General service functions
2524  */
2525
2526 /* send the MCP a request, block until there is a reply */
2527 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2528 {
2529         int func = BP_FUNC(bp);
2530         u32 seq = ++bp->fw_seq;
2531         u32 rc = 0;
2532         u32 cnt = 1;
2533         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2534
2535         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2536         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2537
2538         do {
2539                 /* let the FW do its magic ... */
2540                 msleep(delay);
2541
2542                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2543
2544                 /* Give the FW up to 2 seconds (200*10ms) */
2545         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2546
2547         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2548            cnt*delay, rc, seq);
2549
2550         /* is this a reply to our command? */
2551         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2552                 rc &= FW_MSG_CODE_MASK;
2553         else {
2554                 /* FW BUG! */
2555                 BNX2X_ERR("FW failed to respond!\n");
2556                 bnx2x_fw_dump(bp);
2557                 rc = 0;
2558         }
2559
2560         return rc;
2561 }
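/* Usage sketch (illustrative): a zero return means no valid reply, so a
 * caller that cares about the answer would do something like
 *
 *      if (!bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK))
 *              BNX2X_ERR("MCP did not ack the DCC event\n");
 *
 * The sequence number OR'ed into the command is what matches the reply in
 * fw_mb_header to this particular request.
 */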
2562
2563 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2564 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2565 static void bnx2x_set_rx_mode(struct net_device *dev);
2566
2567 static void bnx2x_e1h_disable(struct bnx2x *bp)
2568 {
2569         int port = BP_PORT(bp);
2570         int i;
2571
2572         bp->rx_mode = BNX2X_RX_MODE_NONE;
2573         bnx2x_set_storm_rx_mode(bp);
2574
2575         netif_tx_disable(bp->dev);
2576         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2577
2578         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2579
2580         bnx2x_set_eth_mac_addr_e1h(bp, 0);
2581
2582         for (i = 0; i < MC_HASH_SIZE; i++)
2583                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2584
2585         netif_carrier_off(bp->dev);
2586 }
2587
2588 static void bnx2x_e1h_enable(struct bnx2x *bp)
2589 {
2590         int port = BP_PORT(bp);
2591
2592         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2593
2594         bnx2x_set_eth_mac_addr_e1h(bp, 1);
2595
2596         /* Tx queues need only be re-enabled */
2597         netif_tx_wake_all_queues(bp->dev);
2598
2599         /* Initialize the receive filter. */
2600         bnx2x_set_rx_mode(bp->dev);
2601 }
2602
2603 static void bnx2x_update_min_max(struct bnx2x *bp)
2604 {
2605         int port = BP_PORT(bp);
2606         int vn, i;
2607
2608         /* Init rate shaping and fairness contexts */
2609         bnx2x_init_port_minmax(bp);
2610
2611         bnx2x_calc_vn_weight_sum(bp);
2612
2613         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2614                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2615
2616         if (bp->port.pmf) {
2617                 int func;
2618
2619                 /* Set the attention towards other drivers on the same port */
2620                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2621                         if (vn == BP_E1HVN(bp))
2622                                 continue;
2623
2624                         func = ((vn << 1) | port);
2625                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2626                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2627                 }
2628
2629                 /* Store it to internal memory */
2630                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2631                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2632                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2633                                ((u32 *)(&bp->cmng))[i]);
2634         }
2635 }
2636
2637 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2638 {
2639         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2640
2641         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2642
2643                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2644                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2645                         bp->state = BNX2X_STATE_DISABLED;
2646
2647                         bnx2x_e1h_disable(bp);
2648                 } else {
2649                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2650                         bp->state = BNX2X_STATE_OPEN;
2651
2652                         bnx2x_e1h_enable(bp);
2653                 }
2654                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2655         }
2656         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2657
2658                 bnx2x_update_min_max(bp);
2659                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2660         }
2661
2662         /* Report results to MCP */
2663         if (dcc_event)
2664                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2665         else
2666                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2667 }
2668
2669 /* must be called under the spq lock */
2670 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2671 {
2672         struct eth_spe *next_spe = bp->spq_prod_bd;
2673
2674         if (bp->spq_prod_bd == bp->spq_last_bd) {
2675                 bp->spq_prod_bd = bp->spq;
2676                 bp->spq_prod_idx = 0;
2677                 DP(NETIF_MSG_TIMER, "end of spq\n");
2678         } else {
2679                 bp->spq_prod_bd++;
2680                 bp->spq_prod_idx++;
2681         }
2682         return next_spe;
2683 }
2684
2685 /* must be called under the spq lock */
2686 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2687 {
2688         int func = BP_FUNC(bp);
2689
2690         /* Make sure that BD data is updated before writing the producer */
2691         wmb();
2692
2693         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2694                bp->spq_prod_idx);
2695         mmiowb();
2696 }
2697
2698 /* the slow path queue is odd since completions arrive on the fastpath ring */
2699 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2700                          u32 data_hi, u32 data_lo, int common)
2701 {
2702         struct eth_spe *spe;
2703
2704         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2705            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2706            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2707            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2708            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2709
2710 #ifdef BNX2X_STOP_ON_ERROR
2711         if (unlikely(bp->panic))
2712                 return -EIO;
2713 #endif
2714
2715         spin_lock_bh(&bp->spq_lock);
2716
2717         if (!bp->spq_left) {
2718                 BNX2X_ERR("BUG! SPQ ring full!\n");
2719                 spin_unlock_bh(&bp->spq_lock);
2720                 bnx2x_panic();
2721                 return -EBUSY;
2722         }
2723
2724         spe = bnx2x_sp_get_next(bp);
2725
2726         /* CID needs the port number to be encoded in it */
2727         spe->hdr.conn_and_cmd_data =
2728                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2729                                      HW_CID(bp, cid)));
2730         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2731         if (common)
2732                 spe->hdr.type |=
2733                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2734
2735         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2736         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2737
2738         bp->spq_left--;
2739
2740         bnx2x_sp_prod_update(bp);
2741         spin_unlock_bh(&bp->spq_lock);
2742         return 0;
2743 }
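/* Usage sketch (illustrative command and cid): posting a ramrod looks
 * like
 *
 *      bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, cid, 0, 0, 0);
 *
 * after which the completion shows up on the fastpath ring and is
 * dispatched through bnx2x_sp_event().
 */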
2744
2745 /* acquire split MCP access lock register */
2746 static int bnx2x_acquire_alr(struct bnx2x *bp)
2747 {
2748         u32 i, j, val;
2749         int rc = 0;
2750
2751         might_sleep();
2752         i = 100;
2753         for (j = 0; j < i*10; j++) {
2754                 val = (1UL << 31);
2755                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2756                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2757                 if (val & (1UL << 31))
2758                         break;
2759
2760                 msleep(5);
2761         }
2762         if (!(val & (1UL << 31))) {
2763                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2764                 rc = -EBUSY;
2765         }
2766
2767         return rc;
2768 }
2769
2770 /* release split MCP access lock register */
2771 static void bnx2x_release_alr(struct bnx2x *bp)
2772 {
2773         u32 val = 0;
2774
2775         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2776 }
2777
2778 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2779 {
2780         struct host_def_status_block *def_sb = bp->def_status_blk;
2781         u16 rc = 0;
2782
2783         barrier(); /* status block is written to by the chip */
2784         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2785                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2786                 rc |= 1;
2787         }
2788         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2789                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2790                 rc |= 2;
2791         }
2792         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2793                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2794                 rc |= 4;
2795         }
2796         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2797                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2798                 rc |= 8;
2799         }
2800         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2801                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2802                 rc |= 16;
2803         }
2804         return rc;
2805 }
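/* The return value is a bitmask of which indices moved: bit 0 for the
 * attention index and bits 1-4 for the C/U/X/T storm indices.  A return
 * of 0x3 (illustrative) means both a new attention bits index and a new
 * CSTORM index were latched.
 */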
2806
2807 /*
2808  * slow path service functions
2809  */
2810
2811 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2812 {
2813         int port = BP_PORT(bp);
2814         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2815                        COMMAND_REG_ATTN_BITS_SET);
2816         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2817                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2818         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2819                                        NIG_REG_MASK_INTERRUPT_PORT0;
2820         u32 aeu_mask;
2821         u32 nig_mask = 0;
2822
2823         if (bp->attn_state & asserted)
2824                 BNX2X_ERR("IGU ERROR\n");
2825
2826         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2827         aeu_mask = REG_RD(bp, aeu_addr);
2828
2829         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2830            aeu_mask, asserted);
2831         aeu_mask &= ~(asserted & 0xff);
2832         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2833
2834         REG_WR(bp, aeu_addr, aeu_mask);
2835         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2836
2837         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2838         bp->attn_state |= asserted;
2839         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2840
2841         if (asserted & ATTN_HARD_WIRED_MASK) {
2842                 if (asserted & ATTN_NIG_FOR_FUNC) {
2843
2844                         bnx2x_acquire_phy_lock(bp);
2845
2846                         /* save nig interrupt mask */
2847                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2848                         REG_WR(bp, nig_int_mask_addr, 0);
2849
2850                         bnx2x_link_attn(bp);
2851
2852                         /* handle unicore attn? */
2853                 }
2854                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2855                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2856
2857                 if (asserted & GPIO_2_FUNC)
2858                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2859
2860                 if (asserted & GPIO_3_FUNC)
2861                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2862
2863                 if (asserted & GPIO_4_FUNC)
2864                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2865
2866                 if (port == 0) {
2867                         if (asserted & ATTN_GENERAL_ATTN_1) {
2868                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2869                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2870                         }
2871                         if (asserted & ATTN_GENERAL_ATTN_2) {
2872                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2873                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2874                         }
2875                         if (asserted & ATTN_GENERAL_ATTN_3) {
2876                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2877                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2878                         }
2879                 } else {
2880                         if (asserted & ATTN_GENERAL_ATTN_4) {
2881                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2882                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2883                         }
2884                         if (asserted & ATTN_GENERAL_ATTN_5) {
2885                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2886                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2887                         }
2888                         if (asserted & ATTN_GENERAL_ATTN_6) {
2889                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2890                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2891                         }
2892                 }
2893
2894         } /* if hardwired */
2895
2896         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2897            asserted, hc_addr);
2898         REG_WR(bp, hc_addr, asserted);
2899
2900         /* now set back the mask */
2901         if (asserted & ATTN_NIG_FOR_FUNC) {
2902                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2903                 bnx2x_release_phy_lock(bp);
2904         }
2905 }
2906
2907 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2908 {
2909         int port = BP_PORT(bp);
2910
2911         /* mark the failure */
2912         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2913         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2914         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2915                  bp->link_params.ext_phy_config);
2916
2917         /* log the failure */
2918         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2919                " the driver to shut down the card to prevent permanent"
2920                " damage.  Please contact Dell Support for assistance\n",
2921                bp->dev->name);
2922 }
2923
2924 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2925 {
2926         int port = BP_PORT(bp);
2927         int reg_offset;
2928         u32 val, swap_val, swap_override;
2929
2930         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2931                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2932
2933         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2934
2935                 val = REG_RD(bp, reg_offset);
2936                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2937                 REG_WR(bp, reg_offset, val);
2938
2939                 BNX2X_ERR("SPIO5 hw attention\n");
2940
2941                 /* Fan failure attention */
2942                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2943                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2944                         /* Low power mode is controlled by GPIO 2 */
2945                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2946                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2947                         /* The PHY reset is controlled by GPIO 1 */
2948                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2949                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2950                         break;
2951
2952                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2953                         /* The PHY reset is controlled by GPIO 1 */
2954                         /* fake the port number to cancel the swap done in
2955                            set_gpio() */
2956                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2957                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2958                         port = (swap_val && swap_override) ^ 1;
2959                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2960                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2961                         break;
2962
2963                 default:
2964                         break;
2965                 }
2966                 bnx2x_fan_failure(bp);
2967         }
2968
2969         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2970                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2971                 bnx2x_acquire_phy_lock(bp);
2972                 bnx2x_handle_module_detect_int(&bp->link_params);
2973                 bnx2x_release_phy_lock(bp);
2974         }
2975
2976         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2977
2978                 val = REG_RD(bp, reg_offset);
2979                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2980                 REG_WR(bp, reg_offset, val);
2981
2982                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2983                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2984                 bnx2x_panic();
2985         }
2986 }
2987
2988 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2989 {
2990         u32 val;
2991
2992         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2993
2994                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2995                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2996                 /* DORQ discard attention */
2997                 if (val & 0x2)
2998                         BNX2X_ERR("FATAL error from DORQ\n");
2999         }
3000
3001         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3002
3003                 int port = BP_PORT(bp);
3004                 int reg_offset;
3005
3006                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3007                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3008
3009                 val = REG_RD(bp, reg_offset);
3010                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3011                 REG_WR(bp, reg_offset, val);
3012
3013                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3014                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3015                 bnx2x_panic();
3016         }
3017 }
3018
3019 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3020 {
3021         u32 val;
3022
3023         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3024
3025                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3026                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3027                 /* CFC error attention */
3028                 if (val & 0x2)
3029                         BNX2X_ERR("FATAL error from CFC\n");
3030         }
3031
3032         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3033
3034                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3035                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3036                 /* RQ_USDMDP_FIFO_OVERFLOW */
3037                 if (val & 0x18000)
3038                         BNX2X_ERR("FATAL error from PXP\n");
3039         }
3040
3041         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3042
3043                 int port = BP_PORT(bp);
3044                 int reg_offset;
3045
3046                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3047                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3048
3049                 val = REG_RD(bp, reg_offset);
3050                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3051                 REG_WR(bp, reg_offset, val);
3052
3053                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3054                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3055                 bnx2x_panic();
3056         }
3057 }
3058
3059 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3060 {
3061         u32 val;
3062
3063         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3064
3065                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3066                         int func = BP_FUNC(bp);
3067
3068                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3069                         bp->mf_config = SHMEM_RD(bp,
3070                                            mf_cfg.func_mf_config[func].config);
3071                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3072                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3073                                 bnx2x_dcc_event(bp,
3074                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3075                         bnx2x__link_status_update(bp);
3076                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3077                                 bnx2x_pmf_update(bp);
3078
3079                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3080
3081                         BNX2X_ERR("MC assert!\n");
3082                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3083                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3084                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3085                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3086                         bnx2x_panic();
3087
3088                 } else if (attn & BNX2X_MCP_ASSERT) {
3089
3090                         BNX2X_ERR("MCP assert!\n");
3091                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3092                         bnx2x_fw_dump(bp);
3093
3094                 } else
3095                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3096         }
3097
3098         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3099                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3100                 if (attn & BNX2X_GRC_TIMEOUT) {
3101                         val = CHIP_IS_E1H(bp) ?
3102                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3103                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3104                 }
3105                 if (attn & BNX2X_GRC_RSV) {
3106                         val = CHIP_IS_E1H(bp) ?
3107                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3108                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3109                 }
3110                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3111         }
3112 }
3113
3114 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3115 {
3116         struct attn_route attn;
3117         struct attn_route group_mask;
3118         int port = BP_PORT(bp);
3119         int index;
3120         u32 reg_addr;
3121         u32 val;
3122         u32 aeu_mask;
3123
3124         /* need to take the HW lock because the MCP or the other port
3125            might also try to handle this event */
3126         bnx2x_acquire_alr(bp);
3127
3128         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3129         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3130         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3131         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3132         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3133            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3134
3135         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3136                 if (deasserted & (1 << index)) {
3137                         group_mask = bp->attn_group[index];
3138
3139                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3140                            index, group_mask.sig[0], group_mask.sig[1],
3141                            group_mask.sig[2], group_mask.sig[3]);
3142
3143                         bnx2x_attn_int_deasserted3(bp,
3144                                         attn.sig[3] & group_mask.sig[3]);
3145                         bnx2x_attn_int_deasserted1(bp,
3146                                         attn.sig[1] & group_mask.sig[1]);
3147                         bnx2x_attn_int_deasserted2(bp,
3148                                         attn.sig[2] & group_mask.sig[2]);
3149                         bnx2x_attn_int_deasserted0(bp,
3150                                         attn.sig[0] & group_mask.sig[0]);
3151
3152                         if ((attn.sig[0] & group_mask.sig[0] &
3153                                                 HW_PRTY_ASSERT_SET_0) ||
3154                             (attn.sig[1] & group_mask.sig[1] &
3155                                                 HW_PRTY_ASSERT_SET_1) ||
3156                             (attn.sig[2] & group_mask.sig[2] &
3157                                                 HW_PRTY_ASSERT_SET_2))
3158                                 BNX2X_ERR("FATAL HW block parity attention\n");
3159                 }
3160         }
3161
3162         bnx2x_release_alr(bp);
3163
3164         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3165
3166         val = ~deasserted;
3167         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3168            val, reg_addr);
3169         REG_WR(bp, reg_addr, val);
3170
3171         if (~bp->attn_state & deasserted)
3172                 BNX2X_ERR("IGU ERROR\n");
3173
3174         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3175                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3176
3177         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3178         aeu_mask = REG_RD(bp, reg_addr);
3179
3180         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3181            aeu_mask, deasserted);
3182         aeu_mask |= (deasserted & 0xff);
3183         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3184
3185         REG_WR(bp, reg_addr, aeu_mask);
3186         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3187
3188         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3189         bp->attn_state &= ~deasserted;
3190         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3191 }
3192
3193 static void bnx2x_attn_int(struct bnx2x *bp)
3194 {
3195         /* read local copy of bits */
3196         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3197                                                                 attn_bits);
3198         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3199                                                                 attn_bits_ack);
3200         u32 attn_state = bp->attn_state;
3201
3202         /* look for changed bits */
3203         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3204         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3205
3206         DP(NETIF_MSG_HW,
3207            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3208            attn_bits, attn_ack, asserted, deasserted);
3209
3210         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3211                 BNX2X_ERR("BAD attention state\n");
3212
3213         /* handle bits that were raised */
3214         if (asserted)
3215                 bnx2x_attn_int_asserted(bp, asserted);
3216
3217         if (deasserted)
3218                 bnx2x_attn_int_deasserted(bp, deasserted);
3219 }
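
/*
 * Worked example (illustrative): suppose attention line 3 was idle and
 * has just fired, so bit 3 is set in attn_bits but not yet in attn_ack
 * or attn_state.  Then:
 *
 *	asserted   =  attn_bits & ~attn_ack & ~attn_state = (1 << 3)
 *	deasserted = ~attn_bits &  attn_ack &  attn_state = 0
 *
 * Once the line is acked and later drops, bit 3 is still set in
 * attn_ack and attn_state but clear in attn_bits, so the same masks
 * yield asserted == 0 and deasserted == (1 << 3).
 */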
3220
3221 static void bnx2x_sp_task(struct work_struct *work)
3222 {
3223         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3224         u16 status;
3225
3226
3227         /* Return here if interrupt is disabled */
3228         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3229                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3230                 return;
3231         }
3232
3233         status = bnx2x_update_dsb_idx(bp);
3234 /*      if (status == 0)                                     */
3235 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3236
3237         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3238
3239         /* HW attentions */
3240         if (status & 0x1)
3241                 bnx2x_attn_int(bp);
3242
3243         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3244                      IGU_INT_NOP, 1);
3245         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3246                      IGU_INT_NOP, 1);
3247         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3248                      IGU_INT_NOP, 1);
3249         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3250                      IGU_INT_NOP, 1);
3251         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3252                      IGU_INT_ENABLE, 1);
3253
3254 }
3255
3256 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3257 {
3258         struct net_device *dev = dev_instance;
3259         struct bnx2x *bp = netdev_priv(dev);
3260
3261         /* Return here if interrupt is disabled */
3262         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3263                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3264                 return IRQ_HANDLED;
3265         }
3266
3267         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3268
3269 #ifdef BNX2X_STOP_ON_ERROR
3270         if (unlikely(bp->panic))
3271                 return IRQ_HANDLED;
3272 #endif
3273
3274 #ifdef BCM_CNIC
3275         {
3276                 struct cnic_ops *c_ops;
3277
3278                 rcu_read_lock();
3279                 c_ops = rcu_dereference(bp->cnic_ops);
3280                 if (c_ops)
3281                         c_ops->cnic_handler(bp->cnic_data, NULL);
3282                 rcu_read_unlock();
3283         }
3284 #endif
3285         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3286
3287         return IRQ_HANDLED;
3288 }
3289
3290 /* end of slow path */
3291
3292 /* Statistics */
3293
3294 /****************************************************************************
3295 * Macros
3296 ****************************************************************************/
3297
3298 /* sum[hi:lo] += add[hi:lo] */
3299 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3300         do { \
3301                 s_lo += a_lo; \
3302                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3303         } while (0)
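
/*
 * Example (illustrative): carry propagation in ADD_64.  Adding 0x20 to
 * the split 64-bit value {hi = 0, lo = 0xfffffff0} wraps the low dword;
 * the new s_lo (0x10) is smaller than a_lo (0x20), so 1 is carried into
 * the high dword:
 *
 *	u32 hi = 0, lo = 0xfffffff0;
 *	ADD_64(hi, 0, lo, 0x20);	yields hi == 1, lo == 0x10
 */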
3304
3305 /* difference = minuend - subtrahend */
3306 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3307         do { \
3308                 if (m_lo < s_lo) { \
3309                         /* underflow */ \
3310                         d_hi = m_hi - s_hi; \
3311                         if (d_hi > 0) { \
3312                                 /* we can 'borrow' 1 */ \
3313                                 d_hi--; \
3314                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3315                         } else { \
3316                                 /* m_hi <= s_hi */ \
3317                                 d_hi = 0; \
3318                                 d_lo = 0; \
3319                         } \
3320                 } else { \
3321                         /* m_lo >= s_lo */ \
3322                         if (m_hi < s_hi) { \
3323                                 d_hi = 0; \
3324                                 d_lo = 0; \
3325                         } else { \
3326                                 /* m_hi >= s_hi */ \
3327                                 d_hi = m_hi - s_hi; \
3328                                 d_lo = m_lo - s_lo; \
3329                         } \
3330                 } \
3331         } while (0)
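
/*
 * Example (illustrative): borrowing in DIFF_64.  Computing
 * {1, 0x0} - {0, 0xffffffff} takes the underflow path: m_lo < s_lo and
 * d_hi = 1 - 0 > 0, so d_hi drops to 0 and
 * d_lo = 0 + (UINT_MAX - 0xffffffff) + 1 = 1, i.e. the 64-bit result is
 * 1.  If the subtrahend exceeds the minuend, the result is clamped to
 * {0, 0}, which suits the monotonically increasing hardware counters
 * tracked in this file.
 */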
3332
3333 #define UPDATE_STAT64(s, t) \
3334         do { \
3335                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3336                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3337                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3338                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3339                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3340                        pstats->mac_stx[1].t##_lo, diff.lo); \
3341         } while (0)
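
/*
 * Sketch of what UPDATE_STAT64(s, t) does (illustrative): mac_stx[0]
 * holds the previous raw hardware snapshot and mac_stx[1] the running
 * total, so e.g.
 *
 *	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
 *
 * computes the 64-bit delta between new->rx_stat_grfcs and the stored
 * snapshot in mac_stx[0] via DIFF_64, saves the new value as the next
 * snapshot, and accumulates the delta into mac_stx[1] with ADD_64.
 */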
3342
3343 #define UPDATE_STAT64_NIG(s, t) \
3344         do { \
3345                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3346                         diff.lo, new->s##_lo, old->s##_lo); \
3347                 ADD_64(estats->t##_hi, diff.hi, \
3348                        estats->t##_lo, diff.lo); \
3349         } while (0)
3350
3351 /* sum[hi:lo] += add */
3352 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3353         do { \
3354                 s_lo += a; \
3355                 s_hi += (s_lo < a) ? 1 : 0; \
3356         } while (0)
3357
3358 #define UPDATE_EXTEND_STAT(s) \
3359         do { \
3360                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3361                               pstats->mac_stx[1].s##_lo, \
3362                               new->s); \
3363         } while (0)
3364
3365 #define UPDATE_EXTEND_TSTAT(s, t) \
3366         do { \
3367                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3368                 old_tclient->s = tclient->s; \
3369                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3370         } while (0)
3371
3372 #define UPDATE_EXTEND_USTAT(s, t) \
3373         do { \
3374                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3375                 old_uclient->s = uclient->s; \
3376                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3377         } while (0)
3378
3379 #define UPDATE_EXTEND_XSTAT(s, t) \
3380         do { \
3381                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3382                 old_xclient->s = xclient->s; \
3383                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3384         } while (0)
3385
3386 /* minuend -= subtrahend */
3387 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3388         do { \
3389                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3390         } while (0)
3391
3392 /* minuend[hi:lo] -= subtrahend */
3393 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3394         do { \
3395                 SUB_64(m_hi, 0, m_lo, s); \
3396         } while (0)
3397
3398 #define SUB_EXTEND_USTAT(s, t) \
3399         do { \
3400                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3401                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3402         } while (0)
3403
3404 /*
3405  * General service functions
3406  */
3407
3408 static inline long bnx2x_hilo(u32 *hiref)
3409 {
3410         u32 lo = *(hiref + 1);
3411 #if (BITS_PER_LONG == 64)
3412         u32 hi = *hiref;
3413
3414         return HILO_U64(hi, lo);
3415 #else
3416         return lo;
3417 #endif
3418 }
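
/*
 * Example (illustrative): given the pair {hi = 0x1, lo = 0x2},
 * bnx2x_hilo() returns 0x100000002 on a 64-bit kernel (HILO_U64 merges
 * the two dwords) but only the low dword, 0x2, on a 32-bit kernel,
 * where a long cannot hold the full 64-bit counter.
 */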
3419
3420 /*
3421  * Init service functions
3422  */
3423
3424 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3425 {
3426         if (!bp->stats_pending) {
3427                 struct eth_query_ramrod_data ramrod_data = {0};
3428                 int i, rc;
3429
3430                 ramrod_data.drv_counter = bp->stats_counter++;
3431                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3432                 for_each_queue(bp, i)
3433                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3434
3435                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3436                                    ((u32 *)&ramrod_data)[1],
3437                                    ((u32 *)&ramrod_data)[0], 0);
3438                 if (rc == 0) {
3439                         /* the stats ramrod has its own slot on the spq */
3440                         bp->spq_left++;
3441                         bp->stats_pending = 1;
3442                 }
3443         }
3444 }
3445
3446 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3447 {
3448         struct dmae_command *dmae = &bp->stats_dmae;
3449         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3450
3451         *stats_comp = DMAE_COMP_VAL;
3452         if (CHIP_REV_IS_SLOW(bp))
3453                 return;
3454
3455         /* loader */
3456         if (bp->executer_idx) {
3457                 int loader_idx = PMF_DMAE_C(bp);
3458
3459                 memset(dmae, 0, sizeof(struct dmae_command));
3460
3461                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3462                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3463                                 DMAE_CMD_DST_RESET |
3464 #ifdef __BIG_ENDIAN
3465                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3466 #else
3467                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3468 #endif
3469                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3470                                                DMAE_CMD_PORT_0) |
3471                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3472                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3473                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3474                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3475                                      sizeof(struct dmae_command) *
3476                                      (loader_idx + 1)) >> 2;
3477                 dmae->dst_addr_hi = 0;
3478                 dmae->len = sizeof(struct dmae_command) >> 2;
3479                 if (CHIP_IS_E1(bp))
3480                         dmae->len--;
3481                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3482                 dmae->comp_addr_hi = 0;
3483                 dmae->comp_val = 1;
3484
3485                 *stats_comp = 0;
3486                 bnx2x_post_dmae(bp, dmae, loader_idx);
3487
3488         } else if (bp->func_stx) {
3489                 *stats_comp = 0;
3490                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3491         }
3492 }
3493
3494 static int bnx2x_stats_comp(struct bnx2x *bp)
3495 {
3496         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3497         int cnt = 10;
3498
3499         might_sleep();
3500         while (*stats_comp != DMAE_COMP_VAL) {
3501                 if (!cnt) {
3502                         BNX2X_ERR("timed out waiting for stats to finish\n");
3503                         break;
3504                 }
3505                 cnt--;
3506                 msleep(1);
3507         }
3508         return 1;
3509 }
3510
3511 /*
3512  * Statistics service functions
3513  */
3514
3515 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3516 {
3517         struct dmae_command *dmae;
3518         u32 opcode;
3519         int loader_idx = PMF_DMAE_C(bp);
3520         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3521
3522         /* sanity */
3523         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3524                 BNX2X_ERR("BUG!\n");
3525                 return;
3526         }
3527
3528         bp->executer_idx = 0;
3529
3530         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3531                   DMAE_CMD_C_ENABLE |
3532                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3533 #ifdef __BIG_ENDIAN
3534                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3535 #else
3536                   DMAE_CMD_ENDIANITY_DW_SWAP |
3537 #endif
3538                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3539                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3540
3541         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3542         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3543         dmae->src_addr_lo = bp->port.port_stx >> 2;
3544         dmae->src_addr_hi = 0;
3545         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3546         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3547         dmae->len = DMAE_LEN32_RD_MAX;
3548         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3549         dmae->comp_addr_hi = 0;
3550         dmae->comp_val = 1;
3551
3552         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3553         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3554         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3555         dmae->src_addr_hi = 0;
3556         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3557                                    DMAE_LEN32_RD_MAX * 4);
3558         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3559                                    DMAE_LEN32_RD_MAX * 4);
3560         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3561         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3562         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3563         dmae->comp_val = DMAE_COMP_VAL;
3564
3565         *stats_comp = 0;
3566         bnx2x_hw_stats_post(bp);
3567         bnx2x_stats_comp(bp);
3568 }
3569
3570 static void bnx2x_port_stats_init(struct bnx2x *bp)
3571 {
3572         struct dmae_command *dmae;
3573         int port = BP_PORT(bp);
3574         int vn = BP_E1HVN(bp);
3575         u32 opcode;
3576         int loader_idx = PMF_DMAE_C(bp);
3577         u32 mac_addr;
3578         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3579
3580         /* sanity */
3581         if (!bp->link_vars.link_up || !bp->port.pmf) {
3582                 BNX2X_ERR("BUG!\n");
3583                 return;
3584         }
3585
3586         bp->executer_idx = 0;
3587
3588         /* MCP */
3589         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3590                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3591                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3592 #ifdef __BIG_ENDIAN
3593                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3594 #else
3595                   DMAE_CMD_ENDIANITY_DW_SWAP |
3596 #endif
3597                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3598                   (vn << DMAE_CMD_E1HVN_SHIFT));
3599
3600         if (bp->port.port_stx) {
3601
3602                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3603                 dmae->opcode = opcode;
3604                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3605                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3606                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3607                 dmae->dst_addr_hi = 0;
3608                 dmae->len = sizeof(struct host_port_stats) >> 2;
3609                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3610                 dmae->comp_addr_hi = 0;
3611                 dmae->comp_val = 1;
3612         }
3613
3614         if (bp->func_stx) {
3615
3616                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3617                 dmae->opcode = opcode;
3618                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3619                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3620                 dmae->dst_addr_lo = bp->func_stx >> 2;
3621                 dmae->dst_addr_hi = 0;
3622                 dmae->len = sizeof(struct host_func_stats) >> 2;
3623                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3624                 dmae->comp_addr_hi = 0;
3625                 dmae->comp_val = 1;
3626         }
3627
3628         /* MAC */
3629         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3630                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3631                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3632 #ifdef __BIG_ENDIAN
3633                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3634 #else
3635                   DMAE_CMD_ENDIANITY_DW_SWAP |
3636 #endif
3637                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3638                   (vn << DMAE_CMD_E1HVN_SHIFT));
3639
3640         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3641
3642                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3643                                    NIG_REG_INGRESS_BMAC0_MEM);
3644
3645                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3646                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3647                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3648                 dmae->opcode = opcode;
3649                 dmae->src_addr_lo = (mac_addr +
3650                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3651                 dmae->src_addr_hi = 0;
3652                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3653                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3654                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3655                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3656                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657                 dmae->comp_addr_hi = 0;
3658                 dmae->comp_val = 1;
3659
3660                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3661                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3662                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3663                 dmae->opcode = opcode;
3664                 dmae->src_addr_lo = (mac_addr +
3665                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3666                 dmae->src_addr_hi = 0;
3667                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3668                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3669                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3670                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3671                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3672                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3673                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3674                 dmae->comp_addr_hi = 0;
3675                 dmae->comp_val = 1;
3676
3677         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3678
3679                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3680
3681                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3682                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3683                 dmae->opcode = opcode;
3684                 dmae->src_addr_lo = (mac_addr +
3685                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3686                 dmae->src_addr_hi = 0;
3687                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3688                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3689                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3690                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3691                 dmae->comp_addr_hi = 0;
3692                 dmae->comp_val = 1;
3693
3694                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3695                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3696                 dmae->opcode = opcode;
3697                 dmae->src_addr_lo = (mac_addr +
3698                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3699                 dmae->src_addr_hi = 0;
3700                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3701                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3702                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3703                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3704                 dmae->len = 1;
3705                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3706                 dmae->comp_addr_hi = 0;
3707                 dmae->comp_val = 1;
3708
3709                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3710                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3711                 dmae->opcode = opcode;
3712                 dmae->src_addr_lo = (mac_addr +
3713                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3714                 dmae->src_addr_hi = 0;
3715                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3716                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3717                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3718                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3719                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3720                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3721                 dmae->comp_addr_hi = 0;
3722                 dmae->comp_val = 1;
3723         }
3724
3725         /* NIG */
3726         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3727         dmae->opcode = opcode;
3728         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3729                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3730         dmae->src_addr_hi = 0;
3731         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3732         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3733         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3734         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3735         dmae->comp_addr_hi = 0;
3736         dmae->comp_val = 1;
3737
3738         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3739         dmae->opcode = opcode;
3740         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3741                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3742         dmae->src_addr_hi = 0;
3743         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3744                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3745         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3746                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3747         dmae->len = (2*sizeof(u32)) >> 2;
3748         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3749         dmae->comp_addr_hi = 0;
3750         dmae->comp_val = 1;
3751
3752         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3753         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3754                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3755                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3756 #ifdef __BIG_ENDIAN
3757                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3758 #else
3759                         DMAE_CMD_ENDIANITY_DW_SWAP |
3760 #endif
3761                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3762                         (vn << DMAE_CMD_E1HVN_SHIFT));
3763         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3764                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3765         dmae->src_addr_hi = 0;
3766         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3767                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3768         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3769                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3770         dmae->len = (2*sizeof(u32)) >> 2;
3771         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3772         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3773         dmae->comp_val = DMAE_COMP_VAL;
3774
3775         *stats_comp = 0;
3776 }
3777
3778 static void bnx2x_func_stats_init(struct bnx2x *bp)
3779 {
3780         struct dmae_command *dmae = &bp->stats_dmae;
3781         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3782
3783         /* sanity */
3784         if (!bp->func_stx) {
3785                 BNX2X_ERR("BUG!\n");
3786                 return;
3787         }
3788
3789         bp->executer_idx = 0;
3790         memset(dmae, 0, sizeof(struct dmae_command));
3791
3792         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3793                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3794                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3795 #ifdef __BIG_ENDIAN
3796                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3797 #else
3798                         DMAE_CMD_ENDIANITY_DW_SWAP |
3799 #endif
3800                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3801                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3802         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3803         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3804         dmae->dst_addr_lo = bp->func_stx >> 2;
3805         dmae->dst_addr_hi = 0;
3806         dmae->len = sizeof(struct host_func_stats) >> 2;
3807         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3808         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3809         dmae->comp_val = DMAE_COMP_VAL;
3810
3811         *stats_comp = 0;
3812 }
3813
3814 static void bnx2x_stats_start(struct bnx2x *bp)
3815 {
3816         if (bp->port.pmf)
3817                 bnx2x_port_stats_init(bp);
3818
3819         else if (bp->func_stx)
3820                 bnx2x_func_stats_init(bp);
3821
3822         bnx2x_hw_stats_post(bp);
3823         bnx2x_storm_stats_post(bp);
3824 }
3825
3826 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3827 {
3828         bnx2x_stats_comp(bp);
3829         bnx2x_stats_pmf_update(bp);
3830         bnx2x_stats_start(bp);
3831 }
3832
3833 static void bnx2x_stats_restart(struct bnx2x *bp)
3834 {
3835         bnx2x_stats_comp(bp);
3836         bnx2x_stats_start(bp);
3837 }
3838
3839 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3840 {
3841         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3842         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3843         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3844         struct {
3845                 u32 lo;
3846                 u32 hi;
3847         } diff;
3848
3849         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3850         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3851         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3852         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3853         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3854         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3855         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3856         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3857         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3858         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3859         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3860         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3861         UPDATE_STAT64(tx_stat_gt127,
3862                                 tx_stat_etherstatspkts65octetsto127octets);
3863         UPDATE_STAT64(tx_stat_gt255,
3864                                 tx_stat_etherstatspkts128octetsto255octets);
3865         UPDATE_STAT64(tx_stat_gt511,
3866                                 tx_stat_etherstatspkts256octetsto511octets);
3867         UPDATE_STAT64(tx_stat_gt1023,
3868                                 tx_stat_etherstatspkts512octetsto1023octets);
3869         UPDATE_STAT64(tx_stat_gt1518,
3870                                 tx_stat_etherstatspkts1024octetsto1522octets);
3871         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3872         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3873         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3874         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3875         UPDATE_STAT64(tx_stat_gterr,
3876                                 tx_stat_dot3statsinternalmactransmiterrors);
3877         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3878
3879         estats->pause_frames_received_hi =
3880                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3881         estats->pause_frames_received_lo =
3882                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3883
3884         estats->pause_frames_sent_hi =
3885                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3886         estats->pause_frames_sent_lo =
3887                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3888 }
3889
3890 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3891 {
3892         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3893         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3894         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3895
3896         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3897         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3898         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3899         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3900         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3901         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3902         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3903         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3904         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3905         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3906         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3907         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3908         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3909         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3910         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3911         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3912         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3913         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3914         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3915         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3916         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3917         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3918         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3919         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3920         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3921         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3922         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3923         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3924         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3925         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3926         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3927
3928         estats->pause_frames_received_hi =
3929                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3930         estats->pause_frames_received_lo =
3931                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3932         ADD_64(estats->pause_frames_received_hi,
3933                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3934                estats->pause_frames_received_lo,
3935                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3936
3937         estats->pause_frames_sent_hi =
3938                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3939         estats->pause_frames_sent_lo =
3940                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3941         ADD_64(estats->pause_frames_sent_hi,
3942                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3943                estats->pause_frames_sent_lo,
3944                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3945 }
3946
3947 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3948 {
3949         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3950         struct nig_stats *old = &(bp->port.old_nig_stats);
3951         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3952         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3953         struct {
3954                 u32 lo;
3955                 u32 hi;
3956         } diff;
3957         u32 nig_timer_max;
3958
3959         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3960                 bnx2x_bmac_stats_update(bp);
3961
3962         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3963                 bnx2x_emac_stats_update(bp);
3964
3965         else { /* unreached */
3966                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3967                 return -1;
3968         }
3969
3970         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3971                       new->brb_discard - old->brb_discard);
3972         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3973                       new->brb_truncate - old->brb_truncate);
3974
3975         UPDATE_STAT64_NIG(egress_mac_pkt0,
3976                                         etherstatspkts1024octetsto1522octets);
3977         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3978
3979         memcpy(old, new, sizeof(struct nig_stats));
3980
3981         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3982                sizeof(struct mac_stx));
3983         estats->brb_drop_hi = pstats->brb_drop_hi;
3984         estats->brb_drop_lo = pstats->brb_drop_lo;
3985
3986         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3987
3988         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3989         if (nig_timer_max != estats->nig_timer_max) {
3990                 estats->nig_timer_max = nig_timer_max;
3991                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3992         }
3993
3994         return 0;
3995 }
3996
3997 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3998 {
3999         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4000         struct tstorm_per_port_stats *tport =
4001                                         &stats->tstorm_common.port_statistics;
4002         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4003         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4004         int i;
4005
4006         memcpy(&(fstats->total_bytes_received_hi),
4007                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4008                sizeof(struct host_func_stats) - 2*sizeof(u32));
4009         estats->error_bytes_received_hi = 0;
4010         estats->error_bytes_received_lo = 0;
4011         estats->etherstatsoverrsizepkts_hi = 0;
4012         estats->etherstatsoverrsizepkts_lo = 0;
4013         estats->no_buff_discard_hi = 0;
4014         estats->no_buff_discard_lo = 0;
4015
4016         for_each_rx_queue(bp, i) {
4017                 struct bnx2x_fastpath *fp = &bp->fp[i];
4018                 int cl_id = fp->cl_id;
4019                 struct tstorm_per_client_stats *tclient =
4020                                 &stats->tstorm_common.client_statistics[cl_id];
4021                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4022                 struct ustorm_per_client_stats *uclient =
4023                                 &stats->ustorm_common.client_statistics[cl_id];
4024                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4025                 struct xstorm_per_client_stats *xclient =
4026                                 &stats->xstorm_common.client_statistics[cl_id];
4027                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4028                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4029                 u32 diff;
4030
4031                 /* are storm stats valid? */
4032                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4033                                                         bp->stats_counter) {
4034                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4035                            "  xstorm counter (%d) != stats_counter (%d)\n",
4036                            i, xclient->stats_counter, bp->stats_counter);
4037                         return -1;
4038                 }
4039                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4040                                                         bp->stats_counter) {
4041                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4042                            "  tstorm counter (%d) != stats_counter (%d)\n",
4043                            i, tclient->stats_counter, bp->stats_counter);
4044                         return -2;
4045                 }
4046                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4047                                                         bp->stats_counter) {
4048                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4049                            "  ustorm counter (%d) != stats_counter (%d)\n",
4050                            i, uclient->stats_counter, bp->stats_counter);
4051                         return -4;
4052                 }
4053
4054                 qstats->total_bytes_received_hi =
4055                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4056                 qstats->total_bytes_received_lo =
4057                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4058
4059                 ADD_64(qstats->total_bytes_received_hi,
4060                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4061                        qstats->total_bytes_received_lo,
4062                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4063
4064                 ADD_64(qstats->total_bytes_received_hi,
4065                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4066                        qstats->total_bytes_received_lo,
4067                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4068
4069                 qstats->valid_bytes_received_hi =
4070                                         qstats->total_bytes_received_hi;
4071                 qstats->valid_bytes_received_lo =
4072                                         qstats->total_bytes_received_lo;
4073
4074                 qstats->error_bytes_received_hi =
4075                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4076                 qstats->error_bytes_received_lo =
4077                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4078
4079                 ADD_64(qstats->total_bytes_received_hi,
4080                        qstats->error_bytes_received_hi,
4081                        qstats->total_bytes_received_lo,
4082                        qstats->error_bytes_received_lo);
4083
4084                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4085                                         total_unicast_packets_received);
4086                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4087                                         total_multicast_packets_received);
4088                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4089                                         total_broadcast_packets_received);
4090                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4091                                         etherstatsoverrsizepkts);
4092                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4093
4094                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4095                                         total_unicast_packets_received);
4096                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4097                                         total_multicast_packets_received);
4098                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4099                                         total_broadcast_packets_received);
4100                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4101                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4102                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4103
4104                 qstats->total_bytes_transmitted_hi =
4105                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4106                 qstats->total_bytes_transmitted_lo =
4107                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4108
4109                 ADD_64(qstats->total_bytes_transmitted_hi,
4110                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4111                        qstats->total_bytes_transmitted_lo,
4112                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4113
4114                 ADD_64(qstats->total_bytes_transmitted_hi,
4115                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4116                        qstats->total_bytes_transmitted_lo,
4117                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4118
4119                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4120                                         total_unicast_packets_transmitted);
4121                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4122                                         total_multicast_packets_transmitted);
4123                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4124                                         total_broadcast_packets_transmitted);
4125
4126                 old_tclient->checksum_discard = tclient->checksum_discard;
4127                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4128
4129                 ADD_64(fstats->total_bytes_received_hi,
4130                        qstats->total_bytes_received_hi,
4131                        fstats->total_bytes_received_lo,
4132                        qstats->total_bytes_received_lo);
4133                 ADD_64(fstats->total_bytes_transmitted_hi,
4134                        qstats->total_bytes_transmitted_hi,
4135                        fstats->total_bytes_transmitted_lo,
4136                        qstats->total_bytes_transmitted_lo);
4137                 ADD_64(fstats->total_unicast_packets_received_hi,
4138                        qstats->total_unicast_packets_received_hi,
4139                        fstats->total_unicast_packets_received_lo,
4140                        qstats->total_unicast_packets_received_lo);
4141                 ADD_64(fstats->total_multicast_packets_received_hi,
4142                        qstats->total_multicast_packets_received_hi,
4143                        fstats->total_multicast_packets_received_lo,
4144                        qstats->total_multicast_packets_received_lo);
4145                 ADD_64(fstats->total_broadcast_packets_received_hi,
4146                        qstats->total_broadcast_packets_received_hi,
4147                        fstats->total_broadcast_packets_received_lo,
4148                        qstats->total_broadcast_packets_received_lo);
4149                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4150                        qstats->total_unicast_packets_transmitted_hi,
4151                        fstats->total_unicast_packets_transmitted_lo,
4152                        qstats->total_unicast_packets_transmitted_lo);
4153                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4154                        qstats->total_multicast_packets_transmitted_hi,
4155                        fstats->total_multicast_packets_transmitted_lo,
4156                        qstats->total_multicast_packets_transmitted_lo);
4157                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4158                        qstats->total_broadcast_packets_transmitted_hi,
4159                        fstats->total_broadcast_packets_transmitted_lo,
4160                        qstats->total_broadcast_packets_transmitted_lo);
4161                 ADD_64(fstats->valid_bytes_received_hi,
4162                        qstats->valid_bytes_received_hi,
4163                        fstats->valid_bytes_received_lo,
4164                        qstats->valid_bytes_received_lo);
4165
4166                 ADD_64(estats->error_bytes_received_hi,
4167                        qstats->error_bytes_received_hi,
4168                        estats->error_bytes_received_lo,
4169                        qstats->error_bytes_received_lo);
4170                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4171                        qstats->etherstatsoverrsizepkts_hi,
4172                        estats->etherstatsoverrsizepkts_lo,
4173                        qstats->etherstatsoverrsizepkts_lo);
4174                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4175                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4176         }
4177
4178         ADD_64(fstats->total_bytes_received_hi,
4179                estats->rx_stat_ifhcinbadoctets_hi,
4180                fstats->total_bytes_received_lo,
4181                estats->rx_stat_ifhcinbadoctets_lo);
4182
4183         memcpy(estats, &(fstats->total_bytes_received_hi),
4184                sizeof(struct host_func_stats) - 2*sizeof(u32));
4185
4186         ADD_64(estats->etherstatsoverrsizepkts_hi,
4187                estats->rx_stat_dot3statsframestoolong_hi,
4188                estats->etherstatsoverrsizepkts_lo,
4189                estats->rx_stat_dot3statsframestoolong_lo);
4190         ADD_64(estats->error_bytes_received_hi,
4191                estats->rx_stat_ifhcinbadoctets_hi,
4192                estats->error_bytes_received_lo,
4193                estats->rx_stat_ifhcinbadoctets_lo);
4194
4195         if (bp->port.pmf) {
4196                 estats->mac_filter_discard =
4197                                 le32_to_cpu(tport->mac_filter_discard);
4198                 estats->xxoverflow_discard =
4199                                 le32_to_cpu(tport->xxoverflow_discard);
4200                 estats->brb_truncate_discard =
4201                                 le32_to_cpu(tport->brb_truncate_discard);
4202                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4203         }
4204
4205         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4206
4207         bp->stats_pending = 0;
4208
4209         return 0;
4210 }
4211
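     /* Fold the 64-bit hi/lo driver statistics into the generic
      * struct net_device_stats counters reported to the stack.
      */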
4212 static void bnx2x_net_stats_update(struct bnx2x *bp)
4213 {
4214         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4215         struct net_device_stats *nstats = &bp->dev->stats;
4216         int i;
4217
4218         nstats->rx_packets =
4219                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4220                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4221                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4222
4223         nstats->tx_packets =
4224                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4225                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4226                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4227
4228         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4229
4230         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4231
4232         nstats->rx_dropped = estats->mac_discard;
4233         for_each_rx_queue(bp, i)
4234                 nstats->rx_dropped +=
4235                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4236
4237         nstats->tx_dropped = 0;
4238
4239         nstats->multicast =
4240                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4241
4242         nstats->collisions =
4243                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4244
4245         nstats->rx_length_errors =
4246                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4247                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4248         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4249                                  bnx2x_hilo(&estats->brb_truncate_hi);
4250         nstats->rx_crc_errors =
4251                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4252         nstats->rx_frame_errors =
4253                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4254         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4255         nstats->rx_missed_errors = estats->xxoverflow_discard;
4256
4257         nstats->rx_errors = nstats->rx_length_errors +
4258                             nstats->rx_over_errors +
4259                             nstats->rx_crc_errors +
4260                             nstats->rx_frame_errors +
4261                             nstats->rx_fifo_errors +
4262                             nstats->rx_missed_errors;
4263
4264         nstats->tx_aborted_errors =
4265                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4266                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4267         nstats->tx_carrier_errors =
4268                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4269         nstats->tx_fifo_errors = 0;
4270         nstats->tx_heartbeat_errors = 0;
4271         nstats->tx_window_errors = 0;
4272
4273         nstats->tx_errors = nstats->tx_aborted_errors +
4274                             nstats->tx_carrier_errors +
4275             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4276 }
4277
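     /* Sum the per-Rx-queue driver counters (Xoff events, failed SKB
      * allocations, HW checksum errors) into the global eth_stats.
      */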
4278 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4279 {
4280         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4281         int i;
4282
4283         estats->driver_xoff = 0;
4284         estats->rx_err_discard_pkt = 0;
4285         estats->rx_skb_alloc_failed = 0;
4286         estats->hw_csum_err = 0;
4287         for_each_rx_queue(bp, i) {
4288                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4289
4290                 estats->driver_xoff += qstats->driver_xoff;
4291                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4292                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4293                 estats->hw_csum_err += qstats->hw_csum_err;
4294         }
4295 }
4296
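     /* Timer-driven update: runs only once the pending DMAE completion
      * has arrived, refreshes the HW/storm/net/driver views and re-posts
      * the statistics requests for the next cycle.
      */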
4297 static void bnx2x_stats_update(struct bnx2x *bp)
4298 {
4299         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4300
4301         if (*stats_comp != DMAE_COMP_VAL)
4302                 return;
4303
4304         if (bp->port.pmf)
4305                 bnx2x_hw_stats_update(bp);
4306
4307         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4308                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4309                 bnx2x_panic();
4310                 return;
4311         }
4312
4313         bnx2x_net_stats_update(bp);
4314         bnx2x_drv_stats_update(bp);
4315
4316         if (bp->msglevel & NETIF_MSG_TIMER) {
4317                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4318                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4319                 struct tstorm_per_client_stats *old_tclient =
4320                                                         &bp->fp->old_tclient;
4321                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4322                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4323                 struct net_device_stats *nstats = &bp->dev->stats;
4324                 int i;
4325
4326                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4327                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4328                                   "  tx pkt (%lx)\n",
4329                        bnx2x_tx_avail(fp0_tx),
4330                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4331                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4332                                   "  rx pkt (%lx)\n",
4333                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4334                              fp0_rx->rx_comp_cons),
4335                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4336                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4337                                   "brb truncate %u\n",
4338                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4339                        qstats->driver_xoff,
4340                        estats->brb_drop_lo, estats->brb_truncate_lo);
4341                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4342                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4343                         "mac_discard %u  mac_filter_discard %u  "
4344                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4345                         "ttl0_discard %u\n",
4346                        le32_to_cpu(old_tclient->checksum_discard),
4347                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4348                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4349                        estats->mac_discard, estats->mac_filter_discard,
4350                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4351                        le32_to_cpu(old_tclient->ttl0_discard));
4352
4353                 for_each_queue(bp, i) {
4354                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4355                                bnx2x_fp(bp, i, tx_pkt),
4356                                bnx2x_fp(bp, i, rx_pkt),
4357                                bnx2x_fp(bp, i, rx_calls));
4358                 }
4359         }
4360
4361         bnx2x_hw_stats_post(bp);
4362         bnx2x_storm_stats_post(bp);
4363 }
4364
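     /* Build the DMAE commands that flush the final port (and, if
      * present, function) statistics out to shmem when gathering stops.
      */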
4365 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4366 {
4367         struct dmae_command *dmae;
4368         u32 opcode;
4369         int loader_idx = PMF_DMAE_C(bp);
4370         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4371
4372         bp->executer_idx = 0;
4373
4374         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4375                   DMAE_CMD_C_ENABLE |
4376                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4377 #ifdef __BIG_ENDIAN
4378                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4379 #else
4380                   DMAE_CMD_ENDIANITY_DW_SWAP |
4381 #endif
4382                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4383                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4384
4385         if (bp->port.port_stx) {
4386
4387                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4388                 if (bp->func_stx)
4389                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4390                 else
4391                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4392                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4393                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4394                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4395                 dmae->dst_addr_hi = 0;
4396                 dmae->len = sizeof(struct host_port_stats) >> 2;
4397                 if (bp->func_stx) {
4398                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4399                         dmae->comp_addr_hi = 0;
4400                         dmae->comp_val = 1;
4401                 } else {
4402                         dmae->comp_addr_lo =
4403                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4404                         dmae->comp_addr_hi =
4405                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4406                         dmae->comp_val = DMAE_COMP_VAL;
4407
4408                         *stats_comp = 0;
4409                 }
4410         }
4411
4412         if (bp->func_stx) {
4413
4414                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4415                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4416                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4417                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4418                 dmae->dst_addr_lo = bp->func_stx >> 2;
4419                 dmae->dst_addr_hi = 0;
4420                 dmae->len = sizeof(struct host_func_stats) >> 2;
4421                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4422                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4423                 dmae->comp_val = DMAE_COMP_VAL;
4424
4425                 *stats_comp = 0;
4426         }
4427 }
4428
4429 static void bnx2x_stats_stop(struct bnx2x *bp)
4430 {
4431         int update = 0;
4432
4433         bnx2x_stats_comp(bp);
4434
4435         if (bp->port.pmf)
4436                 update = (bnx2x_hw_stats_update(bp) == 0);
4437
4438         update |= (bnx2x_storm_stats_update(bp) == 0);
4439
4440         if (update) {
4441                 bnx2x_net_stats_update(bp);
4442
4443                 if (bp->port.pmf)
4444                         bnx2x_port_stats_stop(bp);
4445
4446                 bnx2x_hw_stats_post(bp);
4447                 bnx2x_stats_comp(bp);
4448         }
4449 }
4450
4451 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4452 {
4453 }
4454
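     /* Statistics state machine: indexed by [current state][event], each
      * entry gives the action to run and the next state.  Events are PMF
      * change, link up, timer update and stop.
      */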
4455 static const struct {
4456         void (*action)(struct bnx2x *bp);
4457         enum bnx2x_stats_state next_state;
4458 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4459 /* state        event   */
4460 {
4461 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4462 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4463 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4464 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4465 },
4466 {
4467 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4468 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4469 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4470 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4471 }
4472 };
4473
4474 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4475 {
4476         enum bnx2x_stats_state state = bp->stats_state;
4477
4478         bnx2x_stats_stm[state][event].action(bp);
4479         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4480
4481         /* Make sure the state has been "changed" */
4482         smp_wmb();
4483
4484         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4485                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4486                    state, event, bp->stats_state);
4487 }
4488
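     /* DMAE the host port statistics buffer out to the port_stx area in
      * shmem so the management FW starts from a known baseline.
      */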
4489 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4490 {
4491         struct dmae_command *dmae;
4492         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4493
4494         /* sanity */
4495         if (!bp->port.pmf || !bp->port.port_stx) {
4496                 BNX2X_ERR("BUG!\n");
4497                 return;
4498         }
4499
4500         bp->executer_idx = 0;
4501
4502         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4503         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4504                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4505                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4506 #ifdef __BIG_ENDIAN
4507                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4508 #else
4509                         DMAE_CMD_ENDIANITY_DW_SWAP |
4510 #endif
4511                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4512                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4513         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4514         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4515         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4516         dmae->dst_addr_hi = 0;
4517         dmae->len = sizeof(struct host_port_stats) >> 2;
4518         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4519         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4520         dmae->comp_val = DMAE_COMP_VAL;
4521
4522         *stats_comp = 0;
4523         bnx2x_hw_stats_post(bp);
4524         bnx2x_stats_comp(bp);
4525 }
4526
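     /* As PMF, write an initial function statistics base for every VN
      * sharing this port, temporarily borrowing bp->func_stx.
      */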
4527 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4528 {
4529         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4530         int port = BP_PORT(bp);
4531         int func;
4532         u32 func_stx;
4533
4534         /* sanity */
4535         if (!bp->port.pmf || !bp->func_stx) {
4536                 BNX2X_ERR("BUG!\n");
4537                 return;
4538         }
4539
4540         /* save our func_stx */
4541         func_stx = bp->func_stx;
4542
4543         for (vn = VN_0; vn < vn_max; vn++) {
4544                 func = 2*vn + port;
4545
4546                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4547                 bnx2x_func_stats_init(bp);
4548                 bnx2x_hw_stats_post(bp);
4549                 bnx2x_stats_comp(bp);
4550         }
4551
4552         /* restore our func_stx */
4553         bp->func_stx = func_stx;
4554 }
4555
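     /* Non-PMF path: DMAE the function statistics base back from shmem
      * into func_stats_base so later updates accumulate on top of it.
      */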
4556 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4557 {
4558         struct dmae_command *dmae = &bp->stats_dmae;
4559         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4560
4561         /* sanity */
4562         if (!bp->func_stx) {
4563                 BNX2X_ERR("BUG!\n");
4564                 return;
4565         }
4566
4567         bp->executer_idx = 0;
4568         memset(dmae, 0, sizeof(struct dmae_command));
4569
4570         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4571                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4572                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4573 #ifdef __BIG_ENDIAN
4574                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4575 #else
4576                         DMAE_CMD_ENDIANITY_DW_SWAP |
4577 #endif
4578                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4579                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4580         dmae->src_addr_lo = bp->func_stx >> 2;
4581         dmae->src_addr_hi = 0;
4582         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4583         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4584         dmae->len = sizeof(struct host_func_stats) >> 2;
4585         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4586         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4587         dmae->comp_val = DMAE_COMP_VAL;
4588
4589         *stats_comp = 0;
4590         bnx2x_hw_stats_post(bp);
4591         bnx2x_stats_comp(bp);
4592 }
4593
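     /* Statistics init on load: latch the shmem stats addresses, snapshot
      * the NIG counters and zero all per-queue and global statistics.
      */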
4594 static void bnx2x_stats_init(struct bnx2x *bp)
4595 {
4596         int port = BP_PORT(bp);
4597         int func = BP_FUNC(bp);
4598         int i;
4599
4600         bp->stats_pending = 0;
4601         bp->executer_idx = 0;
4602         bp->stats_counter = 0;
4603
4604         /* port and func stats for management */
4605         if (!BP_NOMCP(bp)) {
4606                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4607                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4608
4609         } else {
4610                 bp->port.port_stx = 0;
4611                 bp->func_stx = 0;
4612         }
4613         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4614            bp->port.port_stx, bp->func_stx);
4615
4616         /* port stats */
4617         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4618         bp->port.old_nig_stats.brb_discard =
4619                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4620         bp->port.old_nig_stats.brb_truncate =
4621                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4622         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4623                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4624         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4625                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4626
4627         /* function stats */
4628         for_each_queue(bp, i) {
4629                 struct bnx2x_fastpath *fp = &bp->fp[i];
4630
4631                 memset(&fp->old_tclient, 0,
4632                        sizeof(struct tstorm_per_client_stats));
4633                 memset(&fp->old_uclient, 0,
4634                        sizeof(struct ustorm_per_client_stats));
4635                 memset(&fp->old_xclient, 0,
4636                        sizeof(struct xstorm_per_client_stats));
4637                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4638         }
4639
4640         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4641         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4642
4643         bp->stats_state = STATS_STATE_DISABLED;
4644
4645         if (bp->port.pmf) {
4646                 if (bp->port.port_stx)
4647                         bnx2x_port_stats_base_init(bp);
4648
4649                 if (bp->func_stx)
4650                         bnx2x_func_stats_base_init(bp);
4651
4652         } else if (bp->func_stx)
4653                 bnx2x_func_stats_base_update(bp);
4654 }
4655
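     /* Periodic driver timer: services queue 0 in poll mode, exchanges
      * the heartbeat pulse with the MCP and triggers a statistics update.
      */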
4656 static void bnx2x_timer(unsigned long data)
4657 {
4658         struct bnx2x *bp = (struct bnx2x *) data;
4659
4660         if (!netif_running(bp->dev))
4661                 return;
4662
4663         if (atomic_read(&bp->intr_sem) != 0)
4664                 goto timer_restart;
4665
4666         if (poll) {
4667                 struct bnx2x_fastpath *fp = &bp->fp[0];
4668                 int rc;
4669
4670                 bnx2x_tx_int(fp);
4671                 rc = bnx2x_rx_int(fp, 1000);
4672         }
4673
4674         if (!BP_NOMCP(bp)) {
4675                 int func = BP_FUNC(bp);
4676                 u32 drv_pulse;
4677                 u32 mcp_pulse;
4678
4679                 ++bp->fw_drv_pulse_wr_seq;
4680                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4681                 /* TBD - add SYSTEM_TIME */
4682                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4683                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4684
4685                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4686                              MCP_PULSE_SEQ_MASK);
4687                 /* The delta between driver pulse and mcp response
4688                  * should be 1 (before mcp response) or 0 (after mcp response)
4689                  */
4690                 if ((drv_pulse != mcp_pulse) &&
4691                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4692                         /* someone lost a heartbeat... */
4693                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4694                                   drv_pulse, mcp_pulse);
4695                 }
4696         }
4697
4698         if ((bp->state == BNX2X_STATE_OPEN) ||
4699             (bp->state == BNX2X_STATE_DISABLED))
4700                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4701
4702 timer_restart:
4703         mod_timer(&bp->timer, jiffies + bp->current_interval);
4704 }
4705
4706 /* end of Statistics */
4707
4708 /* nic init */
4709
4710 /*
4711  * nic init service functions
4712  */
4713
4714 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4715 {
4716         int port = BP_PORT(bp);
4717
4718         /* "CSTORM" */
4719         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4720                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4721                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4722         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4723                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4724                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4725 }
4726
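     /* Publish a non-default status block: write its host DMA address and
      * owning function for the USTORM and CSTORM sections, mark all HC
      * indices disabled and enable the SB in the IGU.
      */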
4727 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4728                           dma_addr_t mapping, int sb_id)
4729 {
4730         int port = BP_PORT(bp);
4731         int func = BP_FUNC(bp);
4732         int index;
4733         u64 section;
4734
4735         /* USTORM */
4736         section = ((u64)mapping) + offsetof(struct host_status_block,
4737                                             u_status_block);
4738         sb->u_status_block.status_block_id = sb_id;
4739
4740         REG_WR(bp, BAR_CSTRORM_INTMEM +
4741                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4742         REG_WR(bp, BAR_CSTRORM_INTMEM +
4743                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4744                U64_HI(section));
4745         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4746                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4747
4748         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4749                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4750                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4751
4752         /* CSTORM */
4753         section = ((u64)mapping) + offsetof(struct host_status_block,
4754                                             c_status_block);
4755         sb->c_status_block.status_block_id = sb_id;
4756
4757         REG_WR(bp, BAR_CSTRORM_INTMEM +
4758                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4759         REG_WR(bp, BAR_CSTRORM_INTMEM +
4760                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4761                U64_HI(section));
4762         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4763                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4764
4765         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4766                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4767                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4768
4769         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4770 }
4771
4772 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4773 {
4774         int func = BP_FUNC(bp);
4775
4776         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4777                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4778                         sizeof(struct tstorm_def_status_block)/4);
4779         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4780                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4781                         sizeof(struct cstorm_def_status_block_u)/4);
4782         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4783                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4784                         sizeof(struct cstorm_def_status_block_c)/4);
4785         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4786                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4787                         sizeof(struct xstorm_def_status_block)/4);
4788 }
4789
4790 static void bnx2x_init_def_sb(struct bnx2x *bp,
4791                               struct host_def_status_block *def_sb,
4792                               dma_addr_t mapping, int sb_id)
4793 {
4794         int port = BP_PORT(bp);
4795         int func = BP_FUNC(bp);
4796         int index, val, reg_offset;
4797         u64 section;
4798
4799         /* ATTN */
4800         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4801                                             atten_status_block);
4802         def_sb->atten_status_block.status_block_id = sb_id;
4803
4804         bp->attn_state = 0;
4805
4806         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4807                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4808
4809         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4810                 bp->attn_group[index].sig[0] = REG_RD(bp,
4811                                                      reg_offset + 0x10*index);
4812                 bp->attn_group[index].sig[1] = REG_RD(bp,
4813                                                reg_offset + 0x4 + 0x10*index);
4814                 bp->attn_group[index].sig[2] = REG_RD(bp,
4815                                                reg_offset + 0x8 + 0x10*index);
4816                 bp->attn_group[index].sig[3] = REG_RD(bp,
4817                                                reg_offset + 0xc + 0x10*index);
4818         }
4819
4820         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4821                              HC_REG_ATTN_MSG0_ADDR_L);
4822
4823         REG_WR(bp, reg_offset, U64_LO(section));
4824         REG_WR(bp, reg_offset + 4, U64_HI(section));
4825
4826         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4827
4828         val = REG_RD(bp, reg_offset);
4829         val |= sb_id;
4830         REG_WR(bp, reg_offset, val);
4831
4832         /* USTORM */
4833         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4834                                             u_def_status_block);
4835         def_sb->u_def_status_block.status_block_id = sb_id;
4836
4837         REG_WR(bp, BAR_CSTRORM_INTMEM +
4838                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4839         REG_WR(bp, BAR_CSTRORM_INTMEM +
4840                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4841                U64_HI(section));
4842         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4843                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4844
4845         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4846                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4847                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4848
4849         /* CSTORM */
4850         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4851                                             c_def_status_block);
4852         def_sb->c_def_status_block.status_block_id = sb_id;
4853
4854         REG_WR(bp, BAR_CSTRORM_INTMEM +
4855                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4856         REG_WR(bp, BAR_CSTRORM_INTMEM +
4857                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4858                U64_HI(section));
4859         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4860                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4861
4862         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4863                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4864                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4865
4866         /* TSTORM */
4867         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4868                                             t_def_status_block);
4869         def_sb->t_def_status_block.status_block_id = sb_id;
4870
4871         REG_WR(bp, BAR_TSTRORM_INTMEM +
4872                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4873         REG_WR(bp, BAR_TSTRORM_INTMEM +
4874                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4875                U64_HI(section));
4876         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4877                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4878
4879         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4880                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4881                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4882
4883         /* XSTORM */
4884         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4885                                             x_def_status_block);
4886         def_sb->x_def_status_block.status_block_id = sb_id;
4887
4888         REG_WR(bp, BAR_XSTRORM_INTMEM +
4889                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4890         REG_WR(bp, BAR_XSTRORM_INTMEM +
4891                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4892                U64_HI(section));
4893         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4894                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4895
4896         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4897                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4898                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4899
4900         bp->stats_pending = 0;
4901         bp->set_mac_pending = 0;
4902
4903         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4904 }
4905
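     /* Program the Rx/Tx completion-queue coalescing timeouts; the tick
      * values (in usec) are divided by 12 to match the HC timeout
      * granularity, and a resulting timeout of zero marks the index as
      * disabled.
      */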
4906 static void bnx2x_update_coalesce(struct bnx2x *bp)
4907 {
4908         int port = BP_PORT(bp);
4909         int i;
4910
4911         for_each_queue(bp, i) {
4912                 int sb_id = bp->fp[i].sb_id;
4913
4914                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4915                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4916                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4917                                                       U_SB_ETH_RX_CQ_INDEX),
4918                         bp->rx_ticks/12);
4919                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4920                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4921                                                        U_SB_ETH_RX_CQ_INDEX),
4922                          (bp->rx_ticks/12) ? 0 : 1);
4923
4924                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4925                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4926                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4927                                                       C_SB_ETH_TX_CQ_INDEX),
4928                         bp->tx_ticks/12);
4929                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4930                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4931                                                        C_SB_ETH_TX_CQ_INDEX),
4932                          (bp->tx_ticks/12) ? 0 : 1);
4933         }
4934 }
4935
4936 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4937                                        struct bnx2x_fastpath *fp, int last)
4938 {
4939         int i;
4940
4941         for (i = 0; i < last; i++) {
4942                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4943                 struct sk_buff *skb = rx_buf->skb;
4944
4945                 if (skb == NULL) {
4946                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4947                         continue;
4948                 }
4949
4950                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4951                         pci_unmap_single(bp->pdev,
4952                                          pci_unmap_addr(rx_buf, mapping),
4953                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4954
4955                 dev_kfree_skb(skb);
4956                 rx_buf->skb = NULL;
4957         }
4958 }
4959
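     /* Set up all Rx rings: size buffers for the current MTU,
      * pre-allocate the TPA aggregation pool, chain the "next page"
      * elements of the SGE/BD/CQ rings and fill them with buffers.
      */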
4960 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4961 {
4962         int func = BP_FUNC(bp);
4963         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4964                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4965         u16 ring_prod, cqe_ring_prod;
4966         int i, j;
4967
4968         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4969         DP(NETIF_MSG_IFUP,
4970            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4971
4972         if (bp->flags & TPA_ENABLE_FLAG) {
4973
4974                 for_each_rx_queue(bp, j) {
4975                         struct bnx2x_fastpath *fp = &bp->fp[j];
4976
4977                         for (i = 0; i < max_agg_queues; i++) {
4978                                 fp->tpa_pool[i].skb =
4979                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4980                                 if (!fp->tpa_pool[i].skb) {
4981                                         BNX2X_ERR("Failed to allocate TPA "
4982                                                   "skb pool for queue[%d] - "
4983                                                   "disabling TPA on this "
4984                                                   "queue!\n", j);
4985                                         bnx2x_free_tpa_pool(bp, fp, i);
4986                                         fp->disable_tpa = 1;
4987                                         break;
4988                                 }
4989                                 /* the pool skb has no DMA mapping yet */
4990                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4991                                                    mapping, 0);
4992                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4993                         }
4994                 }
4995         }
4996
4997         for_each_rx_queue(bp, j) {
4998                 struct bnx2x_fastpath *fp = &bp->fp[j];
4999
5000                 fp->rx_bd_cons = 0;
5001                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5002                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5003
5004                 /* Mark queue as Rx */
5005                 fp->is_rx_queue = 1;
5006
5007                 /* "next page" elements initialization */
5008                 /* SGE ring */
5009                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5010                         struct eth_rx_sge *sge;
5011
5012                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5013                         sge->addr_hi =
5014                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5015                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5016                         sge->addr_lo =
5017                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5018                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5019                 }
5020
5021                 bnx2x_init_sge_ring_bit_mask(fp);
5022
5023                 /* RX BD ring */
5024                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5025                         struct eth_rx_bd *rx_bd;
5026
5027                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5028                         rx_bd->addr_hi =
5029                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5030                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5031                         rx_bd->addr_lo =
5032                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5033                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5034                 }
5035
5036                 /* CQ ring */
5037                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5038                         struct eth_rx_cqe_next_page *nextpg;
5039
5040                         nextpg = (struct eth_rx_cqe_next_page *)
5041                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5042                         nextpg->addr_hi =
5043                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5044                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5045                         nextpg->addr_lo =
5046                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5047                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5048                 }
5049
5050                 /* Allocate SGEs and initialize the ring elements */
5051                 for (i = 0, ring_prod = 0;
5052                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5053
5054                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5055                                 BNX2X_ERR("was only able to allocate "
5056                                           "%d rx sges\n", i);
5057                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5058                                 /* Cleanup already allocated elements */
5059                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5060                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5061                                 fp->disable_tpa = 1;
5062                                 ring_prod = 0;
5063                                 break;
5064                         }
5065                         ring_prod = NEXT_SGE_IDX(ring_prod);
5066                 }
5067                 fp->rx_sge_prod = ring_prod;
5068
5069                 /* Allocate BDs and initialize BD ring */
5070                 fp->rx_comp_cons = 0;
5071                 cqe_ring_prod = ring_prod = 0;
5072                 for (i = 0; i < bp->rx_ring_size; i++) {
5073                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5074                                 BNX2X_ERR("was only able to allocate "
5075                                           "%d rx skbs on queue[%d]\n", i, j);
5076                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5077                                 break;
5078                         }
5079                         ring_prod = NEXT_RX_IDX(ring_prod);
5080                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5081                         WARN_ON(ring_prod <= i);
5082                 }
5083
5084                 fp->rx_bd_prod = ring_prod;
5085                 /* must not have more available CQEs than BDs */
5086                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5087                                        cqe_ring_prod);
5088                 fp->rx_pkt = fp->rx_calls = 0;
5089
5090                 /* Warning!
5091                  * This will generate an interrupt (to the TSTORM);
5092                  * it must only be done after the chip is initialized.
5093                  */
5094                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5095                                      fp->rx_sge_prod);
5096                 if (j != 0)
5097                         continue;
5098
5099                 REG_WR(bp, BAR_USTRORM_INTMEM +
5100                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5101                        U64_LO(fp->rx_comp_mapping));
5102                 REG_WR(bp, BAR_USTRORM_INTMEM +
5103                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5104                        U64_HI(fp->rx_comp_mapping));
5105         }
5106 }
5107
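     /* Set up all Tx rings: chain the "next page" BDs and reset the
      * producer/consumer indices and doorbell data.
      */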
5108 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5109 {
5110         int i, j;
5111
5112         for_each_tx_queue(bp, j) {
5113                 struct bnx2x_fastpath *fp = &bp->fp[j];
5114
5115                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5116                         struct eth_tx_next_bd *tx_next_bd =
5117                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5118
5119                         tx_next_bd->addr_hi =
5120                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5121                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5122                         tx_next_bd->addr_lo =
5123                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5124                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5125                 }
5126
5127                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5128                 fp->tx_db.data.zero_fill1 = 0;
5129                 fp->tx_db.data.prod = 0;
5130
5131                 fp->tx_pkt_prod = 0;
5132                 fp->tx_pkt_cons = 0;
5133                 fp->tx_bd_prod = 0;
5134                 fp->tx_bd_cons = 0;
5135                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5136                 fp->tx_pkt = 0;
5137         }
5138
5139         /* clean tx statistics */
5140         for_each_rx_queue(bp, i)
5141                 bnx2x_fp(bp, i, tx_pkt) = 0;
5142 }
5143
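     /* Slowpath queue init: reset the SPQ producer and publish the
      * queue's page base address and producer index to the XSTORM.
      */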
5144 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5145 {
5146         int func = BP_FUNC(bp);
5147
5148         spin_lock_init(&bp->spq_lock);
5149
5150         bp->spq_left = MAX_SPQ_PENDING;
5151         bp->spq_prod_idx = 0;
5152         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5153         bp->spq_prod_bd = bp->spq;
5154         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5155
5156         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5157                U64_LO(bp->spq_mapping));
5158         REG_WR(bp,
5159                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5160                U64_HI(bp->spq_mapping));
5161
5162         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5163                bp->spq_prod_idx);
5164 }
5165
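     /* Fill the per-connection ETH context: the USTORM (Rx) side gets
      * buffer sizes, BD/SGE ring bases and TPA parameters; the CSTORM and
      * XSTORM (Tx) side gets the Tx BD ring base and completion index.
      */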
5166 static void bnx2x_init_context(struct bnx2x *bp)
5167 {
5168         int i;
5169
5170         for_each_rx_queue(bp, i) {
5171                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5172                 struct bnx2x_fastpath *fp = &bp->fp[i];
5173                 u8 cl_id = fp->cl_id;
5174
5175                 context->ustorm_st_context.common.sb_index_numbers =
5176                                                 BNX2X_RX_SB_INDEX_NUM;
5177                 context->ustorm_st_context.common.clientId = cl_id;
5178                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5179                 context->ustorm_st_context.common.flags =
5180                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5181                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5182                 context->ustorm_st_context.common.statistics_counter_id =
5183                                                 cl_id;
5184                 context->ustorm_st_context.common.mc_alignment_log_size =
5185                                                 BNX2X_RX_ALIGN_SHIFT;
5186                 context->ustorm_st_context.common.bd_buff_size =
5187                                                 bp->rx_buf_size;
5188                 context->ustorm_st_context.common.bd_page_base_hi =
5189                                                 U64_HI(fp->rx_desc_mapping);
5190                 context->ustorm_st_context.common.bd_page_base_lo =
5191                                                 U64_LO(fp->rx_desc_mapping);
5192                 if (!fp->disable_tpa) {
5193                         context->ustorm_st_context.common.flags |=
5194                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5195                         context->ustorm_st_context.common.sge_buff_size =
5196                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5197                                          (u32)0xffff);
5198                         context->ustorm_st_context.common.sge_page_base_hi =
5199                                                 U64_HI(fp->rx_sge_mapping);
5200                         context->ustorm_st_context.common.sge_page_base_lo =
5201                                                 U64_LO(fp->rx_sge_mapping);
5202
5203                         context->ustorm_st_context.common.max_sges_for_packet =
5204                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5205                         context->ustorm_st_context.common.max_sges_for_packet =
5206                                 ((context->ustorm_st_context.common.
5207                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5208                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5209                 }
5210
5211                 context->ustorm_ag_context.cdu_usage =
5212                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5213                                                CDU_REGION_NUMBER_UCM_AG,
5214                                                ETH_CONNECTION_TYPE);
5215
5216                 context->xstorm_ag_context.cdu_reserved =
5217                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5218                                                CDU_REGION_NUMBER_XCM_AG,
5219                                                ETH_CONNECTION_TYPE);
5220         }
5221
5222         for_each_tx_queue(bp, i) {
5223                 struct bnx2x_fastpath *fp = &bp->fp[i];
5224                 struct eth_context *context =
5225                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5226
5227                 context->cstorm_st_context.sb_index_number =
5228                                                 C_SB_ETH_TX_CQ_INDEX;
5229                 context->cstorm_st_context.status_block_id = fp->sb_id;
5230
5231                 context->xstorm_st_context.tx_bd_page_base_hi =
5232                                                 U64_HI(fp->tx_desc_mapping);
5233                 context->xstorm_st_context.tx_bd_page_base_lo =
5234                                                 U64_LO(fp->tx_desc_mapping);
5235                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5236                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5237         }
5238 }
5239
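     /* RSS indirection table: spread hash results round-robin across the
      * client IDs of the Rx queues.
      */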
5240 static void bnx2x_init_ind_table(struct bnx2x *bp)
5241 {
5242         int func = BP_FUNC(bp);
5243         int i;
5244
5245         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5246                 return;
5247
5248         DP(NETIF_MSG_IFUP,
5249            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5250         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5251                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5252                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5253                         bp->fp->cl_id + (i % bp->num_rx_queues));
5254 }
5255
5256 static void bnx2x_set_client_config(struct bnx2x *bp)
5257 {
5258         struct tstorm_eth_client_config tstorm_client = {0};
5259         int port = BP_PORT(bp);
5260         int i;
5261
5262         tstorm_client.mtu = bp->dev->mtu;
5263         tstorm_client.config_flags =
5264                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5265                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5266 #ifdef BCM_VLAN
5267         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5268                 tstorm_client.config_flags |=
5269                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5270                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5271         }
5272 #endif
5273
5274         for_each_queue(bp, i) {
5275                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5276
5277                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5278                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5279                        ((u32 *)&tstorm_client)[0]);
5280                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5281                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5282                        ((u32 *)&tstorm_client)[1]);
5283         }
5284
5285         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5286            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5287 }
5288
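     /* Translate the requested Rx mode into TSTORM drop/accept-all filter
      * flags per client and the matching NIG LLH mask.
      */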
5289 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5290 {
5291         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5292         int mode = bp->rx_mode;
5293         int mask = bp->rx_mode_cl_mask;
5294         int func = BP_FUNC(bp);
5295         int port = BP_PORT(bp);
5296         int i;
5297         /* All but management unicast packets should pass to the host as well */
5298         u32 llh_mask =
5299                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5300                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5301                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5302                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5303
5304         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5305
5306         switch (mode) {
5307         case BNX2X_RX_MODE_NONE: /* no Rx */
5308                 tstorm_mac_filter.ucast_drop_all = mask;
5309                 tstorm_mac_filter.mcast_drop_all = mask;
5310                 tstorm_mac_filter.bcast_drop_all = mask;
5311                 break;
5312
5313         case BNX2X_RX_MODE_NORMAL:
5314                 tstorm_mac_filter.bcast_accept_all = mask;
5315                 break;
5316
5317         case BNX2X_RX_MODE_ALLMULTI:
5318                 tstorm_mac_filter.mcast_accept_all = mask;
5319                 tstorm_mac_filter.bcast_accept_all = mask;
5320                 break;
5321
5322         case BNX2X_RX_MODE_PROMISC:
5323                 tstorm_mac_filter.ucast_accept_all = mask;
5324                 tstorm_mac_filter.mcast_accept_all = mask;
5325                 tstorm_mac_filter.bcast_accept_all = mask;
5326                 /* pass management unicast packets as well */
5327                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5328                 break;
5329
5330         default:
5331                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5332                 break;
5333         }
5334
5335         REG_WR(bp,
5336                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5337                llh_mask);
5338
5339         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5340                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5341                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5342                        ((u32 *)&tstorm_mac_filter)[i]);
5343
5344 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5345                    ((u32 *)&tstorm_mac_filter)[i]); */
5346         }
5347
5348         if (mode != BNX2X_RX_MODE_NONE)
5349                 bnx2x_set_client_config(bp);
5350 }
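/*
 * Editorial note: the ucast/mcast/bcast accept and drop fields above are
 * per-client bit masks, and bp->rx_mode_cl_mask is (1 << BP_L_ID(bp)) (set
 * in bnx2x_init_internal_func() below), so each mode raises or clears the
 * bits only for this function's leading client.  In NORMAL mode only
 * broadcast is accepted wholesale; unicast and multicast then depend on
 * the configured MAC filters rather than on these catch-all flags.
 */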
5351
5352 static void bnx2x_init_internal_common(struct bnx2x *bp)
5353 {
5354         int i;
5355
5356         /* Zero this manually as its initialization is
5357            currently missing in the initTool */
5358         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5359                 REG_WR(bp, BAR_USTRORM_INTMEM +
5360                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5361 }
5362
5363 static void bnx2x_init_internal_port(struct bnx2x *bp)
5364 {
5365         int port = BP_PORT(bp);
5366
5367         REG_WR(bp,
5368                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5369         REG_WR(bp,
5370                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5371         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5372         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5373 }
5374
5375 static void bnx2x_init_internal_func(struct bnx2x *bp)
5376 {
5377         struct tstorm_eth_function_common_config tstorm_config = {0};
5378         struct stats_indication_flags stats_flags = {0};
5379         int port = BP_PORT(bp);
5380         int func = BP_FUNC(bp);
5381         int i, j;
5382         u32 offset;
5383         u16 max_agg_size;
5384
5385         if (is_multi(bp)) {
5386                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5387                 tstorm_config.rss_result_mask = MULTI_MASK;
5388         }
5389
5390         /* Enable TPA if needed */
5391         if (bp->flags & TPA_ENABLE_FLAG)
5392                 tstorm_config.config_flags |=
5393                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5394
5395         if (IS_E1HMF(bp))
5396                 tstorm_config.config_flags |=
5397                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5398
5399         tstorm_config.leading_client_id = BP_L_ID(bp);
5400
5401         REG_WR(bp, BAR_TSTRORM_INTMEM +
5402                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5403                (*(u32 *)&tstorm_config));
5404
5405         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5406         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5407         bnx2x_set_storm_rx_mode(bp);
5408
5409         for_each_queue(bp, i) {
5410                 u8 cl_id = bp->fp[i].cl_id;
5411
5412                 /* reset xstorm per client statistics */
5413                 offset = BAR_XSTRORM_INTMEM +
5414                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5415                 for (j = 0;
5416                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5417                         REG_WR(bp, offset + j*4, 0);
5418
5419                 /* reset tstorm per client statistics */
5420                 offset = BAR_TSTRORM_INTMEM +
5421                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5422                 for (j = 0;
5423                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5424                         REG_WR(bp, offset + j*4, 0);
5425
5426                 /* reset ustorm per client statistics */
5427                 offset = BAR_USTRORM_INTMEM +
5428                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5429                 for (j = 0;
5430                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5431                         REG_WR(bp, offset + j*4, 0);
5432         }
5433
5434         /* Init statistics related context */
5435         stats_flags.collect_eth = 1;
5436
5437         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5438                ((u32 *)&stats_flags)[0]);
5439         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5440                ((u32 *)&stats_flags)[1]);
5441
5442         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5443                ((u32 *)&stats_flags)[0]);
5444         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5445                ((u32 *)&stats_flags)[1]);
5446
5447         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5448                ((u32 *)&stats_flags)[0]);
5449         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5450                ((u32 *)&stats_flags)[1]);
5451
5452         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5453                ((u32 *)&stats_flags)[0]);
5454         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5455                ((u32 *)&stats_flags)[1]);
5456
5457         REG_WR(bp, BAR_XSTRORM_INTMEM +
5458                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5459                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5460         REG_WR(bp, BAR_XSTRORM_INTMEM +
5461                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5462                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5463
5464         REG_WR(bp, BAR_TSTRORM_INTMEM +
5465                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5466                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5467         REG_WR(bp, BAR_TSTRORM_INTMEM +
5468                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5469                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5470
5471         REG_WR(bp, BAR_USTRORM_INTMEM +
5472                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5473                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5474         REG_WR(bp, BAR_USTRORM_INTMEM +
5475                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5476                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5477
5478         if (CHIP_IS_E1H(bp)) {
5479                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5480                         IS_E1HMF(bp));
5481                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5482                         IS_E1HMF(bp));
5483                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5484                         IS_E1HMF(bp));
5485                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5486                         IS_E1HMF(bp));
5487
5488                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5489                          bp->e1hov);
5490         }
5491
5492         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5493         max_agg_size =
5494                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5495                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5496                     (u32)0xffff);
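        /* Worked example (editorial; assumes 4KB pages and one page per
         * SGE): min(8, MAX_SKB_FRAGS) = 8 fragments, 8 * 4096 = 32768,
         * which is under the 0xffff clamp, so max_agg_size would be 32KB
         * per TPA aggregation in that configuration. */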
5497         for_each_rx_queue(bp, i) {
5498                 struct bnx2x_fastpath *fp = &bp->fp[i];
5499
5500                 REG_WR(bp, BAR_USTRORM_INTMEM +
5501                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5502                        U64_LO(fp->rx_comp_mapping));
5503                 REG_WR(bp, BAR_USTRORM_INTMEM +
5504                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5505                        U64_HI(fp->rx_comp_mapping));
5506
5507                 /* Next page */
5508                 REG_WR(bp, BAR_USTRORM_INTMEM +
5509                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5510                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5511                 REG_WR(bp, BAR_USTRORM_INTMEM +
5512                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5513                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5514
5515                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5516                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5517                          max_agg_size);
5518         }
5519
5520         /* dropless flow control */
5521         if (CHIP_IS_E1H(bp)) {
5522                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5523
5524                 rx_pause.bd_thr_low = 250;
5525                 rx_pause.cqe_thr_low = 250;
5526                 rx_pause.cos = 1;
5527                 rx_pause.sge_thr_low = 0;
5528                 rx_pause.bd_thr_high = 350;
5529                 rx_pause.cqe_thr_high = 350;
5530                 rx_pause.sge_thr_high = 0;
5531
5532                 for_each_rx_queue(bp, i) {
5533                         struct bnx2x_fastpath *fp = &bp->fp[i];
5534
5535                         if (!fp->disable_tpa) {
5536                                 rx_pause.sge_thr_low = 150;
5537                                 rx_pause.sge_thr_high = 250;
5538                         }
5539
5540
5541                         offset = BAR_USTRORM_INTMEM +
5542                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5543                                                                    fp->cl_id);
5544                         for (j = 0;
5545                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5546                              j++)
5547                                 REG_WR(bp, offset + j*4,
5548                                        ((u32 *)&rx_pause)[j]);
5549                 }
5550         }
5551
5552         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5553
5554         /* Init rate shaping and fairness contexts */
5555         if (IS_E1HMF(bp)) {
5556                 int vn;
5557
5558                 /* During init there is no active link;
5559                    until link is up, assume a link rate of 10Gbps */
5560                 bp->link_vars.line_speed = SPEED_10000;
5561                 bnx2x_init_port_minmax(bp);
5562
5563                 if (!BP_NOMCP(bp))
5564                         bp->mf_config =
5565                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5566                 bnx2x_calc_vn_weight_sum(bp);
5567
5568                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5569                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5570
5571                 /* Enable rate shaping and fairness */
5572                 bp->cmng.flags.cmng_enables |=
5573                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5574
5575         } else {
5576                 /* rate shaping and fairness are disabled */
5577                 DP(NETIF_MSG_IFUP,
5578                    "single function mode  minmax will be disabled\n");
5579         }
5580
5581
5582         /* Store it to internal memory */
5583         if (bp->port.pmf)
5584                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5585                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5586                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5587                                ((u32 *)(&bp->cmng))[i]);
5588 }
5589
5590 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5591 {
5592         switch (load_code) {
5593         case FW_MSG_CODE_DRV_LOAD_COMMON:
5594                 bnx2x_init_internal_common(bp);
5595                 /* no break */
5596
5597         case FW_MSG_CODE_DRV_LOAD_PORT:
5598                 bnx2x_init_internal_port(bp);
5599                 /* no break */
5600
5601         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5602                 bnx2x_init_internal_func(bp);
5603                 break;
5604
5605         default:
5606                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5607                 break;
5608         }
5609 }
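/*
 * Note: the missing breaks above are deliberate -- the MCP load codes form
 * a hierarchy.  COMMON (first driver instance on the chip) falls through to
 * the PORT and FUNCTION init, PORT (first instance on a port) falls through
 * to the FUNCTION init, and FUNCTION runs only its own step.
 */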
5610
5611 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5612 {
5613         int i;
5614
5615         for_each_queue(bp, i) {
5616                 struct bnx2x_fastpath *fp = &bp->fp[i];
5617
5618                 fp->bp = bp;
5619                 fp->state = BNX2X_FP_STATE_CLOSED;
5620                 fp->index = i;
5621                 fp->cl_id = BP_L_ID(bp) + i;
5622 #ifdef BCM_CNIC
5623                 fp->sb_id = fp->cl_id + 1;
5624 #else
5625                 fp->sb_id = fp->cl_id;
5626 #endif
5627                 /* A Tx queue reuses the client id of its partner Rx queue */
5628                 if (i >= bp->num_rx_queues)
5629                         fp->cl_id -= bp->num_rx_queues;
5630                 DP(NETIF_MSG_IFUP,
5631                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5632                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5633                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5634                               fp->sb_id);
5635                 bnx2x_update_fpsb_idx(fp);
5636         }
5637
5638         /* ensure status block indices were read */
5639         rmb();
5640
5641
5642         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5643                           DEF_SB_ID);
5644         bnx2x_update_dsb_idx(bp);
5645         bnx2x_update_coalesce(bp);
5646         bnx2x_init_rx_rings(bp);
5647         bnx2x_init_tx_ring(bp);
5648         bnx2x_init_sp_ring(bp);
5649         bnx2x_init_context(bp);
5650         bnx2x_init_internal(bp, load_code);
5651         bnx2x_init_ind_table(bp);
5652         bnx2x_stats_init(bp);
5653
5654         /* At this point, we are ready for interrupts */
5655         atomic_set(&bp->intr_sem, 0);
5656
5657         /* flush all before enabling interrupts */
5658         mb();
5659         mmiowb();
5660
5661         bnx2x_int_enable(bp);
5662
5663         /* Check for SPIO5 */
5664         bnx2x_attn_int_deasserted0(bp,
5665                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5666                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5667 }
5668
5669 /* end of nic init */
5670
5671 /*
5672  * gzip service functions
5673  */
5674
5675 static int bnx2x_gunzip_init(struct bnx2x *bp)
5676 {
5677         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5678                                               &bp->gunzip_mapping);
5679         if (bp->gunzip_buf  == NULL)
5680                 goto gunzip_nomem1;
5681
5682         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5683         if (bp->strm  == NULL)
5684                 goto gunzip_nomem2;
5685
5686         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5687                                       GFP_KERNEL);
5688         if (bp->strm->workspace == NULL)
5689                 goto gunzip_nomem3;
5690
5691         return 0;
5692
5693 gunzip_nomem3:
5694         kfree(bp->strm);
5695         bp->strm = NULL;
5696
5697 gunzip_nomem2:
5698         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5699                             bp->gunzip_mapping);
5700         bp->gunzip_buf = NULL;
5701
5702 gunzip_nomem1:
5703         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5704                " decompression\n", bp->dev->name);
5705         return -ENOMEM;
5706 }
5707
5708 static void bnx2x_gunzip_end(struct bnx2x *bp)
5709 {
5710         kfree(bp->strm->workspace);
5711
5712         kfree(bp->strm);
5713         bp->strm = NULL;
5714
5715         if (bp->gunzip_buf) {
5716                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5717                                     bp->gunzip_mapping);
5718                 bp->gunzip_buf = NULL;
5719         }
5720 }
5721
5722 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5723 {
5724         int n, rc;
5725
5726         /* check gzip header */
5727         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5728                 BNX2X_ERR("Bad gzip header\n");
5729                 return -EINVAL;
5730         }
5731
5732         n = 10;
5733
5734 #define FNAME                           0x8
5735
5736         if (zbuf[3] & FNAME)
5737                 while ((n < len) && (zbuf[n++] != 0));
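        /* Sketch of the gzip member header skipped here (RFC 1952):
         *   bytes 0..1  magic 0x1f 0x8b
         *   byte  2     compression method (8 = Z_DEFLATED)
         *   byte  3     FLG; bit 3 (FNAME, 0x8) means a NUL-terminated
         *               original file name follows the fixed header
         *   bytes 4..9  mtime(4), XFL, OS
         * so n starts just past the 10 fixed bytes and, when FNAME is set,
         * is advanced past the file name as well (bounds-checked above). */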
5738
5739         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5740         bp->strm->avail_in = len - n;
5741         bp->strm->next_out = bp->gunzip_buf;
5742         bp->strm->avail_out = FW_BUF_SIZE;
5743
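        /* Note: the negative window-bits value (-MAX_WBITS) requests a raw
         * deflate stream, i.e. no zlib/gzip wrapper -- appropriate here
         * since the gzip header was stripped manually above and the gzip
         * trailer (CRC32 + ISIZE) is simply left unread. */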
5744         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5745         if (rc != Z_OK)
5746                 return rc;
5747
5748         rc = zlib_inflate(bp->strm, Z_FINISH);
5749         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5750                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5751                        bp->dev->name, bp->strm->msg);
5752
5753         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5754         if (bp->gunzip_outlen & 0x3)
5755                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5756                                     " gunzip_outlen (%d) not aligned\n",
5757                        bp->dev->name, bp->gunzip_outlen);
5758         bp->gunzip_outlen >>= 2;
5759
5760         zlib_inflateEnd(bp->strm);
5761
5762         if (rc == Z_STREAM_END)
5763                 return 0;
5764
5765         return rc;
5766 }
5767
5768 /* nic load/unload */
5769
5770 /*
5771  * General service functions
5772  */
5773
5774 /* send a NIG loopback debug packet */
5775 static void bnx2x_lb_pckt(struct bnx2x *bp)
5776 {
5777         u32 wb_write[3];
5778
5779         /* Ethernet source and destination addresses */
5780         wb_write[0] = 0x55555555;
5781         wb_write[1] = 0x55555555;
5782         wb_write[2] = 0x20;             /* SOP */
5783         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5784
5785         /* NON-IP protocol */
5786         wb_write[0] = 0x09000000;
5787         wb_write[1] = 0x55555555;
5788         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5789         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5790 }
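/*
 * Editorial sketch of the debug-packet register format used above: each
 * 3-word DMAE write pushes 8 bytes of frame data (wb_write[0..1]) plus a
 * control word (wb_write[2]) where 0x20 marks start-of-packet and 0x10
 * end-of-packet, so the two writes emit one minimal 16-byte (0x10)
 * loopback frame -- matching the NIG byte counts checked in the memory
 * test below.
 */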
5791
5792 /* Some of the internal memories
5793  * are not directly readable from the driver;
5794  * to test them we send debug packets
5795  */
5796 static int bnx2x_int_mem_test(struct bnx2x *bp)
5797 {
5798         int factor;
5799         int count, i;
5800         u32 val = 0;
5801
5802         if (CHIP_REV_IS_FPGA(bp))
5803                 factor = 120;
5804         else if (CHIP_REV_IS_EMUL(bp))
5805                 factor = 200;
5806         else
5807                 factor = 1;
5808
5809         DP(NETIF_MSG_HW, "start part1\n");
5810
5811         /* Disable inputs of parser neighbor blocks */
5812         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5813         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5814         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5815         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5816
5817         /*  Write 0 to parser credits for CFC search request */
5818         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5819
5820         /* send Ethernet packet */
5821         bnx2x_lb_pckt(bp);
5822
5823         /* TODO: do we need to reset the NIG statistics? */
5824         /* Wait until NIG register shows 1 packet of size 0x10 */
5825         count = 1000 * factor;
5826         while (count) {
5827
5828                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5829                 val = *bnx2x_sp(bp, wb_data[0]);
5830                 if (val == 0x10)
5831                         break;
5832
5833                 msleep(10);
5834                 count--;
5835         }
5836         if (val != 0x10) {
5837                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5838                 return -1;
5839         }
5840
5841         /* Wait until PRS register shows 1 packet */
5842         count = 1000 * factor;
5843         while (count) {
5844                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5845                 if (val == 1)
5846                         break;
5847
5848                 msleep(10);
5849                 count--;
5850         }
5851         if (val != 0x1) {
5852                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5853                 return -2;
5854         }
5855
5856         /* Reset and init BRB, PRS */
5857         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5858         msleep(50);
5859         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5860         msleep(50);
5861         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5862         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5863
5864         DP(NETIF_MSG_HW, "part2\n");
5865
5866         /* Disable inputs of parser neighbor blocks */
5867         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5868         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5869         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5870         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5871
5872         /* Write 0 to parser credits for CFC search request */
5873         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5874
5875         /* send 10 Ethernet packets */
5876         for (i = 0; i < 10; i++)
5877                 bnx2x_lb_pckt(bp);
5878
5879         /* Wait until NIG register shows 10 + 1
5880            packets with a total size of 11*0x10 = 0xb0 */
5881         count = 1000 * factor;
5882         while (count) {
5883
5884                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5885                 val = *bnx2x_sp(bp, wb_data[0]);
5886                 if (val == 0xb0)
5887                         break;
5888
5889                 msleep(10);
5890                 count--;
5891         }
5892         if (val != 0xb0) {
5893                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5894                 return -3;
5895         }
5896
5897         /* Wait until PRS register shows 2 packets */
5898         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5899         if (val != 2)
5900                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5901
5902         /* Write 1 to parser credits for CFC search request */
5903         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5904
5905         /* Wait for the third packet to pass through the parser */
5906         msleep(10 * factor);
5907         /* The PRS packet counter should now show 3 packets */
5908         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5909         if (val != 3)
5910                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5911
5912         /* clear NIG EOP FIFO */
5913         for (i = 0; i < 11; i++)
5914                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5915         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5916         if (val != 1) {
5917                 BNX2X_ERR("clear of NIG failed\n");
5918                 return -4;
5919         }
5920
5921         /* Reset and init BRB, PRS, NIG */
5922         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5923         msleep(50);
5924         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5925         msleep(50);
5926         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5927         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5928 #ifndef BCM_CNIC
5929         /* set NIC mode */
5930         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5931 #endif
5932
5933         /* Enable inputs of parser neighbor blocks */
5934         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5935         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5936         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5937         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5938
5939         DP(NETIF_MSG_HW, "done\n");
5940
5941         return 0; /* OK */
5942 }
5943
5944 static void enable_blocks_attention(struct bnx2x *bp)
5945 {
5946         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5947         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5948         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5949         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5950         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5951         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5952         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5953         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5954         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5955 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5956 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5957         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5958         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5959         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5960 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5961 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5962         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5963         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5964         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5965         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5966 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5967 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5968         if (CHIP_REV_IS_FPGA(bp))
5969                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5970         else
5971                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5972         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5973         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5974         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5975 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5976 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5977         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5978         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5979 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5980         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);         /* bit 3,4 masked */
5981 }
5982
5983
5984 static void bnx2x_reset_common(struct bnx2x *bp)
5985 {
5986         /* reset_common */
5987         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5988                0xd3ffff7f);
5989         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5990 }
5991
5992 static void bnx2x_init_pxp(struct bnx2x *bp)
5993 {
5994         u16 devctl;
5995         int r_order, w_order;
5996
5997         pci_read_config_word(bp->pdev,
5998                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5999         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6000         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6001         if (bp->mrrs == -1)
6002                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6003         else {
6004                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6005                 r_order = bp->mrrs;
6006         }
6007
6008         bnx2x_init_pxp_arb(bp, r_order, w_order);
6009 }
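/*
 * Worked example (editorial; standard PCIe encodings where the size is
 * 128 << order bytes): devctl = 0x2830 gives
 *   w_order = (0x2830 & PCI_EXP_DEVCTL_PAYLOAD) >> 5  = 1  (256B payload)
 *   r_order = (0x2830 & PCI_EXP_DEVCTL_READRQ) >> 12  = 2  (512B read req)
 * with r_order forced to bp->mrrs instead when it is not -1 (auto).
 */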
6010
6011 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6012 {
6013         u32 val;
6014         u8 port;
6015         u8 is_required = 0;
6016
6017         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6018               SHARED_HW_CFG_FAN_FAILURE_MASK;
6019
6020         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6021                 is_required = 1;
6022
6023         /*
6024          * The fan failure mechanism is usually related to the PHY type since
6025          * the power consumption of the board is affected by the PHY. Currently,
6026          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6027          */
6028         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6029                 for (port = PORT_0; port < PORT_MAX; port++) {
6030                         u32 phy_type =
6031                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6032                                          external_phy_config) &
6033                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6034                         is_required |=
6035                                 ((phy_type ==
6036                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6037                                  (phy_type ==
6038                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6039                                  (phy_type ==
6040                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6041                 }
6042
6043         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6044
6045         if (is_required == 0)
6046                 return;
6047
6048         /* Fan failure is indicated by SPIO 5 */
6049         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6050                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6051
6052         /* set to active low mode */
6053         val = REG_RD(bp, MISC_REG_SPIO_INT);
6054         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6055                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6056         REG_WR(bp, MISC_REG_SPIO_INT, val);
6057
6058         /* enable interrupt to signal the IGU */
6059         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6060         val |= (1 << MISC_REGISTERS_SPIO_5);
6061         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6062 }
6063
6064 static int bnx2x_init_common(struct bnx2x *bp)
6065 {
6066         u32 val, i;
6067 #ifdef BCM_CNIC
6068         u32 wb_write[2];
6069 #endif
6070
6071         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6072
6073         bnx2x_reset_common(bp);
6074         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6075         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6076
6077         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6078         if (CHIP_IS_E1H(bp))
6079                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6080
6081         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6082         msleep(30);
6083         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6084
6085         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6086         if (CHIP_IS_E1(bp)) {
6087                 /* enable HW interrupt from PXP on USDM overflow
6088                    bit 16 on INT_MASK_0 */
6089                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6090         }
6091
6092         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6093         bnx2x_init_pxp(bp);
6094
6095 #ifdef __BIG_ENDIAN
6096         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6097         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6098         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6099         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6100         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6101         /* make sure this value is 0 */
6102         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6103
6104 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6105         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6106         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6107         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6108         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6109 #endif
6110
6111         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6112 #ifdef BCM_CNIC
6113         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6114         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6115         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6116 #endif
6117
6118         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6119                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6120
6121         /* let the HW do its magic ... */
6122         msleep(100);
6123         /* finish PXP init */
6124         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6125         if (val != 1) {
6126                 BNX2X_ERR("PXP2 CFG failed\n");
6127                 return -EBUSY;
6128         }
6129         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6130         if (val != 1) {
6131                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6132                 return -EBUSY;
6133         }
6134
6135         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6136         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6137
6138         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6139
6140         /* clean the DMAE memory */
6141         bp->dmae_ready = 1;
6142         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6143
6144         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6145         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6146         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6147         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6148
6149         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6150         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6151         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6152         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6153
6154         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6155
6156 #ifdef BCM_CNIC
6157         wb_write[0] = 0;
6158         wb_write[1] = 0;
6159         for (i = 0; i < 64; i++) {
6160                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6161                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6162
6163                 if (CHIP_IS_E1H(bp)) {
6164                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6165                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6166                                           wb_write, 2);
6167                 }
6168         }
6169 #endif
6170         /* soft reset pulse */
6171         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6172         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6173
6174 #ifdef BCM_CNIC
6175         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6176 #endif
6177
6178         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6179         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6180         if (!CHIP_REV_IS_SLOW(bp)) {
6181                 /* enable hw interrupt from doorbell Q */
6182                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6183         }
6184
6185         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6186         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6187         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6188 #ifndef BCM_CNIC
6189         /* set NIC mode */
6190         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6191 #endif
6192         if (CHIP_IS_E1H(bp))
6193                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6194
6195         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6196         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6197         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6198         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6199
6200         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6201         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6202         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6203         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6204
6205         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6206         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6207         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6208         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6209
6210         /* sync semi rtc */
6211         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6212                0x80000000);
6213         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6214                0x80000000);
6215
6216         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6217         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6218         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6219
6220         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6221         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6222                 REG_WR(bp, i, 0xc0cac01a);
6223                 /* TODO: replace with something meaningful */
6224         }
6225         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6226 #ifdef BCM_CNIC
6227         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6228         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6229         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6230         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6231         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6232         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6233         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6234         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6235         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6236         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6237 #endif
6238         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6239
6240         if (sizeof(union cdu_context) != 1024)
6241                 /* we currently assume that a context is 1024 bytes */
6242                 printk(KERN_ALERT PFX "please adjust the size of"
6243                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6244
6245         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6246         val = (4 << 24) + (0 << 12) + 1024;
6247         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6248
6249         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6250         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6251         /* enable context validation interrupt from CFC */
6252         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6253
6254         /* set the thresholds to prevent CFC/CDU race */
6255         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6256
6257         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6258         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6259
6260         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6261         /* Reset PCIE errors for debug */
6262         REG_WR(bp, 0x2814, 0xffffffff);
6263         REG_WR(bp, 0x3820, 0xffffffff);
6264
6265         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6266         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6267         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6268         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6269
6270         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6271         if (CHIP_IS_E1H(bp)) {
6272                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6273                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6274         }
6275
6276         if (CHIP_REV_IS_SLOW(bp))
6277                 msleep(200);
6278
6279         /* finish CFC init */
6280         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6281         if (val != 1) {
6282                 BNX2X_ERR("CFC LL_INIT failed\n");
6283                 return -EBUSY;
6284         }
6285         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6286         if (val != 1) {
6287                 BNX2X_ERR("CFC AC_INIT failed\n");
6288                 return -EBUSY;
6289         }
6290         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6291         if (val != 1) {
6292                 BNX2X_ERR("CFC CAM_INIT failed\n");
6293                 return -EBUSY;
6294         }
6295         REG_WR(bp, CFC_REG_DEBUG0, 0);
6296
6297         /* read the NIG statistic
6298            to see if this is the first time we are up since power-up */
6299         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6300         val = *bnx2x_sp(bp, wb_data[0]);
6301
6302         /* do internal memory self test */
6303         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6304                 BNX2X_ERR("internal mem self test failed\n");
6305                 return -EBUSY;
6306         }
6307
6308         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6309         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6310         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6311         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6312         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6313                 bp->port.need_hw_lock = 1;
6314                 break;
6315
6316         default:
6317                 break;
6318         }
6319
6320         bnx2x_setup_fan_failure_detection(bp);
6321
6322         /* clear PXP2 attentions */
6323         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6324
6325         enable_blocks_attention(bp);
6326
6327         if (!BP_NOMCP(bp)) {
6328                 bnx2x_acquire_phy_lock(bp);
6329                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6330                 bnx2x_release_phy_lock(bp);
6331         } else
6332                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6333
6334         return 0;
6335 }
6336
6337 static int bnx2x_init_port(struct bnx2x *bp)
6338 {
6339         int port = BP_PORT(bp);
6340         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6341         u32 low, high;
6342         u32 val;
6343
6344         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6345
6346         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6347
6348         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6349         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6350
6351         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6352         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6353         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6354         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6355
6356 #ifdef BCM_CNIC
6357         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6358
6359         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6360         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6361         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6362 #endif
6363         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6364
6365         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6366         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6367                 /* no pause for emulation and FPGA */
6368                 low = 0;
6369                 high = 513;
6370         } else {
6371                 if (IS_E1HMF(bp))
6372                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6373                 else if (bp->dev->mtu > 4096) {
6374                         if (bp->flags & ONE_PORT_FLAG)
6375                                 low = 160;
6376                         else {
6377                                 val = bp->dev->mtu;
6378                                 /* (24*1024 + val*4)/256 */
6379                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6380                         }
6381                 } else
6382                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6383                 high = low + 56;        /* 14*1024/256 */
6384         }
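        /* Arithmetic check (editorial) for the jumbo-MTU branch above:
         * the target is (24*1024 + mtu*4)/256 rounded up, and since
         * 24*1024/256 = 96 and 4/256 = 1/64, the expression
         * 96 + mtu/64 + (mtu%64 ? 1 : 0) computes exactly that.
         * E.g. mtu = 9000: low = 96 + 140 + 1 = 237, high = 237 + 56 = 293. */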
6385         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6386         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6387
6388
6389         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6390
6391         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6392         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6393         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6394         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6395
6396         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6397         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6398         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6399         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6400
6401         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6402         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6403
6404         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6405
6406         /* configure PBF to work without PAUSE mtu 9000 */
6407         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6408
6409         /* update threshold */
6410         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6411         /* update init credit */
6412         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6413
6414         /* probe changes */
6415         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6416         msleep(5);
6417         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6418
6419 #ifdef BCM_CNIC
6420         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6421 #endif
6422         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6423         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6424
6425         if (CHIP_IS_E1(bp)) {
6426                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6427                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6428         }
6429         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6430
6431         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6432         /* init aeu_mask_attn_func_0/1:
6433          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6434          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6435          *             bits 4-7 are used for "per vn group attention" */
6436         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6437                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6438
6439         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6440         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6441         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6442         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6443         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6444
6445         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6446
6447         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6448
6449         if (CHIP_IS_E1H(bp)) {
6450                 /* 0x2 disable e1hov, 0x1 enable */
6451                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6452                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6453
6454                 {
6455                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6456                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6457                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6458                 }
6459         }
6460
6461         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6462         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6463
6464         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6465         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6466                 {
6467                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6468
6469                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6470                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6471
6472                 /* The GPIO should be swapped if the swap register is
6473                    set and active */
6474                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6475                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6476
6477                 /* Select function upon port-swap configuration */
6478                 if (port == 0) {
6479                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6480                         aeu_gpio_mask = (swap_val && swap_override) ?
6481                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6482                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6483                 } else {
6484                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6485                         aeu_gpio_mask = (swap_val && swap_override) ?
6486                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6487                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6488                 }
6489                 val = REG_RD(bp, offset);
6490                 /* add GPIO3 to group */
6491                 val |= aeu_gpio_mask;
6492                 REG_WR(bp, offset, val);
6493                 }
6494                 break;
6495
6496         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6497         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6498                 /* add SPIO 5 to group 0 */
6499                 {
6500                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6501                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6502                 val = REG_RD(bp, reg_addr);
6503                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6504                 REG_WR(bp, reg_addr, val);
6505                 }
6506                 break;
6507
6508         default:
6509                 break;
6510         }
6511
6512         bnx2x__link_reset(bp);
6513
6514         return 0;
6515 }
6516
6517 #define ILT_PER_FUNC            (768/2)
6518 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6519 /* the phys address is shifted right 12 bits and has an added
6520    1=valid bit added to the 53rd bit
6521    then since this is a wide register(TM)
6522    we split it into two 32 bit writes
6523  */
6524 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6525 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6526 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6527 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
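/*
 * Worked example (editorial) for the ONCHIP_ADDR split with
 * x = 0x0012345678000000ULL:
 *   ONCHIP_ADDR1(x) = (x >> 12) & 0xFFFFFFFF   = 0x45678000
 *   ONCHIP_ADDR2(x) = (1 << 20) | (x >> 44)    = 0x00100123
 * i.e. the valid bit lands at bit 20 of the high word -- the 53rd bit of
 * the combined shifted entry, as the comment above describes.
 */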
6528
6529 #ifdef BCM_CNIC
6530 #define CNIC_ILT_LINES          127
6531 #define CNIC_CTX_PER_ILT        16
6532 #else
6533 #define CNIC_ILT_LINES          0
6534 #endif
6535
6536 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6537 {
6538         int reg;
6539
6540         if (CHIP_IS_E1H(bp))
6541                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6542         else /* E1 */
6543                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6544
6545         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6546 }
6547
6548 static int bnx2x_init_func(struct bnx2x *bp)
6549 {
6550         int port = BP_PORT(bp);
6551         int func = BP_FUNC(bp);
6552         u32 addr, val;
6553         int i;
6554
6555         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6556
6557         /* set MSI reconfigure capability */
6558         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6559         val = REG_RD(bp, addr);
6560         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6561         REG_WR(bp, addr, val);
6562
6563         i = FUNC_ILT_BASE(func);
6564
6565         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6566         if (CHIP_IS_E1H(bp)) {
6567                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6568                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6569         } else /* E1 */
6570                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6571                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6572
6573 #ifdef BCM_CNIC
6574         i += 1 + CNIC_ILT_LINES;
6575         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6576         if (CHIP_IS_E1(bp))
6577                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6578         else {
6579                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6580                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6581         }
6582
6583         i++;
6584         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6585         if (CHIP_IS_E1(bp))
6586                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6587         else {
6588                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6589                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6590         }
6591
6592         i++;
6593         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6594         if (CHIP_IS_E1(bp))
6595                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6596         else {
6597                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6598                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6599         }
6600
6601         /* tell the searcher where the T2 table is */
6602         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6603
6604         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6605                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6606
6607         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6608                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6609                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6610
6611         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6612 #endif
6613
6614         if (CHIP_IS_E1H(bp)) {
6615                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6616                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6617                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6618                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6619                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6620                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6621                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6622                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6623                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6624
6625                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6626                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6627         }
6628
6629         /* HC init per function */
6630         if (CHIP_IS_E1H(bp)) {
6631                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6632
6633                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6634                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6635         }
6636         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6637
6638         /* Reset PCIE errors for debug */
6639         REG_WR(bp, 0x2114, 0xffffffff);
6640         REG_WR(bp, 0x2120, 0xffffffff);
6641
6642         return 0;
6643 }
6644
6645 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6646 {
6647         int i, rc = 0;
6648
6649         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6650            BP_FUNC(bp), load_code);
6651
6652         bp->dmae_ready = 0;
6653         mutex_init(&bp->dmae_mutex);
6654         rc = bnx2x_gunzip_init(bp);
6655         if (rc)
6656                 return rc;
6657
6658         switch (load_code) {
6659         case FW_MSG_CODE_DRV_LOAD_COMMON:
6660                 rc = bnx2x_init_common(bp);
6661                 if (rc)
6662                         goto init_hw_err;
6663                 /* no break */
6664
6665         case FW_MSG_CODE_DRV_LOAD_PORT:
6666                 bp->dmae_ready = 1;
6667                 rc = bnx2x_init_port(bp);
6668                 if (rc)
6669                         goto init_hw_err;
6670                 /* no break */
6671
6672         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6673                 bp->dmae_ready = 1;
6674                 rc = bnx2x_init_func(bp);
6675                 if (rc)
6676                         goto init_hw_err;
6677                 break;
6678
6679         default:
6680                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6681                 break;
6682         }
6683
6684         if (!BP_NOMCP(bp)) {
6685                 int func = BP_FUNC(bp);
6686
6687                 bp->fw_drv_pulse_wr_seq =
6688                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6689                                  DRV_PULSE_SEQ_MASK);
6690                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6691         }
6692
6693         /* this needs to be done before gunzip end */
6694         bnx2x_zero_def_sb(bp);
6695         for_each_queue(bp, i)
6696                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6697 #ifdef BCM_CNIC
6698         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6699 #endif
6700
6701 init_hw_err:
6702         bnx2x_gunzip_end(bp);
6703
6704         return rc;
6705 }
6706
6707 static void bnx2x_free_mem(struct bnx2x *bp)
6708 {
6709
6710 #define BNX2X_PCI_FREE(x, y, size) \
6711         do { \
6712                 if (x) { \
6713                         pci_free_consistent(bp->pdev, size, x, y); \
6714                         x = NULL; \
6715                         y = 0; \
6716                 } \
6717         } while (0)
6718
6719 #define BNX2X_FREE(x) \
6720         do { \
6721                 if (x) { \
6722                         vfree(x); \
6723                         x = NULL; \
6724                 } \
6725         } while (0)
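/* Both helpers above are free-and-poison wrappers: they free only
 * non-NULL pointers and then clear the pointer (and, for the PCI
 * variant, the DMA handle). A hypothetical repeated call such as
 *
 *	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 *	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
 *
 * is therefore harmless -- the second invocation is a no-op.
 */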
6726
6727         int i;
6728
6729         /* fastpath */
6730         /* Common */
6731         for_each_queue(bp, i) {
6732
6733                 /* status blocks */
6734                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6735                                bnx2x_fp(bp, i, status_blk_mapping),
6736                                sizeof(struct host_status_block));
6737         }
6738         /* Rx */
6739         for_each_rx_queue(bp, i) {
6740
6741                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6742                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6743                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6744                                bnx2x_fp(bp, i, rx_desc_mapping),
6745                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6746
6747                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6748                                bnx2x_fp(bp, i, rx_comp_mapping),
6749                                sizeof(struct eth_fast_path_rx_cqe) *
6750                                NUM_RCQ_BD);
6751
6752                 /* SGE ring */
6753                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6754                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6755                                bnx2x_fp(bp, i, rx_sge_mapping),
6756                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6757         }
6758         /* Tx */
6759         for_each_tx_queue(bp, i) {
6760
6761                 /* fastpath tx rings: tx_buf tx_desc */
6762                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6763                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6764                                bnx2x_fp(bp, i, tx_desc_mapping),
6765                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6766         }
6767         /* end of fastpath */
6768
6769         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6770                        sizeof(struct host_def_status_block));
6771
6772         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6773                        sizeof(struct bnx2x_slowpath));
6774
6775 #ifdef BCM_CNIC
6776         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6777         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6778         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6779         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6780         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6781                        sizeof(struct host_status_block));
6782 #endif
6783         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6784
6785 #undef BNX2X_PCI_FREE
6786 #undef BNX2X_FREE
6787 }
6788
6789 static int bnx2x_alloc_mem(struct bnx2x *bp)
6790 {
6791
6792 #define BNX2X_PCI_ALLOC(x, y, size) \
6793         do { \
6794                 x = pci_alloc_consistent(bp->pdev, size, y); \
6795                 if (x == NULL) \
6796                         goto alloc_mem_err; \
6797                 memset(x, 0, size); \
6798         } while (0)
6799
6800 #define BNX2X_ALLOC(x, size) \
6801         do { \
6802                 x = vmalloc(size); \
6803                 if (x == NULL) \
6804                         goto alloc_mem_err; \
6805                 memset(x, 0, size); \
6806         } while (0)
6807
6808         int i;
6809
6810         /* fastpath */
6811         /* Common */
6812         for_each_queue(bp, i) {
6813                 bnx2x_fp(bp, i, bp) = bp;
6814
6815                 /* status blocks */
6816                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6817                                 &bnx2x_fp(bp, i, status_blk_mapping),
6818                                 sizeof(struct host_status_block));
6819         }
6820         /* Rx */
6821         for_each_rx_queue(bp, i) {
6822
6823                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6824                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6825                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6826                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6827                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6828                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6829
6830                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6831                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6832                                 sizeof(struct eth_fast_path_rx_cqe) *
6833                                 NUM_RCQ_BD);
6834
6835                 /* SGE ring */
6836                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6837                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6838                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6839                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6840                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6841         }
6842         /* Tx */
6843         for_each_tx_queue(bp, i) {
6844
6845                 /* fastpath tx rings: tx_buf tx_desc */
6846                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6847                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6848                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6849                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6850                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6851         }
6852         /* end of fastpath */
6853
6854         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6855                         sizeof(struct host_def_status_block));
6856
6857         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6858                         sizeof(struct bnx2x_slowpath));
6859
6860 #ifdef BCM_CNIC
6861         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6862
6863         /* allocate searcher T2 table:
6864          * we allocate 1/4 of the T1 allocation for T2
6865          * (which is not entered into the ILT) */
6866         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6867
6868         /* Initialize T2 (for 1024 connections) */
6869         for (i = 0; i < 16*1024; i += 64)
6870                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
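        /* The loop above chains the 1024 64-byte T2 entries together:
         * the last 8 bytes of entry i receive the DMA address of entry
         * i + 1 (illustrative layout):
         *
         *	entry 0, bytes 56..63 = t2_mapping + 64
         *	entry 1, bytes 56..63 = t2_mapping + 128
         *	...
         *	(the final entry points just past the end of the table)
         */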
6871
6872         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6873         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6874
6875         /* QM queues (128*MAX_CONN) */
6876         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6877
6878         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6879                         sizeof(struct host_status_block));
6880 #endif
6881
6882         /* Slow path ring */
6883         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6884
6885         return 0;
6886
6887 alloc_mem_err:
6888         bnx2x_free_mem(bp);
6889         return -ENOMEM;
6890
6891 #undef BNX2X_PCI_ALLOC
6892 #undef BNX2X_ALLOC
6893 }
6894
6895 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6896 {
6897         int i;
6898
6899         for_each_tx_queue(bp, i) {
6900                 struct bnx2x_fastpath *fp = &bp->fp[i];
6901
6902                 u16 bd_cons = fp->tx_bd_cons;
6903                 u16 sw_prod = fp->tx_pkt_prod;
6904                 u16 sw_cons = fp->tx_pkt_cons;
6905
6906                 while (sw_cons != sw_prod) {
6907                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6908                         sw_cons++;
6909                 }
6910         }
6911 }
6912
6913 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6914 {
6915         int i, j;
6916
6917         for_each_rx_queue(bp, j) {
6918                 struct bnx2x_fastpath *fp = &bp->fp[j];
6919
6920                 for (i = 0; i < NUM_RX_BD; i++) {
6921                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6922                         struct sk_buff *skb = rx_buf->skb;
6923
6924                         if (skb == NULL)
6925                                 continue;
6926
6927                         pci_unmap_single(bp->pdev,
6928                                          pci_unmap_addr(rx_buf, mapping),
6929                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6930
6931                         rx_buf->skb = NULL;
6932                         dev_kfree_skb(skb);
6933                 }
6934                 if (!fp->disable_tpa)
6935                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6936                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6937                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6938         }
6939 }
6940
6941 static void bnx2x_free_skbs(struct bnx2x *bp)
6942 {
6943         bnx2x_free_tx_skbs(bp);
6944         bnx2x_free_rx_skbs(bp);
6945 }
6946
6947 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6948 {
6949         int i, offset = 1;
6950
6951         free_irq(bp->msix_table[0].vector, bp->dev);
6952         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6953            bp->msix_table[0].vector);
6954
6955 #ifdef BCM_CNIC
6956         offset++;
6957 #endif
6958         for_each_queue(bp, i) {
6959                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6960                    "state %x\n", i, bp->msix_table[i + offset].vector,
6961                    bnx2x_fp(bp, i, state));
6962
6963                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6964         }
6965 }
6966
6967 static void bnx2x_free_irq(struct bnx2x *bp)
6968 {
6969         if (bp->flags & USING_MSIX_FLAG) {
6970                 bnx2x_free_msix_irqs(bp);
6971                 pci_disable_msix(bp->pdev);
6972                 bp->flags &= ~USING_MSIX_FLAG;
6973
6974         } else if (bp->flags & USING_MSI_FLAG) {
6975                 free_irq(bp->pdev->irq, bp->dev);
6976                 pci_disable_msi(bp->pdev);
6977                 bp->flags &= ~USING_MSI_FLAG;
6978
6979         } else
6980                 free_irq(bp->pdev->irq, bp->dev);
6981 }
6982
6983 static int bnx2x_enable_msix(struct bnx2x *bp)
6984 {
6985         int i, rc, offset = 1;
6986         int igu_vec = 0;
6987
6988         bp->msix_table[0].entry = igu_vec;
6989         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6990
6991 #ifdef BCM_CNIC
6992         igu_vec = BP_L_ID(bp) + offset;
6993         bp->msix_table[1].entry = igu_vec;
6994         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6995         offset++;
6996 #endif
6997         for_each_queue(bp, i) {
6998                 igu_vec = BP_L_ID(bp) + offset + i;
6999                 bp->msix_table[i + offset].entry = igu_vec;
7000                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7001                    "(fastpath #%u)\n", i + offset, igu_vec, i);
7002         }
7003
7004         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7005                              BNX2X_NUM_QUEUES(bp) + offset);
7006         if (rc) {
7007                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
7008                 return rc;
7009         }
7010
7011         bp->flags |= USING_MSIX_FLAG;
7012
7013         return 0;
7014 }
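/* Resulting MSI-X table layout (sketch; the CNIC row exists only when
 * BCM_CNIC is defined):
 *
 *	msix_table[0]           slowpath         (IGU vector 0)
 *	msix_table[1]           CNIC             (IGU vector BP_L_ID + 1)
 *	msix_table[offset + i]  fastpath queue i (IGU vector BP_L_ID + offset + i)
 */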
7015
7016 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7017 {
7018         int i, rc, offset = 1;
7019
7020         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7021                          bp->dev->name, bp->dev);
7022         if (rc) {
7023                 BNX2X_ERR("request sp irq failed\n");
7024                 return -EBUSY;
7025         }
7026
7027 #ifdef BCM_CNIC
7028         offset++;
7029 #endif
7030         for_each_queue(bp, i) {
7031                 struct bnx2x_fastpath *fp = &bp->fp[i];
7032
7033                 if (i < bp->num_rx_queues)
7034                         sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7035                 else
7036                         sprintf(fp->name, "%s-tx-%d",
7037                                 bp->dev->name, i - bp->num_rx_queues);
7038
7039                 rc = request_irq(bp->msix_table[i + offset].vector,
7040                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7041                 if (rc) {
7042                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7043                         bnx2x_free_msix_irqs(bp);
7044                         return -EBUSY;
7045                 }
7046
7047                 fp->state = BNX2X_FP_STATE_IRQ;
7048         }
7049
7050         i = BNX2X_NUM_QUEUES(bp);
7051         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
7052                " ... fp[%d] %d\n",
7053                bp->dev->name, bp->msix_table[0].vector,
7054                0, bp->msix_table[offset].vector,
7055                i - 1, bp->msix_table[offset + i - 1].vector);
7056
7057         return 0;
7058 }
7059
7060 static int bnx2x_enable_msi(struct bnx2x *bp)
7061 {
7062         int rc;
7063
7064         rc = pci_enable_msi(bp->pdev);
7065         if (rc) {
7066                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7067                 return -1;
7068         }
7069         bp->flags |= USING_MSI_FLAG;
7070
7071         return 0;
7072 }
7073
7074 static int bnx2x_req_irq(struct bnx2x *bp)
7075 {
7076         unsigned long flags;
7077         int rc;
7078
7079         if (bp->flags & USING_MSI_FLAG)
7080                 flags = 0;
7081         else
7082                 flags = IRQF_SHARED;
7083
7084         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7085                          bp->dev->name, bp->dev);
7086         if (!rc)
7087                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7088
7089         return rc;
7090 }
7091
7092 static void bnx2x_napi_enable(struct bnx2x *bp)
7093 {
7094         int i;
7095
7096         for_each_rx_queue(bp, i)
7097                 napi_enable(&bnx2x_fp(bp, i, napi));
7098 }
7099
7100 static void bnx2x_napi_disable(struct bnx2x *bp)
7101 {
7102         int i;
7103
7104         for_each_rx_queue(bp, i)
7105                 napi_disable(&bnx2x_fp(bp, i, napi));
7106 }
7107
7108 static void bnx2x_netif_start(struct bnx2x *bp)
7109 {
7110         int intr_sem;
7111
7112         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7113         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7114
7115         if (intr_sem) {
7116                 if (netif_running(bp->dev)) {
7117                         bnx2x_napi_enable(bp);
7118                         bnx2x_int_enable(bp);
7119                         if (bp->state == BNX2X_STATE_OPEN)
7120                                 netif_tx_wake_all_queues(bp->dev);
7121                 }
7122         }
7123 }
7124
7125 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7126 {
7127         bnx2x_int_disable_sync(bp, disable_hw);
7128         bnx2x_napi_disable(bp);
7129         netif_tx_disable(bp->dev);
7130         bp->dev->trans_start = jiffies; /* prevent tx timeout */
7131 }
7132
7133 /*
7134  * Init service functions
7135  */
7136
7137 /**
7138  * Sets a MAC in a CAM for a few L2 clients for the E1 chip
7139  *
7140  * @param bp driver descriptor
7141  * @param set set or clear an entry (1 or 0)
7142  * @param mac pointer to a buffer containing a MAC
7143  * @param cl_bit_vec bit vector of clients to register a MAC for
7144  * @param cam_offset offset in a CAM to use
7145  * @param with_bcast set broadcast MAC as well
7146  */
7147 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7148                                       u32 cl_bit_vec, u8 cam_offset,
7149                                       u8 with_bcast)
7150 {
7151         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7152         int port = BP_PORT(bp);
7153
7154         /* CAM allocation
7155          * unicasts 0-31:port0 32-63:port1
7156          * multicast 64-127:port0 128-191:port1
7157          */
7158         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7159         config->hdr.offset = cam_offset;
7160         config->hdr.client_id = 0xff;
7161         config->hdr.reserved1 = 0;
7162
7163         /* primary MAC */
7164         config->config_table[0].cam_entry.msb_mac_addr =
7165                                         swab16(*(u16 *)&mac[0]);
7166         config->config_table[0].cam_entry.middle_mac_addr =
7167                                         swab16(*(u16 *)&mac[2]);
7168         config->config_table[0].cam_entry.lsb_mac_addr =
7169                                         swab16(*(u16 *)&mac[4]);
7170         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7171         if (set)
7172                 config->config_table[0].target_table_entry.flags = 0;
7173         else
7174                 CAM_INVALIDATE(config->config_table[0]);
7175         config->config_table[0].target_table_entry.clients_bit_vector =
7176                                                 cpu_to_le32(cl_bit_vec);
7177         config->config_table[0].target_table_entry.vlan_id = 0;
7178
7179         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7180            (set ? "setting" : "clearing"),
7181            config->config_table[0].cam_entry.msb_mac_addr,
7182            config->config_table[0].cam_entry.middle_mac_addr,
7183            config->config_table[0].cam_entry.lsb_mac_addr);
7184
7185         /* broadcast */
7186         if (with_bcast) {
7187                 config->config_table[1].cam_entry.msb_mac_addr =
7188                         cpu_to_le16(0xffff);
7189                 config->config_table[1].cam_entry.middle_mac_addr =
7190                         cpu_to_le16(0xffff);
7191                 config->config_table[1].cam_entry.lsb_mac_addr =
7192                         cpu_to_le16(0xffff);
7193                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7194                 if (set)
7195                         config->config_table[1].target_table_entry.flags =
7196                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7197                 else
7198                         CAM_INVALIDATE(config->config_table[1]);
7199                 config->config_table[1].target_table_entry.clients_bit_vector =
7200                                                         cpu_to_le32(cl_bit_vec);
7201                 config->config_table[1].target_table_entry.vlan_id = 0;
7202         }
7203
7204         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7205                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7206                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7207 }
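/* The cam_entry fields above carry the MAC 16 bits at a time with the
 * byte order swapped; on a little-endian host a hypothetical MAC
 * 00:11:22:33:44:55 would be packed as
 *
 *	msb_mac_addr    = swab16(0x1100) = 0x0011
 *	middle_mac_addr = swab16(0x3322) = 0x2233
 *	lsb_mac_addr    = swab16(0x5544) = 0x4455
 *
 * which matches the %04x:%04x:%04x debug format used above.
 */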
7208
7209 /**
7210  * Sets a MAC in a CAM for a few L2 clients for the E1H chip
7211  *
7212  * @param bp driver descriptor
7213  * @param set set or clear an entry (1 or 0)
7214  * @param mac pointer to a buffer containing a MAC
7215  * @param cl_bit_vec bit vector of clients to register a MAC for
7216  * @param cam_offset offset in a CAM to use
7217  */
7218 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7219                                        u32 cl_bit_vec, u8 cam_offset)
7220 {
7221         struct mac_configuration_cmd_e1h *config =
7222                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7223
7224         config->hdr.length = 1;
7225         config->hdr.offset = cam_offset;
7226         config->hdr.client_id = 0xff;
7227         config->hdr.reserved1 = 0;
7228
7229         /* primary MAC */
7230         config->config_table[0].msb_mac_addr =
7231                                         swab16(*(u16 *)&mac[0]);
7232         config->config_table[0].middle_mac_addr =
7233                                         swab16(*(u16 *)&mac[2]);
7234         config->config_table[0].lsb_mac_addr =
7235                                         swab16(*(u16 *)&mac[4]);
7236         config->config_table[0].clients_bit_vector =
7237                                         cpu_to_le32(cl_bit_vec);
7238         config->config_table[0].vlan_id = 0;
7239         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7240         if (set)
7241                 config->config_table[0].flags = BP_PORT(bp);
7242         else
7243                 config->config_table[0].flags =
7244                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7245
7246         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7247            (set ? "setting" : "clearing"),
7248            config->config_table[0].msb_mac_addr,
7249            config->config_table[0].middle_mac_addr,
7250            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7251
7252         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7253                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7254                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7255 }
7256
7257 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7258                              int *state_p, int poll)
7259 {
7260         /* can take a while if any port is running */
7261         int cnt = 5000;
7262
7263         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7264            poll ? "polling" : "waiting", state, idx);
7265
7266         might_sleep();
7267         while (cnt--) {
7268                 if (poll) {
7269                         bnx2x_rx_int(bp->fp, 10);
7270                         /* if the index is different from 0,
7271                          * the reply for some commands will
7272                          * be on a non-default queue
7273                          */
7274                         if (idx)
7275                                 bnx2x_rx_int(&bp->fp[idx], 10);
7276                 }
7277
7278                 mb(); /* state is changed by bnx2x_sp_event() */
7279                 if (*state_p == state) {
7280 #ifdef BNX2X_STOP_ON_ERROR
7281                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7282 #endif
7283                         return 0;
7284                 }
7285
7286                 msleep(1);
7287
7288                 if (bp->panic)
7289                         return -EIO;
7290         }
7291
7292         /* timeout! */
7293         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7294                   poll ? "polling" : "waiting", state, idx);
7295 #ifdef BNX2X_STOP_ON_ERROR
7296         bnx2x_panic();
7297 #endif
7298
7299         return -EBUSY;
7300 }
7301
7302 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7303 {
7304         bp->set_mac_pending++;
7305         smp_wmb();
7306
7307         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7308                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7309
7310         /* Wait for a completion */
7311         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7312 }
7313
7314 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7315 {
7316         bp->set_mac_pending++;
7317         smp_wmb();
7318
7319         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7320                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7321                                   1);
7322
7323         /* Wait for a completion */
7324         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7325 }
7326
7327 #ifdef BCM_CNIC
7328 /**
7329  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7330  * MAC(s). This function will wait until the ramrod completion
7331  * returns.
7332  *
7333  * @param bp driver handle
7334  * @param set set or clear the CAM entry
7335  *
7336  * @return 0 on success, -ENODEV if the ramrod doesn't return.
7337  */
7338 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7339 {
7340         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7341
7342         bp->set_mac_pending++;
7343         smp_wmb();
7344
7345         /* Send a SET_MAC ramrod */
7346         if (CHIP_IS_E1(bp))
7347                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7348                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7349                                   1);
7350         else
7351                 /* CAM allocation for E1H
7352                  * unicasts: by func number
7353                  * multicast: 20+FUNC*20, 20 each
7354                  */
7355                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7356                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7357
7358         /* Wait for a completion when setting */
7359         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7360
7361         return 0;
7362 }
7363 #endif
7364
7365 static int bnx2x_setup_leading(struct bnx2x *bp)
7366 {
7367         int rc;
7368
7369         /* reset IGU state */
7370         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7371
7372         /* SETUP ramrod */
7373         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7374
7375         /* Wait for completion */
7376         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7377
7378         return rc;
7379 }
7380
7381 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7382 {
7383         struct bnx2x_fastpath *fp = &bp->fp[index];
7384
7385         /* reset IGU state */
7386         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7387
7388         /* SETUP ramrod */
7389         fp->state = BNX2X_FP_STATE_OPENING;
7390         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7391                       fp->cl_id, 0);
7392
7393         /* Wait for completion */
7394         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7395                                  &(fp->state), 0);
7396 }
7397
7398 static int bnx2x_poll(struct napi_struct *napi, int budget);
7399
7400 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7401                                     int *num_tx_queues_out)
7402 {
7403         int _num_rx_queues = 0, _num_tx_queues = 0;
7404
7405         switch (bp->multi_mode) {
7406         case ETH_RSS_MODE_DISABLED:
7407                 _num_rx_queues = 1;
7408                 _num_tx_queues = 1;
7409                 break;
7410
7411         case ETH_RSS_MODE_REGULAR:
7412                 if (num_rx_queues)
7413                         _num_rx_queues = min_t(u32, num_rx_queues,
7414                                                BNX2X_MAX_QUEUES(bp));
7415                 else
7416                         _num_rx_queues = min_t(u32, num_online_cpus(),
7417                                                BNX2X_MAX_QUEUES(bp));
7418
7419                 if (num_tx_queues)
7420                         _num_tx_queues = min_t(u32, num_tx_queues,
7421                                                BNX2X_MAX_QUEUES(bp));
7422                 else
7423                         _num_tx_queues = min_t(u32, num_online_cpus(),
7424                                                BNX2X_MAX_QUEUES(bp));
7425
7426                 /* There must not be more Tx queues than Rx queues */
7427                 if (_num_tx_queues > _num_rx_queues) {
7428                         BNX2X_ERR("number of tx queues (%d) > "
7429                                   "number of rx queues (%d)"
7430                                   "  defaulting to %d\n",
7431                                   _num_tx_queues, _num_rx_queues,
7432                                   _num_rx_queues);
7433                         _num_tx_queues = _num_rx_queues;
7434                 }
7435                 break;
7436
7437
7438         default:
7439                 _num_rx_queues = 1;
7440                 _num_tx_queues = 1;
7441                 break;
7442         }
7443
7444         *num_rx_queues_out = _num_rx_queues;
7445         *num_tx_queues_out = _num_tx_queues;
7446 }
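/* Worked example (hypothetical): with multi_mode=1, eight online CPUs
 * and neither num_rx_queues nor num_tx_queues set, both counts come out
 * as min(8, BNX2X_MAX_QUEUES(bp)). Passing num_tx_queues=16 together
 * with num_rx_queues=4 would trip the check above and clamp the Tx
 * count back down to 4.
 */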
7447
7448 static int bnx2x_set_int_mode(struct bnx2x *bp)
7449 {
7450         int rc = 0;
7451
7452         switch (int_mode) {
7453         case INT_MODE_INTx:
7454         case INT_MODE_MSI:
7455                 bp->num_rx_queues = 1;
7456                 bp->num_tx_queues = 1;
7457                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7458                 break;
7459
7460         case INT_MODE_MSIX:
7461         default:
7462                 /* Set interrupt mode according to bp->multi_mode value */
7463                 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7464                                         &bp->num_tx_queues);
7465
7466                 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7467                    bp->num_rx_queues, bp->num_tx_queues);
7468
7469                 /* if we can't use MSI-X we only need one fp,
7470                  * so try to enable MSI-X with the requested number of fp's
7471                  * and fall back to MSI or legacy INTx with one fp
7472                  */
7473                 rc = bnx2x_enable_msix(bp);
7474                 if (rc) {
7475                         /* failed to enable MSI-X */
7476                         if (bp->multi_mode)
7477                                 BNX2X_ERR("Multi requested but failed to "
7478                                           "enable MSI-X (rx %d tx %d), "
7479                                           "set number of queues to 1\n",
7480                                           bp->num_rx_queues, bp->num_tx_queues);
7481                         bp->num_rx_queues = 1;
7482                         bp->num_tx_queues = 1;
7483                 }
7484                 break;
7485         }
7486         bp->dev->real_num_tx_queues = bp->num_tx_queues;
7487         return rc;
7488 }
7489
7490 #ifdef BCM_CNIC
7491 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7492 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7493 #endif
7494
7495 /* must be called with rtnl_lock */
7496 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7497 {
7498         u32 load_code;
7499         int i, rc;
7500
7501 #ifdef BNX2X_STOP_ON_ERROR
7502         if (unlikely(bp->panic))
7503                 return -EPERM;
7504 #endif
7505
7506         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7507
7508         rc = bnx2x_set_int_mode(bp);
7509
7510         if (bnx2x_alloc_mem(bp))
7511                 return -ENOMEM;
7512
7513         for_each_rx_queue(bp, i)
7514                 bnx2x_fp(bp, i, disable_tpa) =
7515                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7516
7517         for_each_rx_queue(bp, i)
7518                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7519                                bnx2x_poll, 128);
7520
7521         bnx2x_napi_enable(bp);
7522
7523         if (bp->flags & USING_MSIX_FLAG) {
7524                 rc = bnx2x_req_msix_irqs(bp);
7525                 if (rc) {
7526                         pci_disable_msix(bp->pdev);
7527                         goto load_error1;
7528                 }
7529         } else {
7530                 /* Fall back to INTx if we failed to enable MSI-X due to
7531                    lack of memory (in bnx2x_set_int_mode()) */
7532                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7533                         bnx2x_enable_msi(bp);
7534                 bnx2x_ack_int(bp);
7535                 rc = bnx2x_req_irq(bp);
7536                 if (rc) {
7537                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7538                         if (bp->flags & USING_MSI_FLAG)
7539                                 pci_disable_msi(bp->pdev);
7540                         goto load_error1;
7541                 }
7542                 if (bp->flags & USING_MSI_FLAG) {
7543                         bp->dev->irq = bp->pdev->irq;
7544                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7545                                bp->dev->name, bp->pdev->irq);
7546                 }
7547         }
7548
7549         /* Send LOAD_REQUEST command to the MCP.
7550            The response is the type of LOAD command: if this is the
7551            first port to be initialized, the common blocks should be
7552            initialized as well; otherwise they should not.
7553         */
7554         if (!BP_NOMCP(bp)) {
7555                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7556                 if (!load_code) {
7557                         BNX2X_ERR("MCP response failure, aborting\n");
7558                         rc = -EBUSY;
7559                         goto load_error2;
7560                 }
7561                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7562                         rc = -EBUSY; /* other port in diagnostic mode */
7563                         goto load_error2;
7564                 }
7565
7566         } else {
7567                 int port = BP_PORT(bp);
7568
7569                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7570                    load_count[0], load_count[1], load_count[2]);
7571                 load_count[0]++;
7572                 load_count[1 + port]++;
7573                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7574                    load_count[0], load_count[1], load_count[2]);
7575                 if (load_count[0] == 1)
7576                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7577                 else if (load_count[1 + port] == 1)
7578                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7579                 else
7580                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7581         }
7582
7583         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7584             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7585                 bp->port.pmf = 1;
7586         else
7587                 bp->port.pmf = 0;
7588         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7589
7590         /* Initialize HW */
7591         rc = bnx2x_init_hw(bp, load_code);
7592         if (rc) {
7593                 BNX2X_ERR("HW init failed, aborting\n");
7594                 goto load_error2;
7595         }
7596
7597         /* Setup NIC internals and enable interrupts */
7598         bnx2x_nic_init(bp, load_code);
7599
7600         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7601             (bp->common.shmem2_base))
7602                 SHMEM2_WR(bp, dcc_support,
7603                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7604                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7605
7606         /* Send LOAD_DONE command to MCP */
7607         if (!BP_NOMCP(bp)) {
7608                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7609                 if (!load_code) {
7610                         BNX2X_ERR("MCP response failure, aborting\n");
7611                         rc = -EBUSY;
7612                         goto load_error3;
7613                 }
7614         }
7615
7616         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7617
7618         rc = bnx2x_setup_leading(bp);
7619         if (rc) {
7620                 BNX2X_ERR("Setup leading failed!\n");
7621 #ifndef BNX2X_STOP_ON_ERROR
7622                 goto load_error3;
7623 #else
7624                 bp->panic = 1;
7625                 return -EBUSY;
7626 #endif
7627         }
7628
7629         if (CHIP_IS_E1H(bp))
7630                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7631                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7632                         bp->state = BNX2X_STATE_DISABLED;
7633                 }
7634
7635         if (bp->state == BNX2X_STATE_OPEN) {
7636 #ifdef BCM_CNIC
7637                 /* Enable Timer scan */
7638                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7639 #endif
7640                 for_each_nondefault_queue(bp, i) {
7641                         rc = bnx2x_setup_multi(bp, i);
7642                         if (rc)
7643 #ifdef BCM_CNIC
7644                                 goto load_error4;
7645 #else
7646                                 goto load_error3;
7647 #endif
7648                 }
7649
7650                 if (CHIP_IS_E1(bp))
7651                         bnx2x_set_eth_mac_addr_e1(bp, 1);
7652                 else
7653                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
7654 #ifdef BCM_CNIC
7655                 /* Set iSCSI L2 MAC */
7656                 mutex_lock(&bp->cnic_mutex);
7657                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7658                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7659                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7660                 }
7661                 mutex_unlock(&bp->cnic_mutex);
7662 #endif
7663         }
7664
7665         if (bp->port.pmf)
7666                 bnx2x_initial_phy_init(bp, load_mode);
7667
7668         /* Start fast path */
7669         switch (load_mode) {
7670         case LOAD_NORMAL:
7671                 if (bp->state == BNX2X_STATE_OPEN) {
7672                         /* Tx queues should only be re-enabled */
7673                         netif_tx_wake_all_queues(bp->dev);
7674                 }
7675                 /* Initialize the receive filter. */
7676                 bnx2x_set_rx_mode(bp->dev);
7677                 break;
7678
7679         case LOAD_OPEN:
7680                 netif_tx_start_all_queues(bp->dev);
7681                 if (bp->state != BNX2X_STATE_OPEN)
7682                         netif_tx_disable(bp->dev);
7683                 /* Initialize the receive filter. */
7684                 bnx2x_set_rx_mode(bp->dev);
7685                 break;
7686
7687         case LOAD_DIAG:
7688                 /* Initialize the receive filter. */
7689                 bnx2x_set_rx_mode(bp->dev);
7690                 bp->state = BNX2X_STATE_DIAG;
7691                 break;
7692
7693         default:
7694                 break;
7695         }
7696
7697         if (!bp->port.pmf)
7698                 bnx2x__link_status_update(bp);
7699
7700         /* start the timer */
7701         mod_timer(&bp->timer, jiffies + bp->current_interval);
7702
7703 #ifdef BCM_CNIC
7704         bnx2x_setup_cnic_irq_info(bp);
7705         if (bp->state == BNX2X_STATE_OPEN)
7706                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7707 #endif
7708
7709         return 0;
7710
7711 #ifdef BCM_CNIC
7712 load_error4:
7713         /* Disable Timer scan */
7714         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7715 #endif
7716 load_error3:
7717         bnx2x_int_disable_sync(bp, 1);
7718         if (!BP_NOMCP(bp)) {
7719                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7720                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7721         }
7722         bp->port.pmf = 0;
7723         /* Free SKBs, SGEs, TPA pool and driver internals */
7724         bnx2x_free_skbs(bp);
7725         for_each_rx_queue(bp, i)
7726                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7727 load_error2:
7728         /* Release IRQs */
7729         bnx2x_free_irq(bp);
7730 load_error1:
7731         bnx2x_napi_disable(bp);
7732         for_each_rx_queue(bp, i)
7733                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7734         bnx2x_free_mem(bp);
7735
7736         return rc;
7737 }
7738
7739 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7740 {
7741         struct bnx2x_fastpath *fp = &bp->fp[index];
7742         int rc;
7743
7744         /* halt the connection */
7745         fp->state = BNX2X_FP_STATE_HALTING;
7746         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7747
7748         /* Wait for completion */
7749         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7750                                &(fp->state), 1);
7751         if (rc) /* timeout */
7752                 return rc;
7753
7754         /* delete cfc entry */
7755         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7756
7757         /* Wait for completion */
7758         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7759                                &(fp->state), 1);
7760         return rc;
7761 }
7762
7763 static int bnx2x_stop_leading(struct bnx2x *bp)
7764 {
7765         __le16 dsb_sp_prod_idx;
7766         /* if the other port is handling traffic,
7767            this can take a lot of time */
7768         int cnt = 500;
7769         int rc;
7770
7771         might_sleep();
7772
7773         /* Send HALT ramrod */
7774         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7775         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7776
7777         /* Wait for completion */
7778         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7779                                &(bp->fp[0].state), 1);
7780         if (rc) /* timeout */
7781                 return rc;
7782
7783         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7784
7785         /* Send PORT_DELETE ramrod */
7786         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7787
7788         /* Wait for the completion to arrive on the default status block;
7789            we are going to reset the chip anyway, so there is not much
7790            to do if this times out
7791          */
7792         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7793                 if (!cnt) {
7794                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7795                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7796                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7797 #ifdef BNX2X_STOP_ON_ERROR
7798                         bnx2x_panic();
7799 #endif
7800                         rc = -EBUSY;
7801                         break;
7802                 }
7803                 cnt--;
7804                 msleep(1);
7805                 rmb(); /* Refresh the dsb_sp_prod */
7806         }
7807         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7808         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7809
7810         return rc;
7811 }
7812
7813 static void bnx2x_reset_func(struct bnx2x *bp)
7814 {
7815         int port = BP_PORT(bp);
7816         int func = BP_FUNC(bp);
7817         int base, i;
7818
7819         /* Configure IGU */
7820         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7821         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7822
7823 #ifdef BCM_CNIC
7824         /* Disable Timer scan */
7825         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7826         /*
7827          * Wait for at least 10ms and up to 2 seconds for the timers
7828          * scan to complete
7829          */
7830         for (i = 0; i < 200; i++) {
7831                 msleep(10);
7832                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7833                         break;
7834         }
7835 #endif
7836         /* Clear ILT */
7837         base = FUNC_ILT_BASE(func);
7838         for (i = base; i < base + ILT_PER_FUNC; i++)
7839                 bnx2x_ilt_wr(bp, i, 0);
7840 }
7841
7842 static void bnx2x_reset_port(struct bnx2x *bp)
7843 {
7844         int port = BP_PORT(bp);
7845         u32 val;
7846
7847         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7848
7849         /* Do not rcv packets to BRB */
7850         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7851         /* Do not direct rcv packets that are not for MCP to the BRB */
7852         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7853                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7854
7855         /* Configure AEU */
7856         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7857
7858         msleep(100);
7859         /* Check for BRB port occupancy */
7860         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7861         if (val)
7862                 DP(NETIF_MSG_IFDOWN,
7863                    "BRB1 is not empty  %d blocks are occupied\n", val);
7864
7865         /* TODO: Close Doorbell port? */
7866 }
7867
7868 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7869 {
7870         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7871            BP_FUNC(bp), reset_code);
7872
7873         switch (reset_code) {
7874         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7875                 bnx2x_reset_port(bp);
7876                 bnx2x_reset_func(bp);
7877                 bnx2x_reset_common(bp);
7878                 break;
7879
7880         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7881                 bnx2x_reset_port(bp);
7882                 bnx2x_reset_func(bp);
7883                 break;
7884
7885         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7886                 bnx2x_reset_func(bp);
7887                 break;
7888
7889         default:
7890                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7891                 break;
7892         }
7893 }
7894
7895 /* must be called with rtnl_lock */
7896 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7897 {
7898         int port = BP_PORT(bp);
7899         u32 reset_code = 0;
7900         int i, cnt, rc;
7901
7902 #ifdef BCM_CNIC
7903         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7904 #endif
7905         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7906
7907         /* Set "drop all" */
7908         bp->rx_mode = BNX2X_RX_MODE_NONE;
7909         bnx2x_set_storm_rx_mode(bp);
7910
7911         /* Disable HW interrupts, NAPI and Tx */
7912         bnx2x_netif_stop(bp, 1);
7913
7914         del_timer_sync(&bp->timer);
7915         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7916                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7917         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7918
7919         /* Release IRQs */
7920         bnx2x_free_irq(bp);
7921
7922         /* Wait until tx fastpath tasks complete */
7923         for_each_tx_queue(bp, i) {
7924                 struct bnx2x_fastpath *fp = &bp->fp[i];
7925
7926                 cnt = 1000;
7927                 while (bnx2x_has_tx_work_unload(fp)) {
7928
7929                         bnx2x_tx_int(fp);
7930                         if (!cnt) {
7931                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7932                                           i);
7933 #ifdef BNX2X_STOP_ON_ERROR
7934                                 bnx2x_panic();
7935                                 return -EBUSY;
7936 #else
7937                                 break;
7938 #endif
7939                         }
7940                         cnt--;
7941                         msleep(1);
7942                 }
7943         }
7944         /* Give HW time to discard old tx messages */
7945         msleep(1);
7946
7947         if (CHIP_IS_E1(bp)) {
7948                 struct mac_configuration_cmd *config =
7949                                                 bnx2x_sp(bp, mcast_config);
7950
7951                 bnx2x_set_eth_mac_addr_e1(bp, 0);
7952
7953                 for (i = 0; i < config->hdr.length; i++)
7954                         CAM_INVALIDATE(config->config_table[i]);
7955
7956                 config->hdr.length = i;
7957                 if (CHIP_REV_IS_SLOW(bp))
7958                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7959                 else
7960                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7961                 config->hdr.client_id = bp->fp->cl_id;
7962                 config->hdr.reserved1 = 0;
7963
7964                 bp->set_mac_pending++;
7965                 smp_wmb();
7966
7967                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7968                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7969                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7970
7971         } else { /* E1H */
7972                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7973
7974                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7975
7976                 for (i = 0; i < MC_HASH_SIZE; i++)
7977                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7978
7979                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7980         }
7981 #ifdef BCM_CNIC
7982         /* Clear iSCSI L2 MAC */
7983         mutex_lock(&bp->cnic_mutex);
7984         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7985                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7986                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7987         }
7988         mutex_unlock(&bp->cnic_mutex);
7989 #endif
7990
7991         if (unload_mode == UNLOAD_NORMAL)
7992                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7993
7994         else if (bp->flags & NO_WOL_FLAG)
7995                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7996
7997         else if (bp->wol) {
7998                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7999                 u8 *mac_addr = bp->dev->dev_addr;
8000                 u32 val;
8001                 /* The MAC address is written to entries 1-4 to
8002                    preserve entry 0 which is used by the PMF */
8003                 u8 entry = (BP_E1HVN(bp) + 1)*8;
8004
8005                 val = (mac_addr[0] << 8) | mac_addr[1];
8006                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8007
8008                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8009                       (mac_addr[4] << 8) | mac_addr[5];
8010                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
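                /* Packing sketch for a hypothetical MAC 00:11:22:33:44:55:
                 * the first write above holds 0x00000011 (bytes 0-1) and
                 * the second 0x22334455 (bytes 2-5); vn 0 lands in CAM
                 * entry 1 (byte offset 8), vn 1 in entry 2, and so on.
                 */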
8011
8012                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8013
8014         } else
8015                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8016
8017         /* Close the multi and leading connections;
8018            completions for the ramrods are collected synchronously */
8019         for_each_nondefault_queue(bp, i)
8020                 if (bnx2x_stop_multi(bp, i))
8021                         goto unload_error;
8022
8023         rc = bnx2x_stop_leading(bp);
8024         if (rc) {
8025                 BNX2X_ERR("Stop leading failed!\n");
8026 #ifdef BNX2X_STOP_ON_ERROR
8027                 return -EBUSY;
8028 #else
8029                 goto unload_error;
8030 #endif
8031         }
8032
8033 unload_error:
8034         if (!BP_NOMCP(bp))
8035                 reset_code = bnx2x_fw_command(bp, reset_code);
8036         else {
8037                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
8038                    load_count[0], load_count[1], load_count[2]);
8039                 load_count[0]--;
8040                 load_count[1 + port]--;
8041                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
8042                    load_count[0], load_count[1], load_count[2]);
8043                 if (load_count[0] == 0)
8044                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8045                 else if (load_count[1 + port] == 0)
8046                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8047                 else
8048                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8049         }
8050
8051         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8052             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8053                 bnx2x__link_reset(bp);
8054
8055         /* Reset the chip */
8056         bnx2x_reset_chip(bp, reset_code);
8057
8058         /* Report UNLOAD_DONE to MCP */
8059         if (!BP_NOMCP(bp))
8060                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8061
8062         bp->port.pmf = 0;
8063
8064         /* Free SKBs, SGEs, TPA pool and driver internals */
8065         bnx2x_free_skbs(bp);
8066         for_each_rx_queue(bp, i)
8067                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8068         for_each_rx_queue(bp, i)
8069                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8070         bnx2x_free_mem(bp);
8071
8072         bp->state = BNX2X_STATE_CLOSED;
8073
8074         netif_carrier_off(bp->dev);
8075
8076         return 0;
8077 }
8078
8079 static void bnx2x_reset_task(struct work_struct *work)
8080 {
8081         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8082
8083 #ifdef BNX2X_STOP_ON_ERROR
8084         BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
8085                   " so the reset was not done to allow a debug dump;\n"
8086                   " you will need to reboot when done\n");
8087         return;
8088 #endif
8089
8090         rtnl_lock();
8091
8092         if (!netif_running(bp->dev))
8093                 goto reset_task_exit;
8094
8095         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8096         bnx2x_nic_load(bp, LOAD_NORMAL);
8097
8098 reset_task_exit:
8099         rtnl_unlock();
8100 }
8101
8102 /* end of nic load/unload */
8103
8104 /* ethtool_ops */
8105
8106 /*
8107  * Init service functions
8108  */
8109
8110 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8111 {
8112         switch (func) {
8113         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8114         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8115         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8116         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8117         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8118         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8119         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8120         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8121         default:
8122                 BNX2X_ERR("Unsupported function index: %d\n", func);
8123                 return (u32)(-1);
8124         }
8125 }
8126
8127 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8128 {
8129         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8130
8131         /* Flush all outstanding writes */
8132         mmiowb();
8133
8134         /* Pretend to be function 0 */
8135         REG_WR(bp, reg, 0);
8136         /* Flush the GRC transaction (in the chip) */
8137         new_val = REG_RD(bp, reg);
8138         if (new_val != 0) {
8139                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8140                           new_val);
8141                 BUG();
8142         }
8143
8144         /* From now on we are in "like-E1" mode */
8145         bnx2x_int_disable(bp);
8146
8147         /* Flush all outstanding writes */
8148         mmiowb();
8149
8150         /* Restore the original function settings */
8151         REG_WR(bp, reg, orig_func);
8152         new_val = REG_RD(bp, reg);
8153         if (new_val != orig_func) {
8154                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8155                           orig_func, new_val);
8156                 BUG();
8157         }
8158 }
8159
8160 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8161 {
8162         if (CHIP_IS_E1H(bp))
8163                 bnx2x_undi_int_disable_e1h(bp, func);
8164         else
8165                 bnx2x_int_disable(bp);
8166 }
8167
8168 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8169 {
8170         u32 val;
8171
8172         /* Check if there is any driver already loaded */
8173         val = REG_RD(bp, MISC_REG_UNPREPARED);
8174         if (val == 0x1) {
8175                 /* Check if it is the UNDI driver: UNDI initializes
8176                  * the CID offset for the normal doorbell to 0x7
8177                  */
8178                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8179                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8180                 if (val == 0x7) {
8181                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8182                         /* save our func */
8183                         int func = BP_FUNC(bp);
8184                         u32 swap_en;
8185                         u32 swap_val;
8186
8187                         /* clear the UNDI indication */
8188                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8189
8190                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
8191
8192                         /* try to unload UNDI on port 0 */
8193                         bp->func = 0;
8194                         bp->fw_seq =
8195                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8196                                 DRV_MSG_SEQ_NUMBER_MASK);
8197                         reset_code = bnx2x_fw_command(bp, reset_code);
8198
8199                         /* if UNDI is loaded on the other port */
8200                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8201
8202                                 /* send "DONE" for previous unload */
8203                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8204
8205                                 /* unload UNDI on port 1 */
8206                                 bp->func = 1;
8207                                 bp->fw_seq =
8208                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8209                                         DRV_MSG_SEQ_NUMBER_MASK);
8210                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8211
8212                                 bnx2x_fw_command(bp, reset_code);
8213                         }
8214
8215                         /* now it's safe to release the lock */
8216                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8217
8218                         bnx2x_undi_int_disable(bp, func);
8219
8220                         /* close input traffic and wait for it to stop */
8221                         /* Do not rcv packets to BRB */
8222                         REG_WR(bp,
8223                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8224                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8225                         /* Do not direct rcv packets that are not for MCP to
8226                          * the BRB */
8227                         REG_WR(bp,
8228                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8229                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8230                         /* clear AEU */
8231                         REG_WR(bp,
8232                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8233                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8234                         msleep(10);
8235
8236                         /* save NIG port swap info */
8237                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8238                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8239                         /* reset device */
8240                         REG_WR(bp,
8241                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8242                                0xd3ffffff);
8243                         REG_WR(bp,
8244                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8245                                0x1403);
8246                         /* take the NIG out of reset and restore swap values */
8247                         REG_WR(bp,
8248                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8249                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
8250                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8251                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8252
8253                         /* send unload done to the MCP */
8254                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8255
8256                         /* restore our func and fw_seq */
8257                         bp->func = func;
8258                         bp->fw_seq =
8259                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8260                                 DRV_MSG_SEQ_NUMBER_MASK);
8261
8262                 } else
8263                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8264         }
8265 }
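
/*
 * Summary of the UNDI takeover sequence above:
 *  1. detect UNDI via DORQ_REG_NORM_CID_OFST == 0x7 and clear it;
 *  2. request an MCP unload for port 0 (and for port 1 if UNDI turns
 *     out to be loaded on the other port);
 *  3. disable HC interrupts, pretending to be function 0 on E1H;
 *  4. close NIG input traffic, mask the AEU and let traffic drain;
 *  5. hard-reset the chip, then take the NIG out of reset and restore
 *     the port-swap strapping;
 *  6. send UNLOAD_DONE and restore bp->func and bp->fw_seq.
 */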
8266
8267 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8268 {
8269         u32 val, val2, val3, val4, id;
8270         u16 pmc;
8271
8272         /* Get the chip revision id and number. */
8273         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8274         val = REG_RD(bp, MISC_REG_CHIP_NUM);
8275         id = ((val & 0xffff) << 16);
8276         val = REG_RD(bp, MISC_REG_CHIP_REV);
8277         id |= ((val & 0xf) << 12);
8278         val = REG_RD(bp, MISC_REG_CHIP_METAL);
8279         id |= ((val & 0xff) << 4);
8280         val = REG_RD(bp, MISC_REG_BOND_ID);
8281         id |= (val & 0xf);
8282         bp->common.chip_id = id;
8283         bp->link_params.chip_id = bp->common.chip_id;
8284         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8285
8286         val = (REG_RD(bp, 0x2874) & 0x55);
8287         if ((bp->common.chip_id & 0x1) ||
8288             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8289                 bp->flags |= ONE_PORT_FLAG;
8290                 BNX2X_DEV_INFO("single port device\n");
8291         }
8292
8293         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8294         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8295                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
8296         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8297                        bp->common.flash_size, bp->common.flash_size);
8298
8299         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8300         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8301         bp->link_params.shmem_base = bp->common.shmem_base;
8302         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8303                        bp->common.shmem_base, bp->common.shmem2_base);
8304
8305         if (!bp->common.shmem_base ||
8306             (bp->common.shmem_base < 0xA0000) ||
8307             (bp->common.shmem_base >= 0xC0000)) {
8308                 BNX2X_DEV_INFO("MCP not active\n");
8309                 bp->flags |= NO_MCP_FLAG;
8310                 return;
8311         }
8312
8313         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8314         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8315                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8316                 BNX2X_ERR("BAD MCP validity signature\n");
8317
8318         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8319         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8320
8321         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8322                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8323                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8324
8325         bp->link_params.feature_config_flags = 0;
8326         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8327         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8328                 bp->link_params.feature_config_flags |=
8329                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8330         else
8331                 bp->link_params.feature_config_flags &=
8332                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8333
8334         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8335         bp->common.bc_ver = val;
8336         BNX2X_DEV_INFO("bc_ver %X\n", val);
8337         if (val < BNX2X_BC_VER) {
8338                 /* for now only warn;
8339                  * later we might need to enforce this */
8340                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8341                           " please upgrade BC\n", BNX2X_BC_VER, val);
8342         }
8343         bp->link_params.feature_config_flags |=
8344                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8345                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8346
8347         if (BP_E1HVN(bp) == 0) {
8348                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8349                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8350         } else {
8351                 /* no WOL capability for E1HVN != 0 */
8352                 bp->flags |= NO_WOL_FLAG;
8353         }
8354         BNX2X_DEV_INFO("%sWoL capable\n",
8355                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8356
8357         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8358         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8359         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8360         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8361
8362         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8363                val, val2, val3, val4);
8364 }
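
/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * driver): decoding the chip_id composed at the top of
 * bnx2x_get_common_hwinfo() - chip num in bits 16-31, rev in 12-15,
 * metal in 4-11, bond_id in 0-3.
 */
#if 0
static void bnx2x_show_chip_id(u32 chip_id)
{
	u16 num   = (chip_id >> 16) & 0xffff;
	u8  rev   = (chip_id >> 12) & 0xf;
	u8  metal = (chip_id >> 4) & 0xff;
	u8  bond  = chip_id & 0xf;

	printk(KERN_INFO PFX
	       "chip num 0x%x  rev 0x%x  metal 0x%x  bond_id 0x%x\n",
	       num, rev, metal, bond);
}
#endif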
8365
8366 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8367                                                     u32 switch_cfg)
8368 {
8369         int port = BP_PORT(bp);
8370         u32 ext_phy_type;
8371
8372         switch (switch_cfg) {
8373         case SWITCH_CFG_1G:
8374                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8375
8376                 ext_phy_type =
8377                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8378                 switch (ext_phy_type) {
8379                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8380                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8381                                        ext_phy_type);
8382
8383                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8384                                                SUPPORTED_10baseT_Full |
8385                                                SUPPORTED_100baseT_Half |
8386                                                SUPPORTED_100baseT_Full |
8387                                                SUPPORTED_1000baseT_Full |
8388                                                SUPPORTED_2500baseX_Full |
8389                                                SUPPORTED_TP |
8390                                                SUPPORTED_FIBRE |
8391                                                SUPPORTED_Autoneg |
8392                                                SUPPORTED_Pause |
8393                                                SUPPORTED_Asym_Pause);
8394                         break;
8395
8396                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8397                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8398                                        ext_phy_type);
8399
8400                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8401                                                SUPPORTED_10baseT_Full |
8402                                                SUPPORTED_100baseT_Half |
8403                                                SUPPORTED_100baseT_Full |
8404                                                SUPPORTED_1000baseT_Full |
8405                                                SUPPORTED_TP |
8406                                                SUPPORTED_FIBRE |
8407                                                SUPPORTED_Autoneg |
8408                                                SUPPORTED_Pause |
8409                                                SUPPORTED_Asym_Pause);
8410                         break;
8411
8412                 default:
8413                         BNX2X_ERR("NVRAM config error. "
8414                                   "BAD SerDes ext_phy_config 0x%x\n",
8415                                   bp->link_params.ext_phy_config);
8416                         return;
8417                 }
8418
8419                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8420                                            port*0x10);
8421                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8422                 break;
8423
8424         case SWITCH_CFG_10G:
8425                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8426
8427                 ext_phy_type =
8428                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8429                 switch (ext_phy_type) {
8430                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8431                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8432                                        ext_phy_type);
8433
8434                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8435                                                SUPPORTED_10baseT_Full |
8436                                                SUPPORTED_100baseT_Half |
8437                                                SUPPORTED_100baseT_Full |
8438                                                SUPPORTED_1000baseT_Full |
8439                                                SUPPORTED_2500baseX_Full |
8440                                                SUPPORTED_10000baseT_Full |
8441                                                SUPPORTED_TP |
8442                                                SUPPORTED_FIBRE |
8443                                                SUPPORTED_Autoneg |
8444                                                SUPPORTED_Pause |
8445                                                SUPPORTED_Asym_Pause);
8446                         break;
8447
8448                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8449                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8450                                        ext_phy_type);
8451
8452                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8453                                                SUPPORTED_1000baseT_Full |
8454                                                SUPPORTED_FIBRE |
8455                                                SUPPORTED_Autoneg |
8456                                                SUPPORTED_Pause |
8457                                                SUPPORTED_Asym_Pause);
8458                         break;
8459
8460                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8461                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8462                                        ext_phy_type);
8463
8464                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8465                                                SUPPORTED_2500baseX_Full |
8466                                                SUPPORTED_1000baseT_Full |
8467                                                SUPPORTED_FIBRE |
8468                                                SUPPORTED_Autoneg |
8469                                                SUPPORTED_Pause |
8470                                                SUPPORTED_Asym_Pause);
8471                         break;
8472
8473                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8474                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8475                                        ext_phy_type);
8476
8477                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8478                                                SUPPORTED_FIBRE |
8479                                                SUPPORTED_Pause |
8480                                                SUPPORTED_Asym_Pause);
8481                         break;
8482
8483                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8484                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8485                                        ext_phy_type);
8486
8487                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8488                                                SUPPORTED_1000baseT_Full |
8489                                                SUPPORTED_FIBRE |
8490                                                SUPPORTED_Pause |
8491                                                SUPPORTED_Asym_Pause);
8492                         break;
8493
8494                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8495                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8496                                        ext_phy_type);
8497
8498                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8499                                                SUPPORTED_1000baseT_Full |
8500                                                SUPPORTED_Autoneg |
8501                                                SUPPORTED_FIBRE |
8502                                                SUPPORTED_Pause |
8503                                                SUPPORTED_Asym_Pause);
8504                         break;
8505
8506                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8507                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8508                                        ext_phy_type);
8509
8510                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8511                                                SUPPORTED_1000baseT_Full |
8512                                                SUPPORTED_Autoneg |
8513                                                SUPPORTED_FIBRE |
8514                                                SUPPORTED_Pause |
8515                                                SUPPORTED_Asym_Pause);
8516                         break;
8517
8518                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8519                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8520                                        ext_phy_type);
8521
8522                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8523                                                SUPPORTED_TP |
8524                                                SUPPORTED_Autoneg |
8525                                                SUPPORTED_Pause |
8526                                                SUPPORTED_Asym_Pause);
8527                         break;
8528
8529                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8530                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8531                                        ext_phy_type);
8532
8533                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8534                                                SUPPORTED_10baseT_Full |
8535                                                SUPPORTED_100baseT_Half |
8536                                                SUPPORTED_100baseT_Full |
8537                                                SUPPORTED_1000baseT_Full |
8538                                                SUPPORTED_10000baseT_Full |
8539                                                SUPPORTED_TP |
8540                                                SUPPORTED_Autoneg |
8541                                                SUPPORTED_Pause |
8542                                                SUPPORTED_Asym_Pause);
8543                         break;
8544
8545                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8546                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8547                                   bp->link_params.ext_phy_config);
8548                         break;
8549
8550                 default:
8551                         BNX2X_ERR("NVRAM config error. "
8552                                   "BAD XGXS ext_phy_config 0x%x\n",
8553                                   bp->link_params.ext_phy_config);
8554                         return;
8555                 }
8556
8557                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8558                                            port*0x18);
8559                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8560
8561                 break;
8562
8563         default:
8564                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8565                           bp->port.link_config);
8566                 return;
8567         }
8568         bp->link_params.phy_addr = bp->port.phy_addr;
8569
8570         /* mask what we support according to speed_cap_mask */
8571         if (!(bp->link_params.speed_cap_mask &
8572                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8573                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8574
8575         if (!(bp->link_params.speed_cap_mask &
8576                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8577                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8578
8579         if (!(bp->link_params.speed_cap_mask &
8580                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8581                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8582
8583         if (!(bp->link_params.speed_cap_mask &
8584                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8585                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8586
8587         if (!(bp->link_params.speed_cap_mask &
8588                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8589                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8590                                         SUPPORTED_1000baseT_Full);
8591
8592         if (!(bp->link_params.speed_cap_mask &
8593                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8594                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8595
8596         if (!(bp->link_params.speed_cap_mask &
8597                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8598                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8599
8600         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8601 }
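
/*
 * Illustrative sketch only (hypothetical, not in the driver): a
 * table-driven equivalent of the speed_cap_mask filtering above.
 * Each capability bit gates the SUPPORTED_* flags listed next to it.
 */
#if 0
static const struct {
	u32 cap_bit;
	u32 supported;
} bnx2x_speed_caps[] = {
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF, SUPPORTED_10baseT_Half },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL, SUPPORTED_10baseT_Full },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF, SUPPORTED_100baseT_Half },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL, SUPPORTED_100baseT_Full },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G,
	  SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G, SUPPORTED_2500baseX_Full },
	{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G, SUPPORTED_10000baseT_Full },
};

static void bnx2x_mask_supported(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_speed_caps); i++)
		if (!(bp->link_params.speed_cap_mask &
		      bnx2x_speed_caps[i].cap_bit))
			bp->port.supported &= ~bnx2x_speed_caps[i].supported;
}
#endif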
8602
8603 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8604 {
8605         bp->link_params.req_duplex = DUPLEX_FULL;
8606
8607         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8608         case PORT_FEATURE_LINK_SPEED_AUTO:
8609                 if (bp->port.supported & SUPPORTED_Autoneg) {
8610                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8611                         bp->port.advertising = bp->port.supported;
8612                 } else {
8613                         u32 ext_phy_type =
8614                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8615
8616                         if ((ext_phy_type ==
8617                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8618                             (ext_phy_type ==
8619                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8620                                 /* force 10G, no AN */
8621                                 bp->link_params.req_line_speed = SPEED_10000;
8622                                 bp->port.advertising =
8623                                                 (ADVERTISED_10000baseT_Full |
8624                                                  ADVERTISED_FIBRE);
8625                                 break;
8626                         }
8627                         BNX2X_ERR("NVRAM config error. "
8628                                   "Invalid link_config 0x%x"
8629                                   "  Autoneg not supported\n",
8630                                   bp->port.link_config);
8631                         return;
8632                 }
8633                 break;
8634
8635         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8636                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8637                         bp->link_params.req_line_speed = SPEED_10;
8638                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8639                                                 ADVERTISED_TP);
8640                 } else {
8641                         BNX2X_ERR("NVRAM config error. "
8642                                   "Invalid link_config 0x%x"
8643                                   "  speed_cap_mask 0x%x\n",
8644                                   bp->port.link_config,
8645                                   bp->link_params.speed_cap_mask);
8646                         return;
8647                 }
8648                 break;
8649
8650         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8651                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8652                         bp->link_params.req_line_speed = SPEED_10;
8653                         bp->link_params.req_duplex = DUPLEX_HALF;
8654                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8655                                                 ADVERTISED_TP);
8656                 } else {
8657                         BNX2X_ERR("NVRAM config error. "
8658                                   "Invalid link_config 0x%x"
8659                                   "  speed_cap_mask 0x%x\n",
8660                                   bp->port.link_config,
8661                                   bp->link_params.speed_cap_mask);
8662                         return;
8663                 }
8664                 break;
8665
8666         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8667                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8668                         bp->link_params.req_line_speed = SPEED_100;
8669                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8670                                                 ADVERTISED_TP);
8671                 } else {
8672                         BNX2X_ERR("NVRAM config error. "
8673                                   "Invalid link_config 0x%x"
8674                                   "  speed_cap_mask 0x%x\n",
8675                                   bp->port.link_config,
8676                                   bp->link_params.speed_cap_mask);
8677                         return;
8678                 }
8679                 break;
8680
8681         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8682                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8683                         bp->link_params.req_line_speed = SPEED_100;
8684                         bp->link_params.req_duplex = DUPLEX_HALF;
8685                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8686                                                 ADVERTISED_TP);
8687                 } else {
8688                         BNX2X_ERR("NVRAM config error. "
8689                                   "Invalid link_config 0x%x"
8690                                   "  speed_cap_mask 0x%x\n",
8691                                   bp->port.link_config,
8692                                   bp->link_params.speed_cap_mask);
8693                         return;
8694                 }
8695                 break;
8696
8697         case PORT_FEATURE_LINK_SPEED_1G:
8698                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8699                         bp->link_params.req_line_speed = SPEED_1000;
8700                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8701                                                 ADVERTISED_TP);
8702                 } else {
8703                         BNX2X_ERR("NVRAM config error. "
8704                                   "Invalid link_config 0x%x"
8705                                   "  speed_cap_mask 0x%x\n",
8706                                   bp->port.link_config,
8707                                   bp->link_params.speed_cap_mask);
8708                         return;
8709                 }
8710                 break;
8711
8712         case PORT_FEATURE_LINK_SPEED_2_5G:
8713                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8714                         bp->link_params.req_line_speed = SPEED_2500;
8715                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8716                                                 ADVERTISED_TP);
8717                 } else {
8718                         BNX2X_ERR("NVRAM config error. "
8719                                   "Invalid link_config 0x%x"
8720                                   "  speed_cap_mask 0x%x\n",
8721                                   bp->port.link_config,
8722                                   bp->link_params.speed_cap_mask);
8723                         return;
8724                 }
8725                 break;
8726
8727         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8728         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8729         case PORT_FEATURE_LINK_SPEED_10G_KR:
8730                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8731                         bp->link_params.req_line_speed = SPEED_10000;
8732                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8733                                                 ADVERTISED_FIBRE);
8734                 } else {
8735                         BNX2X_ERR("NVRAM config error. "
8736                                   "Invalid link_config 0x%x"
8737                                   "  speed_cap_mask 0x%x\n",
8738                                   bp->port.link_config,
8739                                   bp->link_params.speed_cap_mask);
8740                         return;
8741                 }
8742                 break;
8743
8744         default:
8745                 BNX2X_ERR("NVRAM config error. "
8746                           "BAD link speed link_config 0x%x\n",
8747                           bp->port.link_config);
8748                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8749                 bp->port.advertising = bp->port.supported;
8750                 break;
8751         }
8752
8753         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8754                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8755         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8756             !(bp->port.supported & SUPPORTED_Autoneg))
8757                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8758
8759         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8760                        "  advertising 0x%x\n",
8761                        bp->link_params.req_line_speed,
8762                        bp->link_params.req_duplex,
8763                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8764 }
8765
8766 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8767 {
8768         mac_hi = cpu_to_be16(mac_hi);
8769         mac_lo = cpu_to_be32(mac_lo);
8770         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8771         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8772 }
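
/*
 * Worked example (values hypothetical): with mac_hi = 0x0010 and
 * mac_lo = 0x18360cd8, the big-endian conversions above produce
 * mac_buf = 00:10:18:36:0c:d8 regardless of host byte order, since
 * cpu_to_be*() is a no-op on big-endian hosts and a byte swap on
 * little-endian ones.
 */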
8773
8774 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8775 {
8776         int port = BP_PORT(bp);
8777         u32 val, val2;
8778         u32 config;
8779         u16 i;
8780         u32 ext_phy_type;
8781
8782         bp->link_params.bp = bp;
8783         bp->link_params.port = port;
8784
8785         bp->link_params.lane_config =
8786                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8787         bp->link_params.ext_phy_config =
8788                 SHMEM_RD(bp,
8789                          dev_info.port_hw_config[port].external_phy_config);
8790         /* BCM8727_NOC => BCM8727 no over current */
8791         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8792             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8793                 bp->link_params.ext_phy_config &=
8794                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8795                 bp->link_params.ext_phy_config |=
8796                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8797                 bp->link_params.feature_config_flags |=
8798                         FEATURE_CONFIG_BCM8727_NOC;
8799         }
8800
8801         bp->link_params.speed_cap_mask =
8802                 SHMEM_RD(bp,
8803                          dev_info.port_hw_config[port].speed_capability_mask);
8804
8805         bp->port.link_config =
8806                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8807
8808         /* Get the 4 lanes xgxs config rx and tx */
8809         for (i = 0; i < 2; i++) {
8810                 val = SHMEM_RD(bp,
8811                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8812                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8813                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8814
8815                 val = SHMEM_RD(bp,
8816                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8817                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8818                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8819         }
8820
8821         /* If the device is capable of WoL, set the default state according
8822          * to the HW
8823          */
8824         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8825         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8826                    (config & PORT_FEATURE_WOL_ENABLED));
8827
8828         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8829                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8830                        bp->link_params.lane_config,
8831                        bp->link_params.ext_phy_config,
8832                        bp->link_params.speed_cap_mask, bp->port.link_config);
8833
8834         bp->link_params.switch_cfg |= (bp->port.link_config &
8835                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8836         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8837
8838         bnx2x_link_settings_requested(bp);
8839
8840         /*
8841          * If connected directly, work with the internal PHY, otherwise, work
8842          * with the external PHY
8843          */
8844         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8845         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8846                 bp->mdio.prtad = bp->link_params.phy_addr;
8847
8848         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8849                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8850                 bp->mdio.prtad =
8851                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8852
8853         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8854         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8855         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8856         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8857         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8858
8859 #ifdef BCM_CNIC
8860         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8861         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8862         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8863 #endif
8864 }
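
/*
 * Note on the xgxs_config loop above: each 32-bit shmem word packs
 * two 16-bit per-lane values, the even lane in the upper half.  E.g.
 * a hypothetical rx word 0x12345678 for lanes (0,1) unpacks to
 * xgxs_config_rx[0] = 0x1234 and xgxs_config_rx[1] = 0x5678.
 */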
8865
8866 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8867 {
8868         int func = BP_FUNC(bp);
8869         u32 val, val2;
8870         int rc = 0;
8871
8872         bnx2x_get_common_hwinfo(bp);
8873
8874         bp->e1hov = 0;
8875         bp->e1hmf = 0;
8876         if (CHIP_IS_E1H(bp)) {
8877                 bp->mf_config =
8878                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8879
8880                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8881                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8882                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8883                         bp->e1hmf = 1;
8884                 BNX2X_DEV_INFO("%s function mode\n",
8885                                IS_E1HMF(bp) ? "multi" : "single");
8886
8887                 if (IS_E1HMF(bp)) {
8888                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8889                                                                 e1hov_tag) &
8890                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8891                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8892                                 bp->e1hov = val;
8893                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8894                                                "(0x%04x)\n",
8895                                                func, bp->e1hov, bp->e1hov);
8896                         } else {
8897                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8898                                           "  aborting\n", func);
8899                                 rc = -EPERM;
8900                         }
8901                 } else {
8902                         if (BP_E1HVN(bp)) {
8903                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8904                                           "  aborting\n", BP_E1HVN(bp));
8905                                 rc = -EPERM;
8906                         }
8907                 }
8908         }
8909
8910         if (!BP_NOMCP(bp)) {
8911                 bnx2x_get_port_hwinfo(bp);
8912
8913                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8914                               DRV_MSG_SEQ_NUMBER_MASK);
8915                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8916         }
8917
8918         if (IS_E1HMF(bp)) {
8919                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8920                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8921                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8922                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8923                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8924                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8925                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8926                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8927                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8928                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8929                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8930                                ETH_ALEN);
8931                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8932                                ETH_ALEN);
8933                 }
8934
8935                 return rc;
8936         }
8937
8938         if (BP_NOMCP(bp)) {
8939                 /* only supposed to happen on emulation/FPGA */
8940                 BNX2X_ERR("warning: random MAC workaround active\n");
8941                 random_ether_addr(bp->dev->dev_addr);
8942                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8943         }
8944
8945         return rc;
8946 }
8947
8948 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8949 {
8950         int func = BP_FUNC(bp);
8951         int timer_interval;
8952         int rc;
8953
8954         /* Disable interrupt handling until HW is initialized */
8955         atomic_set(&bp->intr_sem, 1);
8956         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8957
8958         mutex_init(&bp->port.phy_mutex);
8959 #ifdef BCM_CNIC
8960         mutex_init(&bp->cnic_mutex);
8961 #endif
8962
8963         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8964         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8965
8966         rc = bnx2x_get_hwinfo(bp);
8967
8968         /* need to reset chip if undi was active */
8969         if (!BP_NOMCP(bp))
8970                 bnx2x_undi_unload(bp);
8971
8972         if (CHIP_REV_IS_FPGA(bp))
8973                 printk(KERN_ERR PFX "FPGA detected\n");
8974
8975         if (BP_NOMCP(bp) && (func == 0))
8976                 printk(KERN_ERR PFX
8977                        "MCP disabled, must load devices in order!\n");
8978
8979         /* Set multi queue mode */
8980         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8981             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8982                 printk(KERN_ERR PFX
8983                       "Multi disabled since int_mode requested is not MSI-X\n");
8984                 multi_mode = ETH_RSS_MODE_DISABLED;
8985         }
8986         bp->multi_mode = multi_mode;
8987
8988
8989         /* Set TPA flags */
8990         if (disable_tpa) {
8991                 bp->flags &= ~TPA_ENABLE_FLAG;
8992                 bp->dev->features &= ~NETIF_F_LRO;
8993         } else {
8994                 bp->flags |= TPA_ENABLE_FLAG;
8995                 bp->dev->features |= NETIF_F_LRO;
8996         }
8997
8998         if (CHIP_IS_E1(bp))
8999                 bp->dropless_fc = 0;
9000         else
9001                 bp->dropless_fc = dropless_fc;
9002
9003         bp->mrrs = mrrs;
9004
9005         bp->tx_ring_size = MAX_TX_AVAIL;
9006         bp->rx_ring_size = MAX_RX_AVAIL;
9007
9008         bp->rx_csum = 1;
9009
9010         bp->tx_ticks = 50;
9011         bp->rx_ticks = 25;
9012
9013         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9014         bp->current_interval = (poll ? poll : timer_interval);
9015
9016         init_timer(&bp->timer);
9017         bp->timer.expires = jiffies + bp->current_interval;
9018         bp->timer.data = (unsigned long) bp;
9019         bp->timer.function = bnx2x_timer;
9020
9021         return rc;
9022 }
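
/*
 * The timer armed above drives bnx2x_timer(): every HZ jiffies
 * (one second) on real silicon, every 5*HZ on slow emulation/FPGA,
 * or at the interval given by the "poll" module parameter when set.
 */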
9023
9024 /*
9025  * ethtool service functions
9026  */
9027
9028 /* All ethtool functions called with rtnl_lock */
9029
9030 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9031 {
9032         struct bnx2x *bp = netdev_priv(dev);
9033
9034         cmd->supported = bp->port.supported;
9035         cmd->advertising = bp->port.advertising;
9036
9037         if (netif_carrier_ok(dev)) {
9038                 cmd->speed = bp->link_vars.line_speed;
9039                 cmd->duplex = bp->link_vars.duplex;
9040                 if (IS_E1HMF(bp)) {
9041                         u16 vn_max_rate;
9042
9043                         vn_max_rate =
9044                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9045                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
9046                         if (vn_max_rate < cmd->speed)
9047                                 cmd->speed = vn_max_rate;
9048                 }
9049         } else {
9050                 cmd->speed = -1;
9051                 cmd->duplex = -1;
9052         }
9053
9054         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9055                 u32 ext_phy_type =
9056                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9057
9058                 switch (ext_phy_type) {
9059                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9060                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9061                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9062                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9063                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9064                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9065                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9066                         cmd->port = PORT_FIBRE;
9067                         break;
9068
9069                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9070                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9071                         cmd->port = PORT_TP;
9072                         break;
9073
9074                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9075                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9076                                   bp->link_params.ext_phy_config);
9077                         break;
9078
9079                 default:
9080                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9081                            bp->link_params.ext_phy_config);
9082                         break;
9083                 }
9084         } else
9085                 cmd->port = PORT_TP;
9086
9087         cmd->phy_address = bp->mdio.prtad;
9088         cmd->transceiver = XCVR_INTERNAL;
9089
9090         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9091                 cmd->autoneg = AUTONEG_ENABLE;
9092         else
9093                 cmd->autoneg = AUTONEG_DISABLE;
9094
9095         cmd->maxtxpkt = 0;
9096         cmd->maxrxpkt = 0;
9097
9098         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9099            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9100            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9101            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9102            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9103            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9104            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9105
9106         return 0;
9107 }
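
/*
 * Illustrative sketch only (hypothetical helper): the per-VN rate cap
 * applied above.  In E1H multi-function mode the MAX_BW field of
 * mf_config is in units of 100 Mbps, so e.g. MAX_BW = 25 clamps the
 * speed reported for a 10G link to 2500 Mbps.
 */
#if 0
static u16 bnx2x_vn_max_rate(u32 mf_config)
{
	return ((mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
		FUNC_MF_CFG_MAX_BW_SHIFT) * 100;	/* Mbps */
}
#endif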
9108
9109 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9110 {
9111         struct bnx2x *bp = netdev_priv(dev);
9112         u32 advertising;
9113
9114         if (IS_E1HMF(bp))
9115                 return 0;
9116
9117         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9118            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9119            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9120            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9121            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9122            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9123            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9124
9125         if (cmd->autoneg == AUTONEG_ENABLE) {
9126                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9127                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9128                         return -EINVAL;
9129                 }
9130
9131                 /* advertise the requested speed and duplex if supported */
9132                 cmd->advertising &= bp->port.supported;
9133
9134                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9135                 bp->link_params.req_duplex = DUPLEX_FULL;
9136                 bp->port.advertising |= (ADVERTISED_Autoneg |
9137                                          cmd->advertising);
9138
9139         } else { /* forced speed */
9140                 /* advertise the requested speed and duplex if supported */
9141                 switch (cmd->speed) {
9142                 case SPEED_10:
9143                         if (cmd->duplex == DUPLEX_FULL) {
9144                                 if (!(bp->port.supported &
9145                                       SUPPORTED_10baseT_Full)) {
9146                                         DP(NETIF_MSG_LINK,
9147                                            "10M full not supported\n");
9148                                         return -EINVAL;
9149                                 }
9150
9151                                 advertising = (ADVERTISED_10baseT_Full |
9152                                                ADVERTISED_TP);
9153                         } else {
9154                                 if (!(bp->port.supported &
9155                                       SUPPORTED_10baseT_Half)) {
9156                                         DP(NETIF_MSG_LINK,
9157                                            "10M half not supported\n");
9158                                         return -EINVAL;
9159                                 }
9160
9161                                 advertising = (ADVERTISED_10baseT_Half |
9162                                                ADVERTISED_TP);
9163                         }
9164                         break;
9165
9166                 case SPEED_100:
9167                         if (cmd->duplex == DUPLEX_FULL) {
9168                                 if (!(bp->port.supported &
9169                                                 SUPPORTED_100baseT_Full)) {
9170                                         DP(NETIF_MSG_LINK,
9171                                            "100M full not supported\n");
9172                                         return -EINVAL;
9173                                 }
9174
9175                                 advertising = (ADVERTISED_100baseT_Full |
9176                                                ADVERTISED_TP);
9177                         } else {
9178                                 if (!(bp->port.supported &
9179                                                 SUPPORTED_100baseT_Half)) {
9180                                         DP(NETIF_MSG_LINK,
9181                                            "100M half not supported\n");
9182                                         return -EINVAL;
9183                                 }
9184
9185                                 advertising = (ADVERTISED_100baseT_Half |
9186                                                ADVERTISED_TP);
9187                         }
9188                         break;
9189
9190                 case SPEED_1000:
9191                         if (cmd->duplex != DUPLEX_FULL) {
9192                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
9193                                 return -EINVAL;
9194                         }
9195
9196                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9197                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
9198                                 return -EINVAL;
9199                         }
9200
9201                         advertising = (ADVERTISED_1000baseT_Full |
9202                                        ADVERTISED_TP);
9203                         break;
9204
9205                 case SPEED_2500:
9206                         if (cmd->duplex != DUPLEX_FULL) {
9207                                 DP(NETIF_MSG_LINK,
9208                                    "2.5G half not supported\n");
9209                                 return -EINVAL;
9210                         }
9211
9212                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9213                                 DP(NETIF_MSG_LINK,
9214                                    "2.5G full not supported\n");
9215                                 return -EINVAL;
9216                         }
9217
9218                         advertising = (ADVERTISED_2500baseX_Full |
9219                                        ADVERTISED_TP);
9220                         break;
9221
9222                 case SPEED_10000:
9223                         if (cmd->duplex != DUPLEX_FULL) {
9224                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
9225                                 return -EINVAL;
9226                         }
9227
9228                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9229                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
9230                                 return -EINVAL;
9231                         }
9232
9233                         advertising = (ADVERTISED_10000baseT_Full |
9234                                        ADVERTISED_FIBRE);
9235                         break;
9236
9237                 default:
9238                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
9239                         return -EINVAL;
9240                 }
9241
9242                 bp->link_params.req_line_speed = cmd->speed;
9243                 bp->link_params.req_duplex = cmd->duplex;
9244                 bp->port.advertising = advertising;
9245         }
9246
9247         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9248            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
9249            bp->link_params.req_line_speed, bp->link_params.req_duplex,
9250            bp->port.advertising);
9251
9252         if (netif_running(dev)) {
9253                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9254                 bnx2x_link_set(bp);
9255         }
9256
9257         return 0;
9258 }
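
/*
 * Usage note (illustrative): a forced-speed request such as
 *
 *	ethtool -s ethX speed 10000 duplex full autoneg off
 *
 * arrives with cmd->autoneg == AUTONEG_DISABLE and lands in the
 * SPEED_10000 branch above; the new settings are applied via
 * bnx2x_link_set() only while the interface is running, otherwise
 * they take effect on the next ifup.
 */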
9259
9260 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9261 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9262
9263 static int bnx2x_get_regs_len(struct net_device *dev)
9264 {
9265         struct bnx2x *bp = netdev_priv(dev);
9266         int regdump_len = 0;
9267         int i;
9268
9269         if (CHIP_IS_E1(bp)) {
9270                 for (i = 0; i < REGS_COUNT; i++)
9271                         if (IS_E1_ONLINE(reg_addrs[i].info))
9272                                 regdump_len += reg_addrs[i].size;
9273
9274                 for (i = 0; i < WREGS_COUNT_E1; i++)
9275                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9276                                 regdump_len += wreg_addrs_e1[i].size *
9277                                         (1 + wreg_addrs_e1[i].read_regs_count);
9278
9279         } else { /* E1H */
9280                 for (i = 0; i < REGS_COUNT; i++)
9281                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9282                                 regdump_len += reg_addrs[i].size;
9283
9284                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9285                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9286                                 regdump_len += wreg_addrs_e1h[i].size *
9287                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9288         }
9289         regdump_len *= 4;
9290         regdump_len += sizeof(struct dump_hdr);
9291
9292         return regdump_len;
9293 }
9294
9295 static void bnx2x_get_regs(struct net_device *dev,
9296                            struct ethtool_regs *regs, void *_p)
9297 {
9298         u32 *p = _p, i, j;
9299         struct bnx2x *bp = netdev_priv(dev);
9300         struct dump_hdr dump_hdr = {0};
9301
9302         regs->version = 0;
9303         memset(p, 0, regs->len);
9304
9305         if (!netif_running(bp->dev))
9306                 return;
9307
9308         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9309         dump_hdr.dump_sign = dump_sign_all;
9310         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9311         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9312         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9313         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9314         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9315
9316         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9317         p += dump_hdr.hdr_size + 1;
9318
9319         if (CHIP_IS_E1(bp)) {
9320                 for (i = 0; i < REGS_COUNT; i++)
9321                         if (IS_E1_ONLINE(reg_addrs[i].info))
9322                                 for (j = 0; j < reg_addrs[i].size; j++)
9323                                         *p++ = REG_RD(bp,
9324                                                       reg_addrs[i].addr + j*4);
9325
9326         } else { /* E1H */
9327                 for (i = 0; i < REGS_COUNT; i++)
9328                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9329                                 for (j = 0; j < reg_addrs[i].size; j++)
9330                                         *p++ = REG_RD(bp,
9331                                                       reg_addrs[i].addr + j*4);
9332         }
9333 }
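
/*
 * Layout of the dump produced above, in 32-bit words:
 *
 *	[ dump_hdr ][ regs of block 0 ][ regs of block 1 ] ...
 *
 * dump_hdr.hdr_size is stored as (header words - 1), hence the
 * payload pointer advances by hdr_size + 1.
 */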
9334
9335 #define PHY_FW_VER_LEN                  10
9336
9337 static void bnx2x_get_drvinfo(struct net_device *dev,
9338                               struct ethtool_drvinfo *info)
9339 {
9340         struct bnx2x *bp = netdev_priv(dev);
9341         u8 phy_fw_ver[PHY_FW_VER_LEN];
9342
9343         strcpy(info->driver, DRV_MODULE_NAME);
9344         strcpy(info->version, DRV_MODULE_VERSION);
9345
9346         phy_fw_ver[0] = '\0';
9347         if (bp->port.pmf) {
9348                 bnx2x_acquire_phy_lock(bp);
9349                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9350                                              (bp->state != BNX2X_STATE_CLOSED),
9351                                              phy_fw_ver, PHY_FW_VER_LEN);
9352                 bnx2x_release_phy_lock(bp);
9353         }
9354
9355         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9356                  (bp->common.bc_ver & 0xff0000) >> 16,
9357                  (bp->common.bc_ver & 0xff00) >> 8,
9358                  (bp->common.bc_ver & 0xff),
9359                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9360         strcpy(info->bus_info, pci_name(bp->pdev));
9361         info->n_stats = BNX2X_NUM_STATS;
9362         info->testinfo_len = BNX2X_NUM_TESTS;
9363         info->eedump_len = bp->common.flash_size;
9364         info->regdump_len = bnx2x_get_regs_len(dev);
9365 }
9366
9367 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9368 {
9369         struct bnx2x *bp = netdev_priv(dev);
9370
9371         if (bp->flags & NO_WOL_FLAG) {
9372                 wol->supported = 0;
9373                 wol->wolopts = 0;
9374         } else {
9375                 wol->supported = WAKE_MAGIC;
9376                 if (bp->wol)
9377                         wol->wolopts = WAKE_MAGIC;
9378                 else
9379                         wol->wolopts = 0;
9380         }
9381         memset(&wol->sopass, 0, sizeof(wol->sopass));
9382 }
9383
9384 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9385 {
9386         struct bnx2x *bp = netdev_priv(dev);
9387
9388         if (wol->wolopts & ~WAKE_MAGIC)
9389                 return -EINVAL;
9390
9391         if (wol->wolopts & WAKE_MAGIC) {
9392                 if (bp->flags & NO_WOL_FLAG)
9393                         return -EINVAL;
9394
9395                 bp->wol = 1;
9396         } else
9397                 bp->wol = 0;
9398
9399         return 0;
9400 }
9401
9402 static u32 bnx2x_get_msglevel(struct net_device *dev)
9403 {
9404         struct bnx2x *bp = netdev_priv(dev);
9405
9406         return bp->msglevel;
9407 }
9408
9409 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9410 {
9411         struct bnx2x *bp = netdev_priv(dev);
9412
9413         if (capable(CAP_NET_ADMIN))
9414                 bp->msglevel = level;
9415 }
9416
9417 static int bnx2x_nway_reset(struct net_device *dev)
9418 {
9419         struct bnx2x *bp = netdev_priv(dev);
9420
9421         if (!bp->port.pmf)
9422                 return 0;
9423
9424         if (netif_running(dev)) {
9425                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9426                 bnx2x_link_set(bp);
9427         }
9428
9429         return 0;
9430 }
9431
9432 static u32 bnx2x_get_link(struct net_device *dev)
9433 {
9434         struct bnx2x *bp = netdev_priv(dev);
9435
9436         return bp->link_vars.link_up;
9437 }
9438
9439 static int bnx2x_get_eeprom_len(struct net_device *dev)
9440 {
9441         struct bnx2x *bp = netdev_priv(dev);
9442
9443         return bp->common.flash_size;
9444 }
9445
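/*
 * NVRAM access is arbitrated between the ports in software: a port
 * requests the lock by setting its REQ bit in MCPR_NVM_SW_ARB and
 * then polls until the matching ARB bit indicates ownership. The
 * poll budget is scaled up for emulation/FPGA, where everything runs
 * slower than on real silicon.
 */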
9446 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9447 {
9448         int port = BP_PORT(bp);
9449         int count, i;
9450         u32 val = 0;
9451
9452         /* adjust timeout for emulation/FPGA */
9453         count = NVRAM_TIMEOUT_COUNT;
9454         if (CHIP_REV_IS_SLOW(bp))
9455                 count *= 100;
9456
9457         /* request access to nvram interface */
9458         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9459                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9460
9461         for (i = 0; i < count*10; i++) {
9462                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9463                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9464                         break;
9465
9466                 udelay(5);
9467         }
9468
9469         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9470                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9471                 return -EBUSY;
9472         }
9473
9474         return 0;
9475 }
9476
9477 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9478 {
9479         int port = BP_PORT(bp);
9480         int count, i;
9481         u32 val = 0;
9482
9483         /* adjust timeout for emulation/FPGA */
9484         count = NVRAM_TIMEOUT_COUNT;
9485         if (CHIP_REV_IS_SLOW(bp))
9486                 count *= 100;
9487
9488         /* relinquish nvram interface */
9489         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9490                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9491
9492         for (i = 0; i < count*10; i++) {
9493                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9494                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9495                         break;
9496
9497                 udelay(5);
9498         }
9499
9500         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9501                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9502                 return -EBUSY;
9503         }
9504
9505         return 0;
9506 }
9507
9508 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9509 {
9510         u32 val;
9511
9512         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9513
9514         /* enable both bits, even on read */
9515         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9516                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9517                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9518 }
9519
9520 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9521 {
9522         u32 val;
9523
9524         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9525
9526         /* disable both bits, even after read */
9527         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9528                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9529                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9530 }
9531
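/*
 * Single-dword NVRAM read: clear the sticky DONE bit, program the
 * byte address, then issue DOIT and poll MCPR_NVM_COMMAND until DONE
 * is set, at which point the data is taken from MCPR_NVM_READ.
 */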
9532 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9533                                   u32 cmd_flags)
9534 {
9535         int count, i, rc;
9536         u32 val;
9537
9538         /* build the command word */
9539         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9540
9541         /* need to clear DONE bit separately */
9542         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9543
9544         /* address of the NVRAM to read from */
9545         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9546                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9547
9548         /* issue a read command */
9549         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9550
9551         /* adjust timeout for emulation/FPGA */
9552         count = NVRAM_TIMEOUT_COUNT;
9553         if (CHIP_REV_IS_SLOW(bp))
9554                 count *= 100;
9555
9556         /* wait for completion */
9557         *ret_val = 0;
9558         rc = -EBUSY;
9559         for (i = 0; i < count; i++) {
9560                 udelay(5);
9561                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9562
9563                 if (val & MCPR_NVM_COMMAND_DONE) {
9564                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9565                         /* NVRAM data is read in CPU order,
9566                          * but ethtool expects an array of bytes;
9567                          * converting to big-endian keeps the byte order */
9568                         *ret_val = cpu_to_be32(val);
9569                         rc = 0;
9570                         break;
9571                 }
9572         }
9573
9574         return rc;
9575 }
9576
9577 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9578                             int buf_size)
9579 {
9580         int rc;
9581         u32 cmd_flags;
9582         __be32 val;
9583
9584         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9585                 DP(BNX2X_MSG_NVM,
9586                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9587                    offset, buf_size);
9588                 return -EINVAL;
9589         }
9590
9591         if (offset + buf_size > bp->common.flash_size) {
9592                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9593                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9594                    offset, buf_size, bp->common.flash_size);
9595                 return -EINVAL;
9596         }
9597
9598         /* request access to nvram interface */
9599         rc = bnx2x_acquire_nvram_lock(bp);
9600         if (rc)
9601                 return rc;
9602
9603         /* enable access to nvram interface */
9604         bnx2x_enable_nvram_access(bp);
9605
9606         /* read the first word(s) */
9607         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9608         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9609                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9610                 memcpy(ret_buf, &val, 4);
9611
9612                 /* advance to the next dword */
9613                 offset += sizeof(u32);
9614                 ret_buf += sizeof(u32);
9615                 buf_size -= sizeof(u32);
9616                 cmd_flags = 0;
9617         }
9618
9619         if (rc == 0) {
9620                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9621                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9622                 memcpy(ret_buf, &val, 4);
9623         }
9624
9625         /* disable access to nvram interface */
9626         bnx2x_disable_nvram_access(bp);
9627         bnx2x_release_nvram_lock(bp);
9628
9629         return rc;
9630 }
9631
9632 static int bnx2x_get_eeprom(struct net_device *dev,
9633                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9634 {
9635         struct bnx2x *bp = netdev_priv(dev);
9636         int rc;
9637
9638         if (!netif_running(dev))
9639                 return -EAGAIN;
9640
9641         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9642            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9643            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9644            eeprom->len, eeprom->len);
9645
9646         /* parameters already validated in ethtool_get_eeprom */
9647
9648         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9649
9650         return rc;
9651 }
9652
9653 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9654                                    u32 cmd_flags)
9655 {
9656         int count, i, rc;
9657
9658         /* build the command word */
9659         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9660
9661         /* need to clear DONE bit separately */
9662         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9663
9664         /* write the data */
9665         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9666
9667         /* address of the NVRAM to write to */
9668         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9669                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9670
9671         /* issue the write command */
9672         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9673
9674         /* adjust timeout for emulation/FPGA */
9675         count = NVRAM_TIMEOUT_COUNT;
9676         if (CHIP_REV_IS_SLOW(bp))
9677                 count *= 100;
9678
9679         /* wait for completion */
9680         rc = -EBUSY;
9681         for (i = 0; i < count; i++) {
9682                 udelay(5);
9683                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9684                 if (val & MCPR_NVM_COMMAND_DONE) {
9685                         rc = 0;
9686                         break;
9687                 }
9688         }
9689
9690         return rc;
9691 }
9692
9693 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9694
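/*
 * Single-byte NVRAM write (used by ethtool for 1-byte accesses):
 * read the aligned dword containing the byte, patch the target byte
 * in place using BYTE_OFFSET() to locate it, and write the dword back.
 */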
9695 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9696                               int buf_size)
9697 {
9698         int rc;
9699         u32 cmd_flags;
9700         u32 align_offset;
9701         __be32 val;
9702
9703         if (offset + buf_size > bp->common.flash_size) {
9704                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9705                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9706                    offset, buf_size, bp->common.flash_size);
9707                 return -EINVAL;
9708         }
9709
9710         /* request access to nvram interface */
9711         rc = bnx2x_acquire_nvram_lock(bp);
9712         if (rc)
9713                 return rc;
9714
9715         /* enable access to nvram interface */
9716         bnx2x_enable_nvram_access(bp);
9717
9718         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9719         align_offset = (offset & ~0x03);
9720         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9721
9722         if (rc == 0) {
9723                 val &= ~(0xff << BYTE_OFFSET(offset));
9724                 val |= (*data_buf << BYTE_OFFSET(offset));
9725
9726                 /* NVRAM data is returned as an array of bytes;
9727                  * convert it back to CPU order */
9728                 val = be32_to_cpu(val);
9729
9730                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9731                                              cmd_flags);
9732         }
9733
9734         /* disable access to nvram interface */
9735         bnx2x_disable_nvram_access(bp);
9736         bnx2x_release_nvram_lock(bp);
9737
9738         return rc;
9739 }
9740
9741 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9742                              int buf_size)
9743 {
9744         int rc;
9745         u32 cmd_flags;
9746         u32 val;
9747         u32 written_so_far;
9748
9749         if (buf_size == 1)      /* ethtool */
9750                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9751
9752         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9753                 DP(BNX2X_MSG_NVM,
9754                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9755                    offset, buf_size);
9756                 return -EINVAL;
9757         }
9758
9759         if (offset + buf_size > bp->common.flash_size) {
9760                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9761                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9762                    offset, buf_size, bp->common.flash_size);
9763                 return -EINVAL;
9764         }
9765
9766         /* request access to nvram interface */
9767         rc = bnx2x_acquire_nvram_lock(bp);
9768         if (rc)
9769                 return rc;
9770
9771         /* enable access to nvram interface */
9772         bnx2x_enable_nvram_access(bp);
9773
9774         written_so_far = 0;
9775         cmd_flags = MCPR_NVM_COMMAND_FIRST;
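        /* FIRST marks the first dword of a page access and LAST the
         * final one; a dword that ends on an NVRAM page boundary must
         * close the current page, and the dword that starts the next
         * page must open a new access.
         */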
9776         while ((written_so_far < buf_size) && (rc == 0)) {
9777                 if (written_so_far == (buf_size - sizeof(u32)))
9778                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9779                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9780                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9781                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9782                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9783
9784                 memcpy(&val, data_buf, 4);
9785
9786                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9787
9788                 /* advance to the next dword */
9789                 offset += sizeof(u32);
9790                 data_buf += sizeof(u32);
9791                 written_so_far += sizeof(u32);
9792                 cmd_flags = 0;
9793         }
9794
9795         /* disable access to nvram interface */
9796         bnx2x_disable_nvram_access(bp);
9797         bnx2x_release_nvram_lock(bp);
9798
9799         return rc;
9800 }
9801
9802 static int bnx2x_set_eeprom(struct net_device *dev,
9803                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9804 {
9805         struct bnx2x *bp = netdev_priv(dev);
9806         int port = BP_PORT(bp);
9807         int rc = 0;
9808
9809         if (!netif_running(dev))
9810                 return -EAGAIN;
9811
9812         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9813            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9814            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9815            eeprom->len, eeprom->len);
9816
9817         /* parameters already validated in ethtool_set_eeprom */
9818
9819         /* PHY eeprom can be accessed only by the PMF */
9820         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9821             !bp->port.pmf)
9822                 return -EINVAL;
9823
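        /* Magic values in the 'PHY*' ASCII range drive the external PHY
         * firmware upgrade flow instead of a plain NVRAM write:
         * 'PHYP' prepares the PHY, 'PHYR' re-initializes the link and
         * 'PHYC' completes the upgrade.
         */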
9824         if (eeprom->magic == 0x50485950) {
9825                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9826                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9827
9828                 bnx2x_acquire_phy_lock(bp);
9829                 rc |= bnx2x_link_reset(&bp->link_params,
9830                                        &bp->link_vars, 0);
9831                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9832                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9833                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9834                                        MISC_REGISTERS_GPIO_HIGH, port);
9835                 bnx2x_release_phy_lock(bp);
9836                 bnx2x_link_report(bp);
9837
9838         } else if (eeprom->magic == 0x50485952) {
9839                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9840                 if ((bp->state == BNX2X_STATE_OPEN) ||
9841                     (bp->state == BNX2X_STATE_DISABLED)) {
9842                         bnx2x_acquire_phy_lock(bp);
9843                         rc |= bnx2x_link_reset(&bp->link_params,
9844                                                &bp->link_vars, 1);
9845
9846                         rc |= bnx2x_phy_init(&bp->link_params,
9847                                              &bp->link_vars);
9848                         bnx2x_release_phy_lock(bp);
9849                         bnx2x_calc_fc_adv(bp);
9850                 }
9851         } else if (eeprom->magic == 0x50485943) {
9852                 /* 'PHYC' (0x50485943): PHY FW upgrade completed */
9853                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9854                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9855                         u8 ext_phy_addr =
9856                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9857
9858                         /* DSP Remove Download Mode */
9859                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9860                                        MISC_REGISTERS_GPIO_LOW, port);
9861
9862                         bnx2x_acquire_phy_lock(bp);
9863
9864                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9865
9866                         /* wait 0.5 sec to allow it to run */
9867                         msleep(500);
9868                         bnx2x_ext_phy_hw_reset(bp, port);
9869                         msleep(500);
9870                         bnx2x_release_phy_lock(bp);
9871                 }
9872         } else
9873                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9874
9875         return rc;
9876 }
9877
9878 static int bnx2x_get_coalesce(struct net_device *dev,
9879                               struct ethtool_coalesce *coal)
9880 {
9881         struct bnx2x *bp = netdev_priv(dev);
9882
9883         memset(coal, 0, sizeof(struct ethtool_coalesce));
9884
9885         coal->rx_coalesce_usecs = bp->rx_ticks;
9886         coal->tx_coalesce_usecs = bp->tx_ticks;
9887
9888         return 0;
9889 }
9890
9891 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
9892 static int bnx2x_set_coalesce(struct net_device *dev,
9893                               struct ethtool_coalesce *coal)
9894 {
9895         struct bnx2x *bp = netdev_priv(dev);
9896
9897         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9898         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9899                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9900
9901         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9902         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9903                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9904
9905         if (netif_running(dev))
9906                 bnx2x_update_coalesce(bp);
9907
9908         return 0;
9909 }
9910
9911 static void bnx2x_get_ringparam(struct net_device *dev,
9912                                 struct ethtool_ringparam *ering)
9913 {
9914         struct bnx2x *bp = netdev_priv(dev);
9915
9916         ering->rx_max_pending = MAX_RX_AVAIL;
9917         ering->rx_mini_max_pending = 0;
9918         ering->rx_jumbo_max_pending = 0;
9919
9920         ering->rx_pending = bp->rx_ring_size;
9921         ering->rx_mini_pending = 0;
9922         ering->rx_jumbo_pending = 0;
9923
9924         ering->tx_max_pending = MAX_TX_AVAIL;
9925         ering->tx_pending = bp->tx_ring_size;
9926 }
9927
9928 static int bnx2x_set_ringparam(struct net_device *dev,
9929                                struct ethtool_ringparam *ering)
9930 {
9931         struct bnx2x *bp = netdev_priv(dev);
9932         int rc = 0;
9933
9934         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9935             (ering->tx_pending > MAX_TX_AVAIL) ||
9936             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9937                 return -EINVAL;
9938
9939         bp->rx_ring_size = ering->rx_pending;
9940         bp->tx_ring_size = ering->tx_pending;
9941
9942         if (netif_running(dev)) {
9943                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9944                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9945         }
9946
9947         return rc;
9948 }
9949
9950 static void bnx2x_get_pauseparam(struct net_device *dev,
9951                                  struct ethtool_pauseparam *epause)
9952 {
9953         struct bnx2x *bp = netdev_priv(dev);
9954
9955         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9956                            BNX2X_FLOW_CTRL_AUTO) &&
9957                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9958
9959         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9960                             BNX2X_FLOW_CTRL_RX);
9961         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9962                             BNX2X_FLOW_CTRL_TX);
9963
9964         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9965            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9966            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9967 }
9968
9969 static int bnx2x_set_pauseparam(struct net_device *dev,
9970                                 struct ethtool_pauseparam *epause)
9971 {
9972         struct bnx2x *bp = netdev_priv(dev);
9973
9974         if (IS_E1HMF(bp))
9975                 return 0;
9976
9977         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9978            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9979            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9980
9981         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9982
9983         if (epause->rx_pause)
9984                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9985
9986         if (epause->tx_pause)
9987                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9988
9989         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9990                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9991
9992         if (epause->autoneg) {
9993                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9994                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9995                         return -EINVAL;
9996                 }
9997
9998                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9999                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10000         }
10001
10002         DP(NETIF_MSG_LINK,
10003            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10004
10005         if (netif_running(dev)) {
10006                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10007                 bnx2x_link_set(bp);
10008         }
10009
10010         return 0;
10011 }
10012
10013 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10014 {
10015         struct bnx2x *bp = netdev_priv(dev);
10016         int changed = 0;
10017         int rc = 0;
10018
10019         /* TPA requires Rx CSUM offloading */
10020         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10021                 if (!(dev->features & NETIF_F_LRO)) {
10022                         dev->features |= NETIF_F_LRO;
10023                         bp->flags |= TPA_ENABLE_FLAG;
10024                         changed = 1;
10025                 }
10026
10027         } else if (dev->features & NETIF_F_LRO) {
10028                 dev->features &= ~NETIF_F_LRO;
10029                 bp->flags &= ~TPA_ENABLE_FLAG;
10030                 changed = 1;
10031         }
10032
10033         if (changed && netif_running(dev)) {
10034                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10035                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10036         }
10037
10038         return rc;
10039 }
10040
10041 static u32 bnx2x_get_rx_csum(struct net_device *dev)
10042 {
10043         struct bnx2x *bp = netdev_priv(dev);
10044
10045         return bp->rx_csum;
10046 }
10047
10048 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10049 {
10050         struct bnx2x *bp = netdev_priv(dev);
10051         int rc = 0;
10052
10053         bp->rx_csum = data;
10054
10055         /* Disable TPA when Rx CSUM is disabled; otherwise all
10056            TPA'ed packets will be discarded due to a wrong TCP CSUM */
10057         if (!data) {
10058                 u32 flags = ethtool_op_get_flags(dev);
10059
10060                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10061         }
10062
10063         return rc;
10064 }
10065
10066 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10067 {
10068         if (data) {
10069                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10070                 dev->features |= NETIF_F_TSO6;
10071         } else {
10072                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10073                 dev->features &= ~NETIF_F_TSO6;
10074         }
10075
10076         return 0;
10077 }
10078
10079 static const struct {
10080         char string[ETH_GSTRING_LEN];
10081 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10082         { "register_test (offline)" },
10083         { "memory_test (offline)" },
10084         { "loopback_test (offline)" },
10085         { "nvram_test (online)" },
10086         { "interrupt_test (online)" },
10087         { "link_test (online)" },
10088         { "idle check (online)" }
10089 };
10090
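/*
 * Register self-test: for each entry in reg_tbl, write a known pattern
 * (all zeros, then all ones) to the per-port instance of the register,
 * read it back through the given mask and restore the original value.
 * offset1 is the per-port stride; mask covers the implemented bits.
 */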
10091 static int bnx2x_test_registers(struct bnx2x *bp)
10092 {
10093         int idx, i, rc = -ENODEV;
10094         u32 wr_val = 0;
10095         int port = BP_PORT(bp);
10096         static const struct {
10097                 u32  offset0;
10098                 u32  offset1;
10099                 u32  mask;
10100         } reg_tbl[] = {
10101 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
10102                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
10103                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
10104                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
10105                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
10106                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
10107                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
10108                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
10109                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
10110                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
10111 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
10112                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
10113                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
10114                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
10115                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
10116                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10117                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
10118                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
10119                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
10120                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
10121 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
10122                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
10123                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
10124                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
10125                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
10126                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
10127                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
10128                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
10129                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
10130                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
10131 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
10132                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
10133                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
10134                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10135                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
10136                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10137                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
10138
10139                 { 0xffffffff, 0, 0x00000000 }
10140         };
10141
10142         if (!netif_running(bp->dev))
10143                 return rc;
10144
10145         /* Run the test twice:
10146            first writing 0x00000000, then writing 0xffffffff */
10147         for (idx = 0; idx < 2; idx++) {
10148
10149                 switch (idx) {
10150                 case 0:
10151                         wr_val = 0;
10152                         break;
10153                 case 1:
10154                         wr_val = 0xffffffff;
10155                         break;
10156                 }
10157
10158                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10159                         u32 offset, mask, save_val, val;
10160
10161                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10162                         mask = reg_tbl[i].mask;
10163
10164                         save_val = REG_RD(bp, offset);
10165
10166                         REG_WR(bp, offset, wr_val);
10167                         val = REG_RD(bp, offset);
10168
10169                         /* Restore the original register's value */
10170                         REG_WR(bp, offset, save_val);
10171
10172                         /* verify that the value read back matches what was written */
10173                         if ((val & mask) != (wr_val & mask))
10174                                 goto test_reg_exit;
10175                 }
10176         }
10177
10178         rc = 0;
10179
10180 test_reg_exit:
10181         return rc;
10182 }
10183
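/*
 * Memory self-test: read every word of the listed internal memories
 * (reads are enough to trigger parity checking), then scan the parity
 * status registers. Bits listed in the per-chip masks are expected
 * and ignored; any other set bit fails the test.
 */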
10184 static int bnx2x_test_memory(struct bnx2x *bp)
10185 {
10186         int i, j, rc = -ENODEV;
10187         u32 val;
10188         static const struct {
10189                 u32 offset;
10190                 int size;
10191         } mem_tbl[] = {
10192                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
10193                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10194                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
10195                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
10196                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
10197                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
10198                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
10199
10200                 { 0xffffffff, 0 }
10201         };
10202         static const struct {
10203                 char *name;
10204                 u32 offset;
10205                 u32 e1_mask;
10206                 u32 e1h_mask;
10207         } prty_tbl[] = {
10208                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
10209                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
10210                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
10211                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
10212                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
10213                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
10214
10215                 { NULL, 0xffffffff, 0, 0 }
10216         };
10217
10218         if (!netif_running(bp->dev))
10219                 return rc;
10220
10221         /* Go through all the memories */
10222         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10223                 for (j = 0; j < mem_tbl[i].size; j++)
10224                         REG_RD(bp, mem_tbl[i].offset + j*4);
10225
10226         /* Check the parity status */
10227         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10228                 val = REG_RD(bp, prty_tbl[i].offset);
10229                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10230                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10231                         DP(NETIF_MSG_HW,
10232                            "%s is 0x%x\n", prty_tbl[i].name, val);
10233                         goto test_mem_exit;
10234                 }
10235         }
10236
10237         rc = 0;
10238
10239 test_mem_exit:
10240         return rc;
10241 }
10242
10243 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10244 {
10245         int cnt = 1000;
10246
10247         if (link_up)
10248                 while (bnx2x_link_test(bp) && cnt--)
10249                         msleep(10);
10250 }
10251
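/*
 * Loopback self-test: build a single frame (our own MAC as the
 * destination, a counting byte pattern as payload), push it through
 * one Tx BD pair (start BD + parse BD) and verify that exactly one
 * packet comes back on the Rx ring with the expected length and
 * payload.
 */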
10252 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10253 {
10254         unsigned int pkt_size, num_pkts, i;
10255         struct sk_buff *skb;
10256         unsigned char *packet;
10257         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10258         struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
10259         u16 tx_start_idx, tx_idx;
10260         u16 rx_start_idx, rx_idx;
10261         u16 pkt_prod, bd_prod;
10262         struct sw_tx_bd *tx_buf;
10263         struct eth_tx_start_bd *tx_start_bd;
10264         struct eth_tx_parse_bd *pbd = NULL;
10265         dma_addr_t mapping;
10266         union eth_rx_cqe *cqe;
10267         u8 cqe_fp_flags;
10268         struct sw_rx_bd *rx_buf;
10269         u16 len;
10270         int rc = -ENODEV;
10271
10272         /* check the loopback mode */
10273         switch (loopback_mode) {
10274         case BNX2X_PHY_LOOPBACK:
10275                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10276                         return -EINVAL;
10277                 break;
10278         case BNX2X_MAC_LOOPBACK:
10279                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10280                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10281                 break;
10282         default:
10283                 return -EINVAL;
10284         }
10285
10286         /* prepare the loopback packet */
10287         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10288                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10289         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10290         if (!skb) {
10291                 rc = -ENOMEM;
10292                 goto test_loopback_exit;
10293         }
10294         packet = skb_put(skb, pkt_size);
10295         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10296         memset(packet + ETH_ALEN, 0, ETH_ALEN);
10297         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10298         for (i = ETH_HLEN; i < pkt_size; i++)
10299                 packet[i] = (unsigned char) (i & 0xff);
10300
10301         /* send the loopback packet */
10302         num_pkts = 0;
10303         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10304         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10305
10306         pkt_prod = fp_tx->tx_pkt_prod++;
10307         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10308         tx_buf->first_bd = fp_tx->tx_bd_prod;
10309         tx_buf->skb = skb;
10310         tx_buf->flags = 0;
10311
10312         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10313         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10314         mapping = pci_map_single(bp->pdev, skb->data,
10315                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10316         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10317         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10318         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10319         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10320         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10321         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10322         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10323                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10324
10325         /* turn on parsing and get a BD */
10326         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10327         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10328
10329         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10330
10331         wmb();
10332
10333         fp_tx->tx_db.data.prod += 2;
10334         barrier();
10335         DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10336
10337         mmiowb();
10338
10339         num_pkts++;
10340         fp_tx->tx_bd_prod += 2; /* start + pbd */
10341         bp->dev->trans_start = jiffies;
10342
10343         udelay(100);
10344
10345         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10346         if (tx_idx != tx_start_idx + num_pkts)
10347                 goto test_loopback_exit;
10348
10349         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10350         if (rx_idx != rx_start_idx + num_pkts)
10351                 goto test_loopback_exit;
10352
10353         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10354         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10355         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10356                 goto test_loopback_rx_exit;
10357
10358         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10359         if (len != pkt_size)
10360                 goto test_loopback_rx_exit;
10361
10362         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10363         skb = rx_buf->skb;
10364         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10365         for (i = ETH_HLEN; i < pkt_size; i++)
10366                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10367                         goto test_loopback_rx_exit;
10368
10369         rc = 0;
10370
10371 test_loopback_rx_exit:
10372
10373         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10374         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10375         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10376         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10377
10378         /* Update producers */
10379         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10380                              fp_rx->rx_sge_prod);
10381
10382 test_loopback_exit:
10383         bp->link_params.loopback_mode = LOOPBACK_NONE;
10384
10385         return rc;
10386 }
10387
10388 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10389 {
10390         int rc = 0, res;
10391
10392         if (!netif_running(bp->dev))
10393                 return BNX2X_LOOPBACK_FAILED;
10394
10395         bnx2x_netif_stop(bp, 1);
10396         bnx2x_acquire_phy_lock(bp);
10397
10398         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10399         if (res) {
10400                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10401                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10402         }
10403
10404         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10405         if (res) {
10406                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10407                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10408         }
10409
10410         bnx2x_release_phy_lock(bp);
10411         bnx2x_netif_start(bp);
10412
10413         return rc;
10414 }
10415
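/*
 * CRC-32 residual: running ether_crc_le() over a block that already
 * contains its own little-endian CRC-32 at the end always yields this
 * constant, so each NVRAM section can be validated without knowing
 * where its CRC field sits.
 */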
10416 #define CRC32_RESIDUAL                  0xdebb20e3
10417
10418 static int bnx2x_test_nvram(struct bnx2x *bp)
10419 {
10420         static const struct {
10421                 int offset;
10422                 int size;
10423         } nvram_tbl[] = {
10424                 {     0,  0x14 }, /* bootstrap */
10425                 {  0x14,  0xec }, /* dir */
10426                 { 0x100, 0x350 }, /* manuf_info */
10427                 { 0x450,  0xf0 }, /* feature_info */
10428                 { 0x640,  0x64 }, /* upgrade_key_info */
10429                 { 0x6a4,  0x64 },
10430                 { 0x708,  0x70 }, /* manuf_key_info */
10431                 { 0x778,  0x70 },
10432                 {     0,     0 }
10433         };
10434         __be32 buf[0x350 / 4];
10435         u8 *data = (u8 *)buf;
10436         int i, rc;
10437         u32 magic, crc;
10438
10439         rc = bnx2x_nvram_read(bp, 0, data, 4);
10440         if (rc) {
10441                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10442                 goto test_nvram_exit;
10443         }
10444
10445         magic = be32_to_cpu(buf[0]);
10446         if (magic != 0x669955aa) {
10447                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10448                 rc = -ENODEV;
10449                 goto test_nvram_exit;
10450         }
10451
10452         for (i = 0; nvram_tbl[i].size; i++) {
10453
10454                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10455                                       nvram_tbl[i].size);
10456                 if (rc) {
10457                         DP(NETIF_MSG_PROBE,
10458                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10459                         goto test_nvram_exit;
10460                 }
10461
10462                 crc = ether_crc_le(nvram_tbl[i].size, data);
10463                 if (crc != CRC32_RESIDUAL) {
10464                         DP(NETIF_MSG_PROBE,
10465                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10466                         rc = -ENODEV;
10467                         goto test_nvram_exit;
10468                 }
10469         }
10470
10471 test_nvram_exit:
10472         return rc;
10473 }
10474
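/*
 * Interrupt self-test: post a harmless (zero-length) set-MAC ramrod
 * on the slowpath queue and wait for its completion to clear
 * set_mac_pending, which proves the interrupt path is alive.
 */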
10475 static int bnx2x_test_intr(struct bnx2x *bp)
10476 {
10477         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10478         int i, rc;
10479
10480         if (!netif_running(bp->dev))
10481                 return -ENODEV;
10482
10483         config->hdr.length = 0;
10484         if (CHIP_IS_E1(bp))
10485                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10486         else
10487                 config->hdr.offset = BP_FUNC(bp);
10488         config->hdr.client_id = bp->fp->cl_id;
10489         config->hdr.reserved1 = 0;
10490
10491         bp->set_mac_pending++;
10492         smp_wmb();
10493         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10494                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10495                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10496         if (rc == 0) {
10497                 for (i = 0; i < 10; i++) {
10498                         if (!bp->set_mac_pending)
10499                                 break;
10500                         smp_rmb();
10501                         msleep_interruptible(10);
10502                 }
10503                 if (i == 10)
10504                         rc = -ENODEV;
10505         }
10506
10507         return rc;
10508 }
10509
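/*
 * ethtool self-test entry point. The buf[] slots match
 * bnx2x_tests_str_arr: 0 registers, 1 memory, 2 loopback, 3 nvram,
 * 4 interrupt, 5 link. Offline tests reload the NIC in diagnostic
 * mode with the Tx port interface gated off.
 */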
10510 static void bnx2x_self_test(struct net_device *dev,
10511                             struct ethtool_test *etest, u64 *buf)
10512 {
10513         struct bnx2x *bp = netdev_priv(dev);
10514
10515         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10516
10517         if (!netif_running(dev))
10518                 return;
10519
10520         /* offline tests are not supported in MF mode */
10521         if (IS_E1HMF(bp))
10522                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10523
10524         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10525                 int port = BP_PORT(bp);
10526                 u32 val;
10527                 u8 link_up;
10528
10529                 /* save current value of input enable for TX port IF */
10530                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10531                 /* disable input for TX port IF */
10532                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10533
10534                 link_up = bp->link_vars.link_up;
10535                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10536                 bnx2x_nic_load(bp, LOAD_DIAG);
10537                 /* wait until link state is restored */
10538                 bnx2x_wait_for_link(bp, link_up);
10539
10540                 if (bnx2x_test_registers(bp) != 0) {
10541                         buf[0] = 1;
10542                         etest->flags |= ETH_TEST_FL_FAILED;
10543                 }
10544                 if (bnx2x_test_memory(bp) != 0) {
10545                         buf[1] = 1;
10546                         etest->flags |= ETH_TEST_FL_FAILED;
10547                 }
10548                 buf[2] = bnx2x_test_loopback(bp, link_up);
10549                 if (buf[2] != 0)
10550                         etest->flags |= ETH_TEST_FL_FAILED;
10551
10552                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10553
10554                 /* restore input for TX port IF */
10555                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10556
10557                 bnx2x_nic_load(bp, LOAD_NORMAL);
10558                 /* wait until link state is restored */
10559                 bnx2x_wait_for_link(bp, link_up);
10560         }
10561         if (bnx2x_test_nvram(bp) != 0) {
10562                 buf[3] = 1;
10563                 etest->flags |= ETH_TEST_FL_FAILED;
10564         }
10565         if (bnx2x_test_intr(bp) != 0) {
10566                 buf[4] = 1;
10567                 etest->flags |= ETH_TEST_FL_FAILED;
10568         }
10569         if (bp->port.pmf)
10570                 if (bnx2x_link_test(bp) != 0) {
10571                         buf[5] = 1;
10572                         etest->flags |= ETH_TEST_FL_FAILED;
10573                 }
10574
10575 #ifdef BNX2X_EXTRA_DEBUG
10576         bnx2x_panic_dump(bp);
10577 #endif
10578 }
10579
10580 static const struct {
10581         long offset;
10582         int size;
10583         u8 string[ETH_GSTRING_LEN];
10584 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10585 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10586         { Q_STATS_OFFSET32(error_bytes_received_hi),
10587                                                 8, "[%d]: rx_error_bytes" },
10588         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10589                                                 8, "[%d]: rx_ucast_packets" },
10590         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10591                                                 8, "[%d]: rx_mcast_packets" },
10592         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10593                                                 8, "[%d]: rx_bcast_packets" },
10594         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10595         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10596                                          4, "[%d]: rx_phy_ip_err_discards"},
10597         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10598                                          4, "[%d]: rx_skb_alloc_discard" },
10599         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10600
10601 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10602         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10603                                                         8, "[%d]: tx_packets" }
10604 };
10605
10606 static const struct {
10607         long offset;
10608         int size;
10609         u32 flags;
10610 #define STATS_FLAGS_PORT                1
10611 #define STATS_FLAGS_FUNC                2
10612 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10613         u8 string[ETH_GSTRING_LEN];
10614 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10615 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10616                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10617         { STATS_OFFSET32(error_bytes_received_hi),
10618                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10619         { STATS_OFFSET32(total_unicast_packets_received_hi),
10620                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10621         { STATS_OFFSET32(total_multicast_packets_received_hi),
10622                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10623         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10624                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10625         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10626                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10627         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10628                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10629         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10630                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10631         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10632                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10633 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10634                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10635         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10636                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10637         { STATS_OFFSET32(no_buff_discard_hi),
10638                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10639         { STATS_OFFSET32(mac_filter_discard),
10640                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10641         { STATS_OFFSET32(xxoverflow_discard),
10642                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10643         { STATS_OFFSET32(brb_drop_hi),
10644                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10645         { STATS_OFFSET32(brb_truncate_hi),
10646                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10647         { STATS_OFFSET32(pause_frames_received_hi),
10648                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10649         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10650                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10651         { STATS_OFFSET32(nig_timer_max),
10652                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10653 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10654                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10655         { STATS_OFFSET32(rx_skb_alloc_failed),
10656                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10657         { STATS_OFFSET32(hw_csum_err),
10658                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10659
10660         { STATS_OFFSET32(total_bytes_transmitted_hi),
10661                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10662         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10663                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10664         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10665                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10666         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10667                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10668         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10669                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10670         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10671                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10672         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10673                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10674 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10675                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10676         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10677                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10678         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10679                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10680         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10681                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10682         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10683                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10684         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10685                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10686         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10687                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10688         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10689                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10690         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10691                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10692         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10693                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10694 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10695                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10696         { STATS_OFFSET32(pause_frames_sent_hi),
10697                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10698 };
10699
10700 #define IS_PORT_STAT(i) \
10701         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10702 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10703 #define IS_E1HMF_MODE_STAT(bp) \
10704                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10705
10706 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10707 {
10708         struct bnx2x *bp = netdev_priv(dev);
10709         int i, num_stats;
10710
10711         switch (stringset) {
10712         case ETH_SS_STATS:
10713                 if (is_multi(bp)) {
10714                         num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10715                         if (!IS_E1HMF_MODE_STAT(bp))
10716                                 num_stats += BNX2X_NUM_STATS;
10717                 } else {
10718                         if (IS_E1HMF_MODE_STAT(bp)) {
10719                                 num_stats = 0;
10720                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
10721                                         if (IS_FUNC_STAT(i))
10722                                                 num_stats++;
10723                         } else
10724                                 num_stats = BNX2X_NUM_STATS;
10725                 }
10726                 return num_stats;
10727
10728         case ETH_SS_TEST:
10729                 return BNX2X_NUM_TESTS;
10730
10731         default:
10732                 return -EINVAL;
10733         }
10734 }
10735
10736 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10737 {
10738         struct bnx2x *bp = netdev_priv(dev);
10739         int i, j, k;
10740
10741         switch (stringset) {
10742         case ETH_SS_STATS:
10743                 if (is_multi(bp)) {
10744                         k = 0;
10745                         for_each_rx_queue(bp, i) {
10746                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10747                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10748                                                 bnx2x_q_stats_arr[j].string, i);
10749                                 k += BNX2X_NUM_Q_STATS;
10750                         }
10751                         if (IS_E1HMF_MODE_STAT(bp))
10752                                 break;
10753                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10754                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10755                                        bnx2x_stats_arr[j].string);
10756                 } else {
10757                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10758                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10759                                         continue;
10760                                 strcpy(buf + j*ETH_GSTRING_LEN,
10761                                        bnx2x_stats_arr[i].string);
10762                                 j++;
10763                         }
10764                 }
10765                 break;
10766
10767         case ETH_SS_TEST:
10768                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10769                 break;
10770         }
10771 }
10772
10773 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10774                                     struct ethtool_stats *stats, u64 *buf)
10775 {
10776         struct bnx2x *bp = netdev_priv(dev);
10777         u32 *hw_stats, *offset;
10778         int i, j, k;
10779
10780         if (is_multi(bp)) {
10781                 k = 0;
10782                 for_each_rx_queue(bp, i) {
10783                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10784                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10785                                 if (bnx2x_q_stats_arr[j].size == 0) {
10786                                         /* skip this counter */
10787                                         buf[k + j] = 0;
10788                                         continue;
10789                                 }
10790                                 offset = (hw_stats +
10791                                           bnx2x_q_stats_arr[j].offset);
10792                                 if (bnx2x_q_stats_arr[j].size == 4) {
10793                                         /* 4-byte counter */
10794                                         buf[k + j] = (u64) *offset;
10795                                         continue;
10796                                 }
10797                                 /* 8-byte counter */
10798                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10799                         }
10800                         k += BNX2X_NUM_Q_STATS;
10801                 }
10802                 if (IS_E1HMF_MODE_STAT(bp))
10803                         return;
10804                 hw_stats = (u32 *)&bp->eth_stats;
10805                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10806                         if (bnx2x_stats_arr[j].size == 0) {
10807                                 /* skip this counter */
10808                                 buf[k + j] = 0;
10809                                 continue;
10810                         }
10811                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10812                         if (bnx2x_stats_arr[j].size == 4) {
10813                                 /* 4-byte counter */
10814                                 buf[k + j] = (u64) *offset;
10815                                 continue;
10816                         }
10817                         /* 8-byte counter */
10818                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10819                 }
10820         } else {
10821                 hw_stats = (u32 *)&bp->eth_stats;
10822                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10823                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10824                                 continue;
10825                         if (bnx2x_stats_arr[i].size == 0) {
10826                                 /* skip this counter */
10827                                 buf[j] = 0;
10828                                 j++;
10829                                 continue;
10830                         }
10831                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10832                         if (bnx2x_stats_arr[i].size == 4) {
10833                                 /* 4-byte counter */
10834                                 buf[j] = (u64) *offset;
10835                                 j++;
10836                                 continue;
10837                         }
10838                         /* 8-byte counter */
10839                         buf[j] = HILO_U64(*offset, *(offset + 1));
10840                         j++;
10841                 }
10842         }
10843 }
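/* Note: the 8-byte counters above are laid out as two 32-bit words with the
 * high word first, which is why HILO_U64(*offset, *(offset + 1)) reassembles
 * them as (((u64)hi << 32) + lo).
 */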
10844
10845 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10846 {
10847         struct bnx2x *bp = netdev_priv(dev);
10848         int port = BP_PORT(bp);
10849         int i;
10850
10851         if (!netif_running(dev))
10852                 return 0;
10853
10854         if (!bp->port.pmf)
10855                 return 0;
10856
10857         if (data == 0)
10858                 data = 2;
10859
10860         for (i = 0; i < (data * 2); i++) {
10861                 if ((i % 2) == 0)
10862                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10863                                       bp->link_params.hw_led_mode,
10864                                       bp->link_params.chip_id);
10865                 else
10866                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10867                                       bp->link_params.hw_led_mode,
10868                                       bp->link_params.chip_id);
10869
10870                 msleep_interruptible(500);
10871                 if (signal_pending(current))
10872                         break;
10873         }
10874
10875         if (bp->link_vars.link_up)
10876                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10877                               bp->link_vars.line_speed,
10878                               bp->link_params.hw_led_mode,
10879                               bp->link_params.chip_id);
10880
10881         return 0;
10882 }
10883
10884 static const struct ethtool_ops bnx2x_ethtool_ops = {
10885         .get_settings           = bnx2x_get_settings,
10886         .set_settings           = bnx2x_set_settings,
10887         .get_drvinfo            = bnx2x_get_drvinfo,
10888         .get_regs_len           = bnx2x_get_regs_len,
10889         .get_regs               = bnx2x_get_regs,
10890         .get_wol                = bnx2x_get_wol,
10891         .set_wol                = bnx2x_set_wol,
10892         .get_msglevel           = bnx2x_get_msglevel,
10893         .set_msglevel           = bnx2x_set_msglevel,
10894         .nway_reset             = bnx2x_nway_reset,
10895         .get_link               = bnx2x_get_link,
10896         .get_eeprom_len         = bnx2x_get_eeprom_len,
10897         .get_eeprom             = bnx2x_get_eeprom,
10898         .set_eeprom             = bnx2x_set_eeprom,
10899         .get_coalesce           = bnx2x_get_coalesce,
10900         .set_coalesce           = bnx2x_set_coalesce,
10901         .get_ringparam          = bnx2x_get_ringparam,
10902         .set_ringparam          = bnx2x_set_ringparam,
10903         .get_pauseparam         = bnx2x_get_pauseparam,
10904         .set_pauseparam         = bnx2x_set_pauseparam,
10905         .get_rx_csum            = bnx2x_get_rx_csum,
10906         .set_rx_csum            = bnx2x_set_rx_csum,
10907         .get_tx_csum            = ethtool_op_get_tx_csum,
10908         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10909         .set_flags              = bnx2x_set_flags,
10910         .get_flags              = ethtool_op_get_flags,
10911         .get_sg                 = ethtool_op_get_sg,
10912         .set_sg                 = ethtool_op_set_sg,
10913         .get_tso                = ethtool_op_get_tso,
10914         .set_tso                = bnx2x_set_tso,
10915         .self_test              = bnx2x_self_test,
10916         .get_sset_count         = bnx2x_get_sset_count,
10917         .get_strings            = bnx2x_get_strings,
10918         .phys_id                = bnx2x_phys_id,
10919         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10920 };
10921
10922 /* end of ethtool_ops */
10923
10924 /****************************************************************************
10925 * General service functions
10926 ****************************************************************************/
10927
10928 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10929 {
10930         u16 pmcsr;
10931
10932         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10933
10934         switch (state) {
10935         case PCI_D0:
10936                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10937                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10938                                        PCI_PM_CTRL_PME_STATUS));
10939
10940                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10941                         /* delay required during transition out of D3hot */
10942                         msleep(20);
10943                 break;
10944
10945         case PCI_D3hot:
10946                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10947                 pmcsr |= 3;             /* D3hot */
10948
10949                 if (bp->wol)
10950                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10951
10952                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10953                                       pmcsr);
10954
10955                 /* No more memory access after this point until
10956                  * device is brought back to D0.
10957                  */
10958                 break;
10959
10960         default:
10961                 return -EINVAL;
10962         }
10963         return 0;
10964 }
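/* Note on the PMCSR bits above: PCI_PM_CTRL_STATE_MASK is the PMCSR
 * power-state field (0 = D0, 3 = D3hot); writing PCI_PM_CTRL_PME_STATUS on
 * the way to D0 clears a pending PME (the bit is write-one-to-clear), and
 * PCI_PM_CTRL_PME_ENABLE arms PME generation for wake-on-LAN when bp->wol
 * is set.
 */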
10965
10966 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10967 {
10968         u16 rx_cons_sb;
10969
10970         /* Tell compiler that status block fields can change */
10971         barrier();
10972         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10973         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10974                 rx_cons_sb++;
10975         return (fp->rx_comp_cons != rx_cons_sb);
10976 }
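/* Note on the check above: the last entry of each RCQ page is reserved as a
 * link to the next page, so a status-block consumer index that lands on it
 * ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) is bumped past it
 * before being compared with rx_comp_cons.
 */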
10977
10978 /*
10979  * net_device service functions
10980  */
10981
10982 static int bnx2x_poll(struct napi_struct *napi, int budget)
10983 {
10984         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10985                                                  napi);
10986         struct bnx2x *bp = fp->bp;
10987         int work_done = 0;
10988
10989 #ifdef BNX2X_STOP_ON_ERROR
10990         if (unlikely(bp->panic))
10991                 goto poll_panic;
10992 #endif
10993
10994         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10995         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10996
10997         bnx2x_update_fpsb_idx(fp);
10998
10999         if (bnx2x_has_rx_work(fp)) {
11000                 work_done = bnx2x_rx_int(fp, budget);
11001
11002                 /* must not complete if we consumed full budget */
11003                 if (work_done >= budget)
11004                         goto poll_again;
11005         }
11006
11007         /* bnx2x_has_rx_work() reads the status block, thus we need to
11008          * ensure that status block indices have been actually read
11009          * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
11010          * so that we won't write the "newer" value of the status block to IGU
11011          * (if there was a DMA right after bnx2x_has_rx_work and
11012          * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
11013          * may be postponed to right before bnx2x_ack_sb). In this case
11014          * there will never be another interrupt until there is another update
11015          * of the status block, while there is still unhandled work.
11016          */
11017         rmb();
11018
11019         if (!bnx2x_has_rx_work(fp)) {
11020 #ifdef BNX2X_STOP_ON_ERROR
11021 poll_panic:
11022 #endif
11023                 napi_complete(napi);
11024
11025                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
11026                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
11027                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11028                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
11029         }
11030
11031 poll_again:
11032         return work_done;
11033 }
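/* Note: bnx2x_poll() above follows the usual NAPI re-check pattern: refresh
 * the status-block indices, process up to budget packets, and only
 * napi_complete() and re-enable the IGU interrupt after a second
 * bnx2x_has_rx_work() check, with rmb() keeping the index read and the
 * re-check ordered.
 */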
11034
11035
11036 /* We split the first BD into header and data BDs
11037  * to ease the pain of our fellow microcode engineers;
11038  * we use one mapping for both BDs.
11039  * So far this has only been observed to happen
11040  * in Other Operating Systems(TM)
11041  */
11042 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11043                                    struct bnx2x_fastpath *fp,
11044                                    struct sw_tx_bd *tx_buf,
11045                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
11046                                    u16 bd_prod, int nbd)
11047 {
11048         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11049         struct eth_tx_bd *d_tx_bd;
11050         dma_addr_t mapping;
11051         int old_len = le16_to_cpu(h_tx_bd->nbytes);
11052
11053         /* first fix first BD */
11054         h_tx_bd->nbd = cpu_to_le16(nbd);
11055         h_tx_bd->nbytes = cpu_to_le16(hlen);
11056
11057         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11058            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11059            h_tx_bd->addr_lo, h_tx_bd->nbd);
11060
11061         /* now get a new data BD
11062          * (after the pbd) and fill it */
11063         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11064         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11065
11066         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11067                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11068
11069         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11070         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11071         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11072
11073         /* this marks the BD as one that has no individual mapping */
11074         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11075
11076         DP(NETIF_MSG_TX_QUEUED,
11077            "TSO split data size is %d (%x:%x)\n",
11078            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11079
11080         /* update tx_bd */
11081         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11082
11083         return bd_prod;
11084 }
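/* Note: after bnx2x_tx_split() both BDs point into the single DMA mapping of
 * the linear data - the header BD covers [addr, addr + hlen) and the new
 * data BD covers [addr + hlen, addr + old_len) - which is why
 * BNX2X_TSO_SPLIT_BD tells the completion path that the data BD has no
 * mapping of its own to unmap.
 */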
11085
11086 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11087 {
11088         if (fix > 0)
11089                 csum = (u16) ~csum_fold(csum_sub(csum,
11090                                 csum_partial(t_header - fix, fix, 0)));
11091
11092         else if (fix < 0)
11093                 csum = (u16) ~csum_fold(csum_add(csum,
11094                                 csum_partial(t_header, -fix, 0)));
11095
11096         return swab16(csum);
11097 }
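/* Worked example of the fixup above: with fix > 0 the stack's partial sum
 * started fix bytes before the transport header, so
 * csum_partial(t_header - fix, fix, 0) is subtracted before folding; with
 * fix < 0 the sum started inside the header and csum_add() supplies the
 * missing -fix bytes. The folded result is swab16()ed to match the byte
 * order the parsing BD expects (the same conversion applied to
 * tcp_hdr(skb)->check in the TSO path below).
 */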
11098
11099 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11100 {
11101         u32 rc;
11102
11103         if (skb->ip_summed != CHECKSUM_PARTIAL)
11104                 rc = XMIT_PLAIN;
11105
11106         else {
11107                 if (skb->protocol == htons(ETH_P_IPV6)) {
11108                         rc = XMIT_CSUM_V6;
11109                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11110                                 rc |= XMIT_CSUM_TCP;
11111
11112                 } else {
11113                         rc = XMIT_CSUM_V4;
11114                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11115                                 rc |= XMIT_CSUM_TCP;
11116                 }
11117         }
11118
11119         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11120                 rc |= XMIT_GSO_V4;
11121
11122         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11123                 rc |= XMIT_GSO_V6;
11124
11125         return rc;
11126 }
11127
11128 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11129 /* check if packet requires linearization (packet is too fragmented)
11130    no need to check fragmentation if page size > 8K (there will be no
11131    violation of FW restrictions) */
11132 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11133                              u32 xmit_type)
11134 {
11135         int to_copy = 0;
11136         int hlen = 0;
11137         int first_bd_sz = 0;
11138
11139         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11140         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11141
11142                 if (xmit_type & XMIT_GSO) {
11143                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11144                         /* Check if LSO packet needs to be copied:
11145                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11146                         int wnd_size = MAX_FETCH_BD - 3;
11147                         /* Number of windows to check */
11148                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11149                         int wnd_idx = 0;
11150                         int frag_idx = 0;
11151                         u32 wnd_sum = 0;
11152
11153                         /* Headers length */
11154                         hlen = (int)(skb_transport_header(skb) - skb->data) +
11155                                 tcp_hdrlen(skb);
11156
11157                         /* Amount of data (w/o headers) on the linear part of the SKB */
11158                         first_bd_sz = skb_headlen(skb) - hlen;
11159
11160                         wnd_sum  = first_bd_sz;
11161
11162                         /* Calculate the first sum - it's special */
11163                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11164                                 wnd_sum +=
11165                                         skb_shinfo(skb)->frags[frag_idx].size;
11166
11167                         /* If there was data on linear skb data - check it */
11168                         if (first_bd_sz > 0) {
11169                                 if (unlikely(wnd_sum < lso_mss)) {
11170                                         to_copy = 1;
11171                                         goto exit_lbl;
11172                                 }
11173
11174                                 wnd_sum -= first_bd_sz;
11175                         }
11176
11177                         /* Others are easier: run through the frag list and
11178                            check all windows */
11179                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11180                                 wnd_sum +=
11181                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11182
11183                                 if (unlikely(wnd_sum < lso_mss)) {
11184                                         to_copy = 1;
11185                                         break;
11186                                 }
11187                                 wnd_sum -=
11188                                         skb_shinfo(skb)->frags[wnd_idx].size;
11189                         }
11190                 } else {
11191                         /* in non-LSO too fragmented packet should always
11192                         /* in the non-LSO case, a too-fragmented packet
11193                            should always be linearized */
11194                 }
11195         }
11196
11197 exit_lbl:
11198         if (unlikely(to_copy))
11199                 DP(NETIF_MSG_TX_QUEUED,
11200                    "Linearization IS REQUIRED for %s packet. "
11201                    "num_frags %d  hlen %d  first_bd_sz %d\n",
11202                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11203                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11204
11205         return to_copy;
11206 }
11207 #endif
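/* Illustration of the window check above, with hypothetical numbers: for
 * wnd_size = 8 and lso_mss = 9000, nine 1400-byte frags give every window of
 * 8 consecutive frags a sum of 11200 >= 9000, so no copy is needed; shrink
 * the frags to 1000 bytes and a window sums to only 8000 < 9000, so the skb
 * must be linearized before transmission.
 */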
11208
11209 /* called with netif_tx_lock
11210  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11211  * netif_wake_queue()
11212  */
11213 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11214 {
11215         struct bnx2x *bp = netdev_priv(dev);
11216         struct bnx2x_fastpath *fp, *fp_stat;
11217         struct netdev_queue *txq;
11218         struct sw_tx_bd *tx_buf;
11219         struct eth_tx_start_bd *tx_start_bd;
11220         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11221         struct eth_tx_parse_bd *pbd = NULL;
11222         u16 pkt_prod, bd_prod;
11223         int nbd, fp_index;
11224         dma_addr_t mapping;
11225         u32 xmit_type = bnx2x_xmit_type(bp, skb);
11226         int i;
11227         u8 hlen = 0;
11228         __le16 pkt_size = 0;
11229
11230 #ifdef BNX2X_STOP_ON_ERROR
11231         if (unlikely(bp->panic))
11232                 return NETDEV_TX_BUSY;
11233 #endif
11234
11235         fp_index = skb_get_queue_mapping(skb);
11236         txq = netdev_get_tx_queue(dev, fp_index);
11237
11238         fp = &bp->fp[fp_index + bp->num_rx_queues];
11239         fp_stat = &bp->fp[fp_index];
11240
11241         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11242                 fp_stat->eth_q_stats.driver_xoff++;
11243                 netif_tx_stop_queue(txq);
11244                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11245                 return NETDEV_TX_BUSY;
11246         }
11247
11248         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
11249            "  gso type %x  xmit_type %x\n",
11250            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11251            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11252
11253 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11254         /* First, check if we need to linearize the skb (due to FW
11255            restrictions). No need to check fragmentation if page size > 8K
11256            (there will be no violation of FW restrictions) */
11257         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11258                 /* Statistics of linearization */
11259                 bp->lin_cnt++;
11260                 if (skb_linearize(skb) != 0) {
11261                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11262                            "silently dropping this SKB\n");
11263                         dev_kfree_skb_any(skb);
11264                         return NETDEV_TX_OK;
11265                 }
11266         }
11267 #endif
11268
11269         /*
11270          * Please read carefully. First we use one BD which we mark as start,
11271          * then we have a parsing info BD (used for TSO or xsum),
11272          * and only then we have the rest of the TSO BDs.
11273          * (don't forget to mark the last one as last,
11274          * and to unmap only AFTER you write to the BD ...)
11275          * And above all, all pbd sizes are in words - NOT DWORDS!
11276          */
11277
11278         pkt_prod = fp->tx_pkt_prod++;
11279         bd_prod = TX_BD(fp->tx_bd_prod);
11280
11281         /* get a tx_buf and first BD */
11282         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11283         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11284
11285         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11286         tx_start_bd->general_data = (UNICAST_ADDRESS <<
11287                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11288         /* header nbd */
11289         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11290
11291         /* remember the first BD of the packet */
11292         tx_buf->first_bd = fp->tx_bd_prod;
11293         tx_buf->skb = skb;
11294         tx_buf->flags = 0;
11295
11296         DP(NETIF_MSG_TX_QUEUED,
11297            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
11298            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11299
11300 #ifdef BCM_VLAN
11301         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11302             (bp->flags & HW_VLAN_TX_FLAG)) {
11303                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11304                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11305         } else
11306 #endif
11307                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11308
11309         /* turn on parsing and get a BD */
11310         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11311         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11312
11313         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11314
11315         if (xmit_type & XMIT_CSUM) {
11316                 hlen = (skb_network_header(skb) - skb->data) / 2;  /* in 16-bit words */
11317
11318                 /* for now NS flag is not used in Linux */
11319                 pbd->global_data =
11320                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11321                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11322
11323                 pbd->ip_hlen = (skb_transport_header(skb) -
11324                                 skb_network_header(skb)) / 2;
11325
11326                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11327
11328                 pbd->total_hlen = cpu_to_le16(hlen);
11329                 hlen = hlen*2;  /* back to bytes */
11330
11331                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11332
11333                 if (xmit_type & XMIT_CSUM_V4)
11334                         tx_start_bd->bd_flags.as_bitfield |=
11335                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11336                 else
11337                         tx_start_bd->bd_flags.as_bitfield |=
11338                                                 ETH_TX_BD_FLAGS_IPV6;
11339
11340                 if (xmit_type & XMIT_CSUM_TCP) {
11341                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11342
11343                 } else {
11344                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11345
11346                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11347
11348                         DP(NETIF_MSG_TX_QUEUED,
11349                            "hlen %d  fix %d  csum before fix %x\n",
11350                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11351
11352                         /* HW bug: fixup the CSUM */
11353                         pbd->tcp_pseudo_csum =
11354                                 bnx2x_csum_fix(skb_transport_header(skb),
11355                                                SKB_CS(skb), fix);
11356
11357                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11358                            pbd->tcp_pseudo_csum);
11359                 }
11360         }
11361
11362         mapping = pci_map_single(bp->pdev, skb->data,
11363                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11364
11365         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11366         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11367         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11368         tx_start_bd->nbd = cpu_to_le16(nbd);
11369         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11370         pkt_size = tx_start_bd->nbytes;
11371
11372         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11373            "  nbytes %d  flags %x  vlan %x\n",
11374            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11375            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11376            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11377
11378         if (xmit_type & XMIT_GSO) {
11379
11380                 DP(NETIF_MSG_TX_QUEUED,
11381                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11382                    skb->len, hlen, skb_headlen(skb),
11383                    skb_shinfo(skb)->gso_size);
11384
11385                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11386
11387                 if (unlikely(skb_headlen(skb) > hlen))
11388                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11389                                                  hlen, bd_prod, ++nbd);
11390
11391                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11392                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11393                 pbd->tcp_flags = pbd_tcp_flags(skb);
11394
11395                 if (xmit_type & XMIT_GSO_V4) {
11396                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11397                         pbd->tcp_pseudo_csum =
11398                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11399                                                           ip_hdr(skb)->daddr,
11400                                                           0, IPPROTO_TCP, 0));
11401
11402                 } else
11403                         pbd->tcp_pseudo_csum =
11404                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11405                                                         &ipv6_hdr(skb)->daddr,
11406                                                         0, IPPROTO_TCP, 0));
11407
11408                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11409         }
11410         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11411
11412         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11413                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11414
11415                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11416                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11417                 if (total_pkt_bd == NULL)
11418                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11419
11420                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11421                                        frag->size, PCI_DMA_TODEVICE);
11422
11423                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11424                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11425                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11426                 le16_add_cpu(&pkt_size, frag->size);
11427
11428                 DP(NETIF_MSG_TX_QUEUED,
11429                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11430                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11431                    le16_to_cpu(tx_data_bd->nbytes));
11432         }
11433
11434         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11435
11436         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11437
11438         /* now send a tx doorbell, counting the next BD
11439          * if the packet contains or ends with it
11440          */
11441         if (TX_BD_POFF(bd_prod) < nbd)
11442                 nbd++;
11443
11444         if (total_pkt_bd != NULL)
11445                 total_pkt_bd->total_pkt_bytes = pkt_size;
11446
11447         if (pbd)
11448                 DP(NETIF_MSG_TX_QUEUED,
11449                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11450                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11451                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11452                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11453                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11454
11455         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11456
11457         /*
11458          * Make sure that the BD data is updated before updating the producer
11459          * since FW might read the BD right after the producer is updated.
11460          * This is only applicable for weak-ordered memory model archs such
11461          * as IA-64. The following barrier is also mandatory since FW
11462          * assumes packets must have BDs.
11463          */
11464         wmb();
11465
11466         fp->tx_db.data.prod += nbd;
11467         barrier();
11468         DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11469
11470         mmiowb();
11471
11472         fp->tx_bd_prod += nbd;
11473
11474         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11475                 netif_tx_stop_queue(txq);
11476                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11477                    if we put Tx into XOFF state. */
11478                 smp_mb();
11479                 fp_stat->eth_q_stats.driver_xoff++;
11480                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11481                         netif_tx_wake_queue(txq);
11482         }
11483         fp_stat->tx_pkt++;
11484
11485         return NETDEV_TX_OK;
11486 }
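/* Sketch (not normative) of the BD chain built above for a TSO skb:
 * start BD (linear data) -> parse BD (checksum/TSO info) -> optional
 * TSO-split data BD -> one data BD per frag. nbd starts at nr_frags + 2
 * (start BD + pbd + frags), grows by one for the split BD and by one more
 * when the chain takes in a page-crossing "next page" BD
 * (TX_BD_POFF(bd_prod) < nbd).
 */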
11487
11488 /* called with rtnl_lock */
11489 static int bnx2x_open(struct net_device *dev)
11490 {
11491         struct bnx2x *bp = netdev_priv(dev);
11492
11493         netif_carrier_off(dev);
11494
11495         bnx2x_set_power_state(bp, PCI_D0);
11496
11497         return bnx2x_nic_load(bp, LOAD_OPEN);
11498 }
11499
11500 /* called with rtnl_lock */
11501 static int bnx2x_close(struct net_device *dev)
11502 {
11503         struct bnx2x *bp = netdev_priv(dev);
11504
11505         /* Unload the driver, release IRQs */
11506         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11507         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11508                 if (!CHIP_REV_IS_SLOW(bp))
11509                         bnx2x_set_power_state(bp, PCI_D3hot);
11510
11511         return 0;
11512 }
11513
11514 /* called with netif_tx_lock from dev_mcast.c */
11515 static void bnx2x_set_rx_mode(struct net_device *dev)
11516 {
11517         struct bnx2x *bp = netdev_priv(dev);
11518         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11519         int port = BP_PORT(bp);
11520
11521         if (bp->state != BNX2X_STATE_OPEN) {
11522                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11523                 return;
11524         }
11525
11526         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11527
11528         if (dev->flags & IFF_PROMISC)
11529                 rx_mode = BNX2X_RX_MODE_PROMISC;
11530
11531         else if ((dev->flags & IFF_ALLMULTI) ||
11532                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11533                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11534
11535         else { /* some multicasts */
11536                 if (CHIP_IS_E1(bp)) {
11537                         int i, old, offset;
11538                         struct dev_mc_list *mclist;
11539                         struct mac_configuration_cmd *config =
11540                                                 bnx2x_sp(bp, mcast_config);
11541
11542                         for (i = 0, mclist = dev->mc_list;
11543                              mclist && (i < dev->mc_count);
11544                              i++, mclist = mclist->next) {
11545
11546                                 config->config_table[i].
11547                                         cam_entry.msb_mac_addr =
11548                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11549                                 config->config_table[i].
11550                                         cam_entry.middle_mac_addr =
11551                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11552                                 config->config_table[i].
11553                                         cam_entry.lsb_mac_addr =
11554                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11555                                 config->config_table[i].cam_entry.flags =
11556                                                         cpu_to_le16(port);
11557                                 config->config_table[i].
11558                                         target_table_entry.flags = 0;
11559                                 config->config_table[i].target_table_entry.
11560                                         clients_bit_vector =
11561                                                 cpu_to_le32(1 << BP_L_ID(bp));
11562                                 config->config_table[i].
11563                                         target_table_entry.vlan_id = 0;
11564
11565                                 DP(NETIF_MSG_IFUP,
11566                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11567                                    config->config_table[i].
11568                                                 cam_entry.msb_mac_addr,
11569                                    config->config_table[i].
11570                                                 cam_entry.middle_mac_addr,
11571                                    config->config_table[i].
11572                                                 cam_entry.lsb_mac_addr);
11573                         }
11574                         old = config->hdr.length;
11575                         if (old > i) {
11576                                 for (; i < old; i++) {
11577                                         if (CAM_IS_INVALID(config->
11578                                                            config_table[i])) {
11579                                                 /* already invalidated */
11580                                                 break;
11581                                         }
11582                                         /* invalidate */
11583                                         CAM_INVALIDATE(config->
11584                                                        config_table[i]);
11585                                 }
11586                         }
11587
11588                         if (CHIP_REV_IS_SLOW(bp))
11589                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11590                         else
11591                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11592
11593                         config->hdr.length = i;
11594                         config->hdr.offset = offset;
11595                         config->hdr.client_id = bp->fp->cl_id;
11596                         config->hdr.reserved1 = 0;
11597
11598                         bp->set_mac_pending++;
11599                         smp_wmb();
11600
11601                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11602                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11603                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11604                                       0);
11605                 } else { /* E1H */
11606                         /* Accept one or more multicasts */
11607                         struct dev_mc_list *mclist;
11608                         u32 mc_filter[MC_HASH_SIZE];
11609                         u32 crc, bit, regidx;
11610                         int i;
11611
11612                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11613
11614                         for (i = 0, mclist = dev->mc_list;
11615                              mclist && (i < dev->mc_count);
11616                              i++, mclist = mclist->next) {
11617
11618                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11619                                    mclist->dmi_addr);
11620
11621                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11622                                 bit = (crc >> 24) & 0xff;
11623                                 regidx = bit >> 5;
11624                                 bit &= 0x1f;
11625                                 mc_filter[regidx] |= (1 << bit);
11626                         }
11627
11628                         for (i = 0; i < MC_HASH_SIZE; i++)
11629                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11630                                        mc_filter[i]);
11631                 }
11632         }
11633
11634         bp->rx_mode = rx_mode;
11635         bnx2x_set_storm_rx_mode(bp);
11636 }
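/* Illustration (not part of the driver): the E1H path above reduces each
 * multicast MAC to one bit in a 256-bit approximate-match table spread over
 * MC_HASH_SIZE 32-bit registers. A minimal sketch of that mapping, reusing
 * crc32c_le() just as the loop above does; bnx2x_mc_hash_bit() is a
 * hypothetical helper name, not a driver symbol.
 */
static inline void bnx2x_mc_hash_bit(const u8 *mac, u32 *regidx, u32 *bit)
{
        u32 b = (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;

        *regidx = b >> 5;       /* which 32-bit MC_HASH register */
        *bit = b & 0x1f;        /* bit position within that register */
}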
11637
11638 /* called with rtnl_lock */
11639 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11640 {
11641         struct sockaddr *addr = p;
11642         struct bnx2x *bp = netdev_priv(dev);
11643
11644         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11645                 return -EINVAL;
11646
11647         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11648         if (netif_running(dev)) {
11649                 if (CHIP_IS_E1(bp))
11650                         bnx2x_set_eth_mac_addr_e1(bp, 1);
11651                 else
11652                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
11653         }
11654
11655         return 0;
11656 }
11657
11658 /* called with rtnl_lock */
11659 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11660                            int devad, u16 addr)
11661 {
11662         struct bnx2x *bp = netdev_priv(netdev);
11663         u16 value;
11664         int rc;
11665         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11666
11667         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11668            prtad, devad, addr);
11669
11670         if (prtad != bp->mdio.prtad) {
11671                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11672                    prtad, bp->mdio.prtad);
11673                 return -EINVAL;
11674         }
11675
11676         /* The HW expects different devad if CL22 is used */
11677         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11678
11679         bnx2x_acquire_phy_lock(bp);
11680         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11681                              devad, addr, &value);
11682         bnx2x_release_phy_lock(bp);
11683         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11684
11685         if (!rc)
11686                 rc = value;
11687         return rc;
11688 }
11689
11690 /* called with rtnl_lock */
11691 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11692                             u16 addr, u16 value)
11693 {
11694         struct bnx2x *bp = netdev_priv(netdev);
11695         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11696         int rc;
11697
11698         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11699                            " value 0x%x\n", prtad, devad, addr, value);
11700
11701         if (prtad != bp->mdio.prtad) {
11702                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11703                    prtad, bp->mdio.prtad);
11704                 return -EINVAL;
11705         }
11706
11707         /* The HW expects different devad if CL22 is used */
11708         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11709
11710         bnx2x_acquire_phy_lock(bp);
11711         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11712                               devad, addr, value);
11713         bnx2x_release_phy_lock(bp);
11714         return rc;
11715 }
11716
11717 /* called with rtnl_lock */
11718 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11719 {
11720         struct bnx2x *bp = netdev_priv(dev);
11721         struct mii_ioctl_data *mdio = if_mii(ifr);
11722
11723         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11724            mdio->phy_id, mdio->reg_num, mdio->val_in);
11725
11726         if (!netif_running(dev))
11727                 return -EAGAIN;
11728
11729         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11730 }
11731
11732 /* called with rtnl_lock */
11733 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11734 {
11735         struct bnx2x *bp = netdev_priv(dev);
11736         int rc = 0;
11737
11738         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11739             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11740                 return -EINVAL;
11741
11742         /* This does not race with packet allocation
11743          * because the actual alloc size is
11744          * only updated as part of load
11745          */
11746         dev->mtu = new_mtu;
11747
11748         if (netif_running(dev)) {
11749                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11750                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11751         }
11752
11753         return rc;
11754 }
11755
11756 static void bnx2x_tx_timeout(struct net_device *dev)
11757 {
11758         struct bnx2x *bp = netdev_priv(dev);
11759
11760 #ifdef BNX2X_STOP_ON_ERROR
11761         if (!bp->panic)
11762                 bnx2x_panic();
11763 #endif
11764         /* This allows the netif to be shut down gracefully before resetting */
11765         schedule_work(&bp->reset_task);
11766 }
11767
11768 #ifdef BCM_VLAN
11769 /* called with rtnl_lock */
11770 static void bnx2x_vlan_rx_register(struct net_device *dev,
11771                                    struct vlan_group *vlgrp)
11772 {
11773         struct bnx2x *bp = netdev_priv(dev);
11774
11775         bp->vlgrp = vlgrp;
11776
11777         /* Set flags according to the required capabilities */
11778         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11779
11780         if (dev->features & NETIF_F_HW_VLAN_TX)
11781                 bp->flags |= HW_VLAN_TX_FLAG;
11782
11783         if (dev->features & NETIF_F_HW_VLAN_RX)
11784                 bp->flags |= HW_VLAN_RX_FLAG;
11785
11786         if (netif_running(dev))
11787                 bnx2x_set_client_config(bp);
11788 }
11789
11790 #endif
11791
11792 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11793 static void poll_bnx2x(struct net_device *dev)
11794 {
11795         struct bnx2x *bp = netdev_priv(dev);
11796
11797         disable_irq(bp->pdev->irq);
11798         bnx2x_interrupt(bp->pdev->irq, dev);
11799         enable_irq(bp->pdev->irq);
11800 }
11801 #endif
11802
11803 static const struct net_device_ops bnx2x_netdev_ops = {
11804         .ndo_open               = bnx2x_open,
11805         .ndo_stop               = bnx2x_close,
11806         .ndo_start_xmit         = bnx2x_start_xmit,
11807         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11808         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11809         .ndo_validate_addr      = eth_validate_addr,
11810         .ndo_do_ioctl           = bnx2x_ioctl,
11811         .ndo_change_mtu         = bnx2x_change_mtu,
11812         .ndo_tx_timeout         = bnx2x_tx_timeout,
11813 #ifdef BCM_VLAN
11814         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11815 #endif
11816 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11817         .ndo_poll_controller    = poll_bnx2x,
11818 #endif
11819 };
11820
11821 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11822                                     struct net_device *dev)
11823 {
11824         struct bnx2x *bp;
11825         int rc;
11826
11827         SET_NETDEV_DEV(dev, &pdev->dev);
11828         bp = netdev_priv(dev);
11829
11830         bp->dev = dev;
11831         bp->pdev = pdev;
11832         bp->flags = 0;
11833         bp->func = PCI_FUNC(pdev->devfn);
11834
11835         rc = pci_enable_device(pdev);
11836         if (rc) {
11837                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11838                 goto err_out;
11839         }
11840
11841         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11842                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11843                        " aborting\n");
11844                 rc = -ENODEV;
11845                 goto err_out_disable;
11846         }
11847
11848         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11849                 printk(KERN_ERR PFX "Cannot find second PCI device"
11850                        " base address, aborting\n");
11851                 rc = -ENODEV;
11852                 goto err_out_disable;
11853         }
11854
11855         if (atomic_read(&pdev->enable_cnt) == 1) {
11856                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11857                 if (rc) {
11858                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11859                                " aborting\n");
11860                         goto err_out_disable;
11861                 }
11862
11863                 pci_set_master(pdev);
11864                 pci_save_state(pdev);
11865         }
11866
11867         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11868         if (bp->pm_cap == 0) {
11869                 printk(KERN_ERR PFX "Cannot find power management"
11870                        " capability, aborting\n");
11871                 rc = -EIO;
11872                 goto err_out_release;
11873         }
11874
11875         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11876         if (bp->pcie_cap == 0) {
11877                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11878                        " aborting\n");
11879                 rc = -EIO;
11880                 goto err_out_release;
11881         }
11882
11883         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11884                 bp->flags |= USING_DAC_FLAG;
11885                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11886                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11887                                " failed, aborting\n");
11888                         rc = -EIO;
11889                         goto err_out_release;
11890                 }
11891
11892         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11893                 printk(KERN_ERR PFX "System does not support DMA,"
11894                        " aborting\n");
11895                 rc = -EIO;
11896                 goto err_out_release;
11897         }
11898
11899         dev->mem_start = pci_resource_start(pdev, 0);
11900         dev->base_addr = dev->mem_start;
11901         dev->mem_end = pci_resource_end(pdev, 0);
11902
11903         dev->irq = pdev->irq;
11904
11905         bp->regview = pci_ioremap_bar(pdev, 0);
11906         if (!bp->regview) {
11907                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11908                 rc = -ENOMEM;
11909                 goto err_out_release;
11910         }
11911
11912         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11913                                         min_t(u64, BNX2X_DB_SIZE,
11914                                               pci_resource_len(pdev, 2)));
11915         if (!bp->doorbells) {
11916                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11917                 rc = -ENOMEM;
11918                 goto err_out_unmap;
11919         }
11920
11921         bnx2x_set_power_state(bp, PCI_D0);
11922
11923         /* clean indirect addresses */
11924         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11925                                PCICFG_VENDOR_ID_OFFSET);
11926         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11927         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11928         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11929         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11930
11931         dev->watchdog_timeo = TX_TIMEOUT;
11932
11933         dev->netdev_ops = &bnx2x_netdev_ops;
11934         dev->ethtool_ops = &bnx2x_ethtool_ops;
11935         dev->features |= NETIF_F_SG;
11936         dev->features |= NETIF_F_HW_CSUM;
11937         if (bp->flags & USING_DAC_FLAG)
11938                 dev->features |= NETIF_F_HIGHDMA;
11939         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11940         dev->features |= NETIF_F_TSO6;
11941 #ifdef BCM_VLAN
11942         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11943         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11944
11945         dev->vlan_features |= NETIF_F_SG;
11946         dev->vlan_features |= NETIF_F_HW_CSUM;
11947         if (bp->flags & USING_DAC_FLAG)
11948                 dev->vlan_features |= NETIF_F_HIGHDMA;
11949         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11950         dev->vlan_features |= NETIF_F_TSO6;
11951 #endif
11952
11953         /* get_port_hwinfo() will set prtad and mmds properly */
11954         bp->mdio.prtad = MDIO_PRTAD_NONE;
11955         bp->mdio.mmds = 0;
11956         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11957         bp->mdio.dev = dev;
11958         bp->mdio.mdio_read = bnx2x_mdio_read;
11959         bp->mdio.mdio_write = bnx2x_mdio_write;
11960
11961         return 0;
11962
11963 err_out_unmap:
11964         if (bp->regview) {
11965                 iounmap(bp->regview);
11966                 bp->regview = NULL;
11967         }
11968         if (bp->doorbells) {
11969                 iounmap(bp->doorbells);
11970                 bp->doorbells = NULL;
11971         }
11972
11973 err_out_release:
11974         if (atomic_read(&pdev->enable_cnt) == 1)
11975                 pci_release_regions(pdev);
11976
11977 err_out_disable:
11978         pci_disable_device(pdev);
11979         pci_set_drvdata(pdev, NULL);
11980
11981 err_out:
11982         return rc;
11983 }
11984
11985 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11986                                                  int *width, int *speed)
11987 {
11988         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11989
11990         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11991
11992         /* returned link speed encoding: 1 = 2.5GHz, 2 = 5GHz */
11993         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11994 }
11995
11996 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11997 {
11998         const struct firmware *firmware = bp->firmware;
11999         struct bnx2x_fw_file_hdr *fw_hdr;
12000         struct bnx2x_fw_file_section *sections;
12001         u32 offset, len, num_ops;
12002         u16 *ops_offsets;
12003         int i;
12004         const u8 *fw_ver;
12005
12006         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12007                 return -EINVAL;
12008
12009         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12010         sections = (struct bnx2x_fw_file_section *)fw_hdr;
12011
12012         /* Make sure none of the offsets and sizes make us read beyond
12013          * the end of the firmware data */
12014         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12015                 offset = be32_to_cpu(sections[i].offset);
12016                 len = be32_to_cpu(sections[i].len);
12017                 if (offset + len > firmware->size) {
12018                         printk(KERN_ERR PFX "Section %d length is out of "
12019                                             "bounds\n", i);
12020                         return -EINVAL;
12021                 }
12022         }
12023
12024         /* Likewise for the init_ops offsets */
12025         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12026         ops_offsets = (u16 *)(firmware->data + offset);
12027         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12028
12029         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12030                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12031                         printk(KERN_ERR PFX "Section offset %d is out of "
12032                                             "bounds\n", i);
12033                         return -EINVAL;
12034                 }
12035         }
12036
12037         /* Check FW version */
12038         offset = be32_to_cpu(fw_hdr->fw_version.offset);
12039         fw_ver = firmware->data + offset;
12040         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12041             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12042             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12043             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12044                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
12045                                     " Should be %d.%d.%d.%d\n",
12046                        fw_ver[0], fw_ver[1], fw_ver[2],
12047                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12048                        BCM_5710_FW_MINOR_VERSION,
12049                        BCM_5710_FW_REVISION_VERSION,
12050                        BCM_5710_FW_ENGINEERING_VERSION);
12051                 return -EINVAL;
12052         }
12053
12054         return 0;
12055 }
12056
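/* The firmware file keeps all of its sections in big-endian (network)
 * byte order; the helpers below copy a section while swapping each
 * element to CPU byte order.
 */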
12057 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12058 {
12059         const __be32 *source = (const __be32 *)_source;
12060         u32 *target = (u32 *)_target;
12061         u32 i;
12062
12063         for (i = 0; i < n/4; i++)
12064                 target[i] = be32_to_cpu(source[i]);
12065 }
12066
12067 /*
12068  * Ops array is stored in the following format:
12069  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12070  */
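/* e.g. a record whose big-endian words are 0x021000c8 0x00000001 decodes
 * to op = 0x02, offset = 0x1000c8, raw_data = 0x00000001
 */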
12071 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12072 {
12073         const __be32 *source = (const __be32 *)_source;
12074         struct raw_op *target = (struct raw_op *)_target;
12075         u32 i, j, tmp;
12076
12077         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12078                 tmp = be32_to_cpu(source[j]);
12079                 target[i].op = (tmp >> 24) & 0xff;
12080                 target[i].offset =  tmp & 0xffffff;
12081                 target[i].raw_data = be32_to_cpu(source[j+1]);
12082         }
12083 }
12084
12085 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12086 {
12087         const __be16 *source = (const __be16 *)_source;
12088         u16 *target = (u16 *)_target;
12089         u32 i;
12090
12091         for (i = 0; i < n/2; i++)
12092                 target[i] = be16_to_cpu(source[i]);
12093 }
12094
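/* Allocate bp->arr and fill it from the matching section of the firmware
 * file, converting byte order with func; on allocation failure jump to
 * lbl.  Relies on bp and fw_hdr being in scope at the invocation site:
 *
 *	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
 *
 * roughly expands to
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	...
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 */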
12095 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12096         do { \
12097                 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12098                 bp->arr = kmalloc(len, GFP_KERNEL); \
12099                 if (!bp->arr) { \
12100                         printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12101                                             "for "#arr"\n", len); \
12102                         goto lbl; \
12103                 } \
12104                 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12105                      (u8 *)bp->arr, len); \
12106         } while (0)
12107
12108 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12109 {
12110         char fw_file_name[40] = {0};
12111         struct bnx2x_fw_file_hdr *fw_hdr;
12112         int rc, offset;
12113
12114         /* Create a FW file name */
12115         if (CHIP_IS_E1(bp))
12116                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
12117         else
12118                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
12119
12120         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
12121                 BCM_5710_FW_MAJOR_VERSION,
12122                 BCM_5710_FW_MINOR_VERSION,
12123                 BCM_5710_FW_REVISION_VERSION,
12124                 BCM_5710_FW_ENGINEERING_VERSION);
12125
12126         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12127
12128         rc = request_firmware(&bp->firmware, fw_file_name, dev);
12129         if (rc) {
12130                 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12131                        fw_file_name);
12132                 goto request_firmware_exit;
12133         }
12134
12135         rc = bnx2x_check_firmware(bp);
12136         if (rc) {
12137                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12138                 goto request_firmware_exit;
12139         }
12140
12141         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12142
12143         /* Initialize the pointers to the init arrays */
12144         /* Blob */
12145         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12146
12147         /* Opcodes */
12148         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12149
12150         /* Offsets */
12151         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12152                             be16_to_cpu_n);
12153
12154         /* STORMs firmware */
12155         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12156                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12157         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
12158                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12159         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12160                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12161         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
12162                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
12163         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12164                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12165         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
12166                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12167         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12168                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12169         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
12170                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
12171
12172         return 0;
12173
12174 init_offsets_alloc_err:
12175         kfree(bp->init_ops);
12176 init_ops_alloc_err:
12177         kfree(bp->init_data);
12178 request_firmware_exit:
12179         release_firmware(bp->firmware);
12180
12181         return rc;
12182 }
12183
12184
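/* PCI probe entry point: allocate the net_device, set up the PCI device
 * and driver state, load the init firmware and register with the stack.
 * Failures unwind in reverse order.
 */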
12185 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12186                                     const struct pci_device_id *ent)
12187 {
12188         struct net_device *dev = NULL;
12189         struct bnx2x *bp;
12190         int pcie_width, pcie_speed;
12191         int rc;
12192
12193         /* dev zeroed in alloc_etherdev_mq */
12194         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12195         if (!dev) {
12196                 printk(KERN_ERR PFX "Cannot allocate net device\n");
12197                 return -ENOMEM;
12198         }
12199
12200         bp = netdev_priv(dev);
12201         bp->msglevel = debug;
12202
12203         pci_set_drvdata(pdev, dev);
12204
12205         rc = bnx2x_init_dev(pdev, dev);
12206         if (rc < 0) {
12207                 free_netdev(dev);
12208                 return rc;
12209         }
12210
12211         rc = bnx2x_init_bp(bp);
12212         if (rc)
12213                 goto init_one_exit;
12214
12215         /* Set init arrays */
12216         rc = bnx2x_init_firmware(bp, &pdev->dev);
12217         if (rc) {
12218                 printk(KERN_ERR PFX "Error loading firmware\n");
12219                 goto init_one_exit;
12220         }
12221
12222         rc = register_netdev(dev);
12223         if (rc) {
12224                 dev_err(&pdev->dev, "Cannot register net device\n");
12225                 goto init_one_exit;
12226         }
12227
12228         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12229         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
12230                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
12231                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12232                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12233                dev->base_addr, bp->pdev->irq);
12234         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12235
12236         return 0;
12237
12238 init_one_exit:
12239         if (bp->regview)
12240                 iounmap(bp->regview);
12241
12242         if (bp->doorbells)
12243                 iounmap(bp->doorbells);
12244
12245         free_netdev(dev);
12246
12247         if (atomic_read(&pdev->enable_cnt) == 1)
12248                 pci_release_regions(pdev);
12249
12250         pci_disable_device(pdev);
12251         pci_set_drvdata(pdev, NULL);
12252
12253         return rc;
12254 }
12255
12256 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12257 {
12258         struct net_device *dev = pci_get_drvdata(pdev);
12259         struct bnx2x *bp;
12260
12261         if (!dev) {
12262                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12263                 return;
12264         }
12265         bp = netdev_priv(dev);
12266
12267         unregister_netdev(dev);
12268
12269         kfree(bp->init_ops_offsets);
12270         kfree(bp->init_ops);
12271         kfree(bp->init_data);
12272         release_firmware(bp->firmware);
12273
12274         if (bp->regview)
12275                 iounmap(bp->regview);
12276
12277         if (bp->doorbells)
12278                 iounmap(bp->doorbells);
12279
12280         free_netdev(dev);
12281
12282         if (atomic_read(&pdev->enable_cnt) == 1)
12283                 pci_release_regions(pdev);
12284
12285         pci_disable_device(pdev);
12286         pci_set_drvdata(pdev, NULL);
12287 }
12288
12289 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12290 {
12291         struct net_device *dev = pci_get_drvdata(pdev);
12292         struct bnx2x *bp;
12293
12294         if (!dev) {
12295                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12296                 return -ENODEV;
12297         }
12298         bp = netdev_priv(dev);
12299
12300         rtnl_lock();
12301
12302         pci_save_state(pdev);
12303
12304         if (!netif_running(dev)) {
12305                 rtnl_unlock();
12306                 return 0;
12307         }
12308
12309         netif_device_detach(dev);
12310
12311         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12312
12313         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12314
12315         rtnl_unlock();
12316
12317         return 0;
12318 }
12319
12320 static int bnx2x_resume(struct pci_dev *pdev)
12321 {
12322         struct net_device *dev = pci_get_drvdata(pdev);
12323         struct bnx2x *bp;
12324         int rc;
12325
12326         if (!dev) {
12327                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12328                 return -ENODEV;
12329         }
12330         bp = netdev_priv(dev);
12331
12332         rtnl_lock();
12333
12334         pci_restore_state(pdev);
12335
12336         if (!netif_running(dev)) {
12337                 rtnl_unlock();
12338                 return 0;
12339         }
12340
12341         bnx2x_set_power_state(bp, PCI_D0);
12342         netif_device_attach(dev);
12343
12344         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12345
12346         rtnl_unlock();
12347
12348         return rc;
12349 }
12350
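/* Lightweight unload used on the PCI error (EEH) path: tear down driver
 * state and free resources without talking to a device that may already
 * be unreachable.
 */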
12351 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12352 {
12353         int i;
12354
12355         bp->state = BNX2X_STATE_ERROR;
12356
12357         bp->rx_mode = BNX2X_RX_MODE_NONE;
12358
12359         bnx2x_netif_stop(bp, 0);
12360
12361         del_timer_sync(&bp->timer);
12362         bp->stats_state = STATS_STATE_DISABLED;
12363         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12364
12365         /* Release IRQs */
12366         bnx2x_free_irq(bp);
12367
12368         if (CHIP_IS_E1(bp)) {
12369                 struct mac_configuration_cmd *config =
12370                                                 bnx2x_sp(bp, mcast_config);
12371
12372                 for (i = 0; i < config->hdr.length; i++)
12373                         CAM_INVALIDATE(config->config_table[i]);
12374         }
12375
12376         /* Free SKBs, SGEs, TPA pool and driver internals */
12377         bnx2x_free_skbs(bp);
12378         for_each_rx_queue(bp, i)
12379                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12380         for_each_rx_queue(bp, i)
12381                 netif_napi_del(&bnx2x_fp(bp, i, napi));
12382         bnx2x_free_mem(bp);
12383
12384         bp->state = BNX2X_STATE_CLOSED;
12385
12386         netif_carrier_off(bp->dev);
12387
12388         return 0;
12389 }
12390
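/* Re-read the MCP shared memory state after a slot reset so that the
 * following bnx2x_nic_load() starts from a valid firmware sequence number.
 */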
12391 static void bnx2x_eeh_recover(struct bnx2x *bp)
12392 {
12393         u32 val;
12394
12395         mutex_init(&bp->port.phy_mutex);
12396
12397         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12398         bp->link_params.shmem_base = bp->common.shmem_base;
12399         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12400
12401         if (!bp->common.shmem_base ||
12402             (bp->common.shmem_base < 0xA0000) ||
12403             (bp->common.shmem_base >= 0xC0000)) {
12404                 BNX2X_DEV_INFO("MCP not active\n");
12405                 bp->flags |= NO_MCP_FLAG;
12406                 return;
12407         }
12408
12409         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12410         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12411                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12412                 BNX2X_ERR("BAD MCP validity signature\n");
12413
12414         if (!BP_NOMCP(bp)) {
12415                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12416                               & DRV_MSG_SEQ_NUMBER_MASK);
12417                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12418         }
12419 }
12420
12421 /**
12422  * bnx2x_io_error_detected - called when PCI error is detected
12423  * @pdev: Pointer to PCI device
12424  * @state: The current PCI connection state
12425  *
12426  * This function is called after a PCI bus error affecting
12427  * this device has been detected.
12428  */
12429 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12430                                                 pci_channel_state_t state)
12431 {
12432         struct net_device *dev = pci_get_drvdata(pdev);
12433         struct bnx2x *bp = netdev_priv(dev);
12434
12435         rtnl_lock();
12436
12437         netif_device_detach(dev);
12438
12439         if (state == pci_channel_io_perm_failure) {
12440                 rtnl_unlock();
12441                 return PCI_ERS_RESULT_DISCONNECT;
12442         }
12443
12444         if (netif_running(dev))
12445                 bnx2x_eeh_nic_unload(bp);
12446
12447         pci_disable_device(pdev);
12448
12449         rtnl_unlock();
12450
12451         /* Request a slot reset */
12452         return PCI_ERS_RESULT_NEED_RESET;
12453 }
12454
12455 /**
12456  * bnx2x_io_slot_reset - called after the PCI bus has been reset
12457  * @pdev: Pointer to PCI device
12458  *
12459  * Restart the card from scratch, as if from a cold boot.
12460  */
12461 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12462 {
12463         struct net_device *dev = pci_get_drvdata(pdev);
12464         struct bnx2x *bp = netdev_priv(dev);
12465
12466         rtnl_lock();
12467
12468         if (pci_enable_device(pdev)) {
12469                 dev_err(&pdev->dev,
12470                         "Cannot re-enable PCI device after reset\n");
12471                 rtnl_unlock();
12472                 return PCI_ERS_RESULT_DISCONNECT;
12473         }
12474
12475         pci_set_master(pdev);
12476         pci_restore_state(pdev);
12477
12478         if (netif_running(dev))
12479                 bnx2x_set_power_state(bp, PCI_D0);
12480
12481         rtnl_unlock();
12482
12483         return PCI_ERS_RESULT_RECOVERED;
12484 }
12485
12486 /**
12487  * bnx2x_io_resume - called when traffic can start flowing again
12488  * @pdev: Pointer to PCI device
12489  *
12490  * This callback is called when the error recovery driver tells us that
12491  * it's OK to resume normal operation.
12492  */
12493 static void bnx2x_io_resume(struct pci_dev *pdev)
12494 {
12495         struct net_device *dev = pci_get_drvdata(pdev);
12496         struct bnx2x *bp = netdev_priv(dev);
12497
12498         rtnl_lock();
12499
12500         bnx2x_eeh_recover(bp);
12501
12502         if (netif_running(dev))
12503                 bnx2x_nic_load(bp, LOAD_NORMAL);
12504
12505         netif_device_attach(dev);
12506
12507         rtnl_unlock();
12508 }
12509
12510 static struct pci_error_handlers bnx2x_err_handler = {
12511         .error_detected = bnx2x_io_error_detected,
12512         .slot_reset     = bnx2x_io_slot_reset,
12513         .resume         = bnx2x_io_resume,
12514 };
12515
12516 static struct pci_driver bnx2x_pci_driver = {
12517         .name        = DRV_MODULE_NAME,
12518         .id_table    = bnx2x_pci_tbl,
12519         .probe       = bnx2x_init_one,
12520         .remove      = __devexit_p(bnx2x_remove_one),
12521         .suspend     = bnx2x_suspend,
12522         .resume      = bnx2x_resume,
12523         .err_handler = &bnx2x_err_handler,
12524 };
12525
12526 static int __init bnx2x_init(void)
12527 {
12528         int ret;
12529
12530         printk(KERN_INFO "%s", version);
12531
12532         bnx2x_wq = create_singlethread_workqueue("bnx2x");
12533         if (bnx2x_wq == NULL) {
12534                 printk(KERN_ERR PFX "Cannot create workqueue\n");
12535                 return -ENOMEM;
12536         }
12537
12538         ret = pci_register_driver(&bnx2x_pci_driver);
12539         if (ret) {
12540                 printk(KERN_ERR PFX "Cannot register driver\n");
12541                 destroy_workqueue(bnx2x_wq);
12542         }
12543         return ret;
12544 }
12545
12546 static void __exit bnx2x_cleanup(void)
12547 {
12548         pci_unregister_driver(&bnx2x_pci_driver);
12549
12550         destroy_workqueue(bnx2x_wq);
12551 }
12552
12553 module_init(bnx2x_init);
12554 module_exit(bnx2x_cleanup);
12555
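/* CNIC support - hooks exported to the cnic offload driver.  L5 slow-path
 * requests are staged on a driver-private kwq ring and posted to the
 * chip's slow-path queue as credit becomes available.
 */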
12556 #ifdef BCM_CNIC
12557
12558 /* count denotes the number of new completions we have seen */
12559 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12560 {
12561         struct eth_spe *spe;
12562
12563 #ifdef BNX2X_STOP_ON_ERROR
12564         if (unlikely(bp->panic))
12565                 return;
12566 #endif
12567
12568         spin_lock_bh(&bp->spq_lock);
12569         bp->cnic_spq_pending -= count;
12570
12571         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12572              bp->cnic_spq_pending++) {
12573
12574                 if (!bp->cnic_kwq_pending)
12575                         break;
12576
12577                 spe = bnx2x_sp_get_next(bp);
12578                 *spe = *bp->cnic_kwq_cons;
12579
12580                 bp->cnic_kwq_pending--;
12581
12582                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12583                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12584
12585                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12586                         bp->cnic_kwq_cons = bp->cnic_kwq;
12587                 else
12588                         bp->cnic_kwq_cons++;
12589         }
12590         bnx2x_sp_prod_update(bp);
12591         spin_unlock_bh(&bp->spq_lock);
12592 }
12593
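/* Accept up to count kwqes from cnic; returns the number actually queued */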
12594 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12595                                struct kwqe_16 *kwqes[], u32 count)
12596 {
12597         struct bnx2x *bp = netdev_priv(dev);
12598         int i;
12599
12600 #ifdef BNX2X_STOP_ON_ERROR
12601         if (unlikely(bp->panic))
12602                 return -EIO;
12603 #endif
12604
12605         spin_lock_bh(&bp->spq_lock);
12606
12607         for (i = 0; i < count; i++) {
12608                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12609
12610                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12611                         break;
12612
12613                 *bp->cnic_kwq_prod = *spe;
12614
12615                 bp->cnic_kwq_pending++;
12616
12617                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12618                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
12619                    spe->data.mac_config_addr.hi,
12620                    spe->data.mac_config_addr.lo,
12621                    bp->cnic_kwq_pending);
12622
12623                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12624                         bp->cnic_kwq_prod = bp->cnic_kwq;
12625                 else
12626                         bp->cnic_kwq_prod++;
12627         }
12628
12629         spin_unlock_bh(&bp->spq_lock);
12630
12631         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12632                 bnx2x_cnic_sp_post(bp, 0);
12633
12634         return i;
12635 }
12636
12637 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12638 {
12639         struct cnic_ops *c_ops;
12640         int rc = 0;
12641
12642         mutex_lock(&bp->cnic_mutex);
12643         c_ops = bp->cnic_ops;
12644         if (c_ops)
12645                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12646         mutex_unlock(&bp->cnic_mutex);
12647
12648         return rc;
12649 }
12650
12651 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12652 {
12653         struct cnic_ops *c_ops;
12654         int rc = 0;
12655
12656         rcu_read_lock();
12657         c_ops = rcu_dereference(bp->cnic_ops);
12658         if (c_ops)
12659                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12660         rcu_read_unlock();
12661
12662         return rc;
12663 }
12664
12665 /*
12666  * for commands that have no data
12667  */
12668 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12669 {
12670         struct cnic_ctl_info ctl = {0};
12671
12672         ctl.cmd = cmd;
12673
12674         return bnx2x_cnic_ctl_send(bp, &ctl);
12675 }
12676
12677 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12678 {
12679         struct cnic_ctl_info ctl = {0};
12680
12681         /* first we tell CNIC and only then we count this as a completion */
12682         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12683         ctl.data.comp.cid = cid;
12684
12685         bnx2x_cnic_ctl_send_bh(bp, &ctl);
12686         bnx2x_cnic_sp_post(bp, 1);
12687 }
12688
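/* Control operations invoked by cnic through cp->drv_ctl */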
12689 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12690 {
12691         struct bnx2x *bp = netdev_priv(dev);
12692         int rc = 0;
12693
12694         switch (ctl->cmd) {
12695         case DRV_CTL_CTXTBL_WR_CMD: {
12696                 u32 index = ctl->data.io.offset;
12697                 dma_addr_t addr = ctl->data.io.dma_addr;
12698
12699                 bnx2x_ilt_wr(bp, index, addr);
12700                 break;
12701         }
12702
12703         case DRV_CTL_COMPLETION_CMD: {
12704                 int count = ctl->data.comp.comp_count;
12705
12706                 bnx2x_cnic_sp_post(bp, count);
12707                 break;
12708         }
12709
12710         /* rtnl_lock is held.  */
12711         case DRV_CTL_START_L2_CMD: {
12712                 u32 cli = ctl->data.ring.client_id;
12713
12714                 bp->rx_mode_cl_mask |= (1 << cli);
12715                 bnx2x_set_storm_rx_mode(bp);
12716                 break;
12717         }
12718
12719         /* rtnl_lock is held.  */
12720         case DRV_CTL_STOP_L2_CMD: {
12721                 u32 cli = ctl->data.ring.client_id;
12722
12723                 bp->rx_mode_cl_mask &= ~(1 << cli);
12724                 bnx2x_set_storm_rx_mode(bp);
12725                 break;
12726         }
12727
12728         default:
12729                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12730                 rc = -EINVAL;
12731         }
12732
12733         return rc;
12734 }
12735
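/* irq_arr[0] describes the dedicated CNIC status block (MSI-X vector 1
 * when available), irq_arr[1] the default status block.
 */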
12736 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12737 {
12738         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12739
12740         if (bp->flags & USING_MSIX_FLAG) {
12741                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12742                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12743                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12744         } else {
12745                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12746                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12747         }
12748         cp->irq_arr[0].status_blk = bp->cnic_sb;
12749         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12750         cp->irq_arr[1].status_blk = bp->def_status_blk;
12751         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12752
12753         cp->num_irq = 2;
12754 }
12755
12756 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12757                                void *data)
12758 {
12759         struct bnx2x *bp = netdev_priv(dev);
12760         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12761
12762         if (ops == NULL)
12763                 return -EINVAL;
12764
12765         if (atomic_read(&bp->intr_sem) != 0)
12766                 return -EBUSY;
12767
12768         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12769         if (!bp->cnic_kwq)
12770                 return -ENOMEM;
12771
12772         bp->cnic_kwq_cons = bp->cnic_kwq;
12773         bp->cnic_kwq_prod = bp->cnic_kwq;
12774         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12775
12776         bp->cnic_spq_pending = 0;
12777         bp->cnic_kwq_pending = 0;
12778
12779         bp->cnic_data = data;
12780
12781         cp->num_irq = 0;
12782         cp->drv_state = CNIC_DRV_STATE_REGD;
12783
12784         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12785
12786         bnx2x_setup_cnic_irq_info(bp);
12787         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12788         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12789         rcu_assign_pointer(bp->cnic_ops, ops);
12790
12791         return 0;
12792 }
12793
12794 static int bnx2x_unregister_cnic(struct net_device *dev)
12795 {
12796         struct bnx2x *bp = netdev_priv(dev);
12797         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12798
12799         mutex_lock(&bp->cnic_mutex);
12800         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12801                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12802                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12803         }
12804         cp->drv_state = 0;
12805         rcu_assign_pointer(bp->cnic_ops, NULL);
12806         mutex_unlock(&bp->cnic_mutex);
12807         synchronize_rcu();
12808         kfree(bp->cnic_kwq);
12809         bp->cnic_kwq = NULL;
12810
12811         return 0;
12812 }
12813
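/* Called by the cnic driver to discover this device and the callbacks
 * it exports.
 */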
12814 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12815 {
12816         struct bnx2x *bp = netdev_priv(dev);
12817         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12818
12819         cp->drv_owner = THIS_MODULE;
12820         cp->chip_id = CHIP_ID(bp);
12821         cp->pdev = bp->pdev;
12822         cp->io_base = bp->regview;
12823         cp->io_base2 = bp->doorbells;
12824         cp->max_kwqe_pending = 8;
12825         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12826         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12827         cp->ctx_tbl_len = CNIC_ILT_LINES;
12828         cp->starting_cid = BCM_CNIC_CID_START;
12829         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12830         cp->drv_ctl = bnx2x_drv_ctl;
12831         cp->drv_register_cnic = bnx2x_register_cnic;
12832         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12833
12834         return cp;
12835 }
12836 EXPORT_SYMBOL(bnx2x_cnic_probe);
12837
12838 #endif /* BCM_CNIC */
12839