bnx2x: Adding FW mailbox mutex
drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1"
#define DRV_MODULE_RELDATE      "2009/08/12"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1       "bnx2x-e1-"
#define FW_FILE_PREFIX_E1H      "bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
                                " (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
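        /* restore the GRC address window to a harmless offset (the vendor ID
         * field) so that subsequent config-space accesses do not hit device
         * registers
         */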
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

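        /* completion protocol: wb_comp is cleared here, and the DMAE engine
         * writes DMAE_COMP_VAL to it (via comp_addr/comp_val above) once the
         * transaction completes; the loop below polls for that value
         */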
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int offset = 0;
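        /* len is in dwords; a single DMAE transaction is limited to
         * DMAE_LEN32_WR_MAX dwords, so issue the write in chunks, advancing
         * the byte offset by 4 bytes per dword written
         */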

        while (len > DMAE_LEN32_WR_MAX) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, DMAE_LEN32_WR_MAX);
                offset += DMAE_LEN32_WR_MAX * 4;
                len -= DMAE_LEN32_WR_MAX;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;
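        /* each STORM processor keeps a list of assert entries in its internal
         * memory; an entry is 4 dwords (16 bytes), and a row0 equal to
         * COMMON_ASM_INVALID_ASSERT_OPCODE marks the end of the valid entries
         */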

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
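        /* the MCP keeps a cyclic trace buffer in its scratchpad; 'mark'
         * (rounded up to a dword boundary above) points at the oldest entry,
         * so dump from mark to the end of the buffer, then wrap around and
         * dump from the start of the buffer up to mark
         */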
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        printk(KERN_ERR PFX);
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

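                /* the INTx value was first written above with the MSI/MSIX
                 * enable bit still set; clear that bit here so the final
                 * write below leaves only the INTx path enabled
                 */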
                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;
        u16 rc = 0;
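        /* returned bitmask: bit 0 - the CSTORM (Tx) index changed,
         * bit 1 - the USTORM (Rx) index changed
         */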

        barrier(); /* status block is written to by the chip */
        if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
                fp->fp_c_idx = fpsb->c_status_block.status_block_index;
                rc |= 1;
        }
        if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
                fp->fp_u_idx = fpsb->u_status_block.status_block_index;
                rc |= 2;
        }
        return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

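        /* nbd counts every BD the packet consumed; one is subtracted for the
         * start BD unmapped above, and the parse BD and the optional TSO
         * split header BD skipped below carry no DMA mapping of their own
         */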
        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
        int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
                done++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);
        }
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

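        /* ramrod completions are demultiplexed by OR-ing the command with the
         * current state; non-leading queues (fp->index != 0) only expect
         * client setup and halt ramrods
         */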
        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

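        /* sge_mask is a bitmask over the SGE ring: a set bit means the SGE is
         * still owned by the driver. Bits of the pages consumed by the FW are
         * cleared below; the producer is then advanced over 64-bit mask
         * elements that became fully consumed, re-arming them on the way
         */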
        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in each page: these correspond to the
           "next" element, hence they will never be indicated and should be
           removed from the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* gso_size must be set so the stack can re-segment the aggregated
           frame if it is forwarded (this is what enables forwarding support) */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
1345                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1346                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1347
1348                 /* Add one frag and update the appropriate fields in the skb */
1349                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1350
1351                 skb->data_len += frag_len;
1352                 skb->truesize += frag_len;
1353                 skb->len += frag_len;
1354
1355                 frag_size -= frag_len;
1356         }
1357
1358         return 0;
1359 }
1360
1361 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1362                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1363                            u16 cqe_idx)
1364 {
1365         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1366         struct sk_buff *skb = rx_buf->skb;
1367         /* alloc new skb */
1368         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1369
1370         /* Unmap skb in the pool anyway, as we are going to change
1371            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1372            fails. */
1373         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1374                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1375
1376         if (likely(new_skb)) {
1377                 /* fix ip xsum and give it to the stack */
1378                 /* (no need to map the new skb) */
1379 #ifdef BCM_VLAN
1380                 int is_vlan_cqe =
1381                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1382                          PARSING_FLAGS_VLAN);
1383                 int is_not_hwaccel_vlan_cqe =
1384                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1385 #endif
1386
1387                 prefetch(skb);
1388                 prefetch(((char *)(skb)) + 128);
1389
1390 #ifdef BNX2X_STOP_ON_ERROR
1391                 if (pad + len > bp->rx_buf_size) {
1392                         BNX2X_ERR("skb_put is about to fail...  "
1393                                   "pad %d  len %d  rx_buf_size %d\n",
1394                                   pad, len, bp->rx_buf_size);
1395                         bnx2x_panic();
1396                         return;
1397                 }
1398 #endif
1399
1400                 skb_reserve(skb, pad);
1401                 skb_put(skb, len);
1402
1403                 skb->protocol = eth_type_trans(skb, bp->dev);
1404                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1405
1406                 {
1407                         struct iphdr *iph;
1408
1409                         iph = (struct iphdr *)skb->data;
1410 #ifdef BCM_VLAN
1411                         /* If there is no Rx VLAN offloading -
1412                            take VLAN tag into an account */
1413                         if (unlikely(is_not_hwaccel_vlan_cqe))
1414                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1415 #endif
1416                         iph->check = 0;
1417                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1418                 }
1419
1420                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1421                                          &cqe->fast_path_cqe, cqe_idx)) {
1422 #ifdef BCM_VLAN
1423                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1424                             (!is_not_hwaccel_vlan_cqe))
1425                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1426                                                 le16_to_cpu(cqe->fast_path_cqe.
1427                                                             vlan_tag));
1428                         else
1429 #endif
1430                                 netif_receive_skb(skb);
1431                 } else {
1432                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1433                            " - dropping packet!\n");
1434                         dev_kfree_skb(skb);
1435                 }
1436
1438                 /* put new skb in bin */
1439                 fp->tpa_pool[queue].skb = new_skb;
1440
1441         } else {
1442                 /* else drop the packet and keep the buffer in the bin */
1443                 DP(NETIF_MSG_RX_STATUS,
1444                    "Failed to allocate new skb - dropping packet!\n");
1445                 fp->eth_q_stats.rx_skb_alloc_failed++;
1446         }
1447
1448         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1449 }
1450
1451 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1452                                         struct bnx2x_fastpath *fp,
1453                                         u16 bd_prod, u16 rx_comp_prod,
1454                                         u16 rx_sge_prod)
1455 {
1456         struct ustorm_eth_rx_producers rx_prods = {0};
1457         int i;
1458
1459         /* Update producers */
1460         rx_prods.bd_prod = bd_prod;
1461         rx_prods.cqe_prod = rx_comp_prod;
1462         rx_prods.sge_prod = rx_sge_prod;
1463
1464         /*
1465          * Make sure that the BD and SGE data is updated before updating the
1466          * producers since FW might read the BD/SGE right after the producer
1467          * is updated.
1468          * This is only applicable for weak-ordered memory model archs such
1469          * as IA-64. The following barrier is also mandatory since the FW
1470          * assumes that posted BDs have buffers.
1471          */
1472         wmb();
1473
1474         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1475                 REG_WR(bp, BAR_USTRORM_INTMEM +
1476                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1477                        ((u32 *)&rx_prods)[i]);
1478
1479         mmiowb(); /* keep prod updates ordered */
1480
1481         DP(NETIF_MSG_RX_STATUS,
1482            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1483            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1484 }
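
/*
 * Note on the copy loop above: it relies on
 * sizeof(struct ustorm_eth_rx_producers) being a multiple of 4 and pushes
 * the structure into USTORM internal memory as consecutive u32 words, one
 * REG_WR() per word.
 */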
1485
1486 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1487 {
1488         struct bnx2x *bp = fp->bp;
1489         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1490         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1491         int rx_pkt = 0;
1492
1493 #ifdef BNX2X_STOP_ON_ERROR
1494         if (unlikely(bp->panic))
1495                 return 0;
1496 #endif
1497
1498         /* The CQ "next element" is the same size as a regular element,
1499            that's why it is OK here */
1500         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1501         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1502                 hw_comp_cons++;
1503
1504         bd_cons = fp->rx_bd_cons;
1505         bd_prod = fp->rx_bd_prod;
1506         bd_prod_fw = bd_prod;
1507         sw_comp_cons = fp->rx_comp_cons;
1508         sw_comp_prod = fp->rx_comp_prod;
1509
1510         /* Memory barrier necessary as speculative reads of the rx
1511          * buffer can be ahead of the index in the status block
1512          */
1513         rmb();
1514
1515         DP(NETIF_MSG_RX_STATUS,
1516            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1517            fp->index, hw_comp_cons, sw_comp_cons);
1518
1519         while (sw_comp_cons != hw_comp_cons) {
1520                 struct sw_rx_bd *rx_buf = NULL;
1521                 struct sk_buff *skb;
1522                 union eth_rx_cqe *cqe;
1523                 u8 cqe_fp_flags;
1524                 u16 len, pad;
1525
1526                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1527                 bd_prod = RX_BD(bd_prod);
1528                 bd_cons = RX_BD(bd_cons);
1529
1530                 /* Prefetch the page containing the BD descriptor
1531                    at the producer's index. It will be needed when a new
1532                    skb is allocated */
1533                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1534                                              (&fp->rx_desc_ring[bd_prod])) -
1535                                   PAGE_SIZE + 1));
1536
1537                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1538                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1539
1540                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1541                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1542                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1543                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1544                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1545                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1546
1547                 /* is this a slowpath msg? */
1548                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1549                         bnx2x_sp_event(fp, cqe);
1550                         goto next_cqe;
1551
1552                 /* this is an rx packet */
1553                 } else {
1554                         rx_buf = &fp->rx_buf_ring[bd_cons];
1555                         skb = rx_buf->skb;
1556                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1557                         pad = cqe->fast_path_cqe.placement_offset;
1558
1559                         /* If CQE is marked both TPA_START and TPA_END
1560                            it is a non-TPA CQE */
1561                         if ((!fp->disable_tpa) &&
1562                             (TPA_TYPE(cqe_fp_flags) !=
1563                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1564                                 u16 queue = cqe->fast_path_cqe.queue_index;
1565
1566                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1567                                         DP(NETIF_MSG_RX_STATUS,
1568                                            "calling tpa_start on queue %d\n",
1569                                            queue);
1570
1571                                         bnx2x_tpa_start(fp, queue, skb,
1572                                                         bd_cons, bd_prod);
1573                                         goto next_rx;
1574                                 }
1575
1576                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1577                                         DP(NETIF_MSG_RX_STATUS,
1578                                            "calling tpa_stop on queue %d\n",
1579                                            queue);
1580
1581                                         if (!BNX2X_RX_SUM_FIX(cqe))
1582                                                 BNX2X_ERR("STOP on non-TCP "
1583                                                           "data\n");
1584
1585                                         /* This is the size of the linear
1586                                            data on this skb */
1587                                         len = le16_to_cpu(cqe->fast_path_cqe.
1588                                                                 len_on_bd);
1589                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1590                                                     len, cqe, comp_ring_cons);
1591 #ifdef BNX2X_STOP_ON_ERROR
1592                                         if (bp->panic)
1593                                                 return 0;
1594 #endif
1595
1596                                         bnx2x_update_sge_prod(fp,
1597                                                         &cqe->fast_path_cqe);
1598                                         goto next_cqe;
1599                                 }
1600                         }
1601
1602                         pci_dma_sync_single_for_device(bp->pdev,
1603                                         pci_unmap_addr(rx_buf, mapping),
1604                                                        pad + RX_COPY_THRESH,
1605                                                        PCI_DMA_FROMDEVICE);
1606                         prefetch(skb);
1607                         prefetch(((char *)(skb)) + 128);
1608
1609                         /* is this an error packet? */
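                        /* (the "FALGS" spelling matches the FW HSI header) */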
1610                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1611                                 DP(NETIF_MSG_RX_ERR,
1612                                    "ERROR  flags %x  rx packet %u\n",
1613                                    cqe_fp_flags, sw_comp_cons);
1614                                 fp->eth_q_stats.rx_err_discard_pkt++;
1615                                 goto reuse_rx;
1616                         }
1617
1618                         /* Since we don't have a jumbo ring,
1619                          * copy small packets if the MTU is above 1500
1620                          */
1621                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1622                             (len <= RX_COPY_THRESH)) {
1623                                 struct sk_buff *new_skb;
1624
1625                                 new_skb = netdev_alloc_skb(bp->dev,
1626                                                            len + pad);
1627                                 if (new_skb == NULL) {
1628                                         DP(NETIF_MSG_RX_ERR,
1629                                            "ERROR  packet dropped "
1630                                            "because of alloc failure\n");
1631                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1632                                         goto reuse_rx;
1633                                 }
1634
1635                                 /* aligned copy */
1636                                 skb_copy_from_linear_data_offset(skb, pad,
1637                                                     new_skb->data + pad, len);
1638                                 skb_reserve(new_skb, pad);
1639                                 skb_put(new_skb, len);
1640
1641                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1642
1643                                 skb = new_skb;
1644
1645                         } else
1646                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1647                                 pci_unmap_single(bp->pdev,
1648                                         pci_unmap_addr(rx_buf, mapping),
1649                                                  bp->rx_buf_size,
1650                                                  PCI_DMA_FROMDEVICE);
1651                                 skb_reserve(skb, pad);
1652                                 skb_put(skb, len);
1653
1654                         } else {
1655                                 DP(NETIF_MSG_RX_ERR,
1656                                    "ERROR  packet dropped because "
1657                                    "of alloc failure\n");
1658                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1659 reuse_rx:
1660                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1661                                 goto next_rx;
1662                         }
1663
1664                         skb->protocol = eth_type_trans(skb, bp->dev);
1665
1666                         skb->ip_summed = CHECKSUM_NONE;
1667                         if (bp->rx_csum) {
1668                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1669                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1670                                 else
1671                                         fp->eth_q_stats.hw_csum_err++;
1672                         }
1673                 }
1674
1675                 skb_record_rx_queue(skb, fp->index);
1676
1677 #ifdef BCM_VLAN
1678                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1679                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1680                      PARSING_FLAGS_VLAN))
1681                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1682                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1683                 else
1684 #endif
1685                         netif_receive_skb(skb);
1686
1688 next_rx:
1689                 rx_buf->skb = NULL;
1690
1691                 bd_cons = NEXT_RX_IDX(bd_cons);
1692                 bd_prod = NEXT_RX_IDX(bd_prod);
1693                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1694                 rx_pkt++;
1695 next_cqe:
1696                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1697                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1698
1699                 if (rx_pkt == budget)
1700                         break;
1701         } /* while */
1702
1703         fp->rx_bd_cons = bd_cons;
1704         fp->rx_bd_prod = bd_prod_fw;
1705         fp->rx_comp_cons = sw_comp_cons;
1706         fp->rx_comp_prod = sw_comp_prod;
1707
1708         /* Update producers */
1709         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1710                              fp->rx_sge_prod);
1711
1712         fp->rx_pkt += rx_pkt;
1713         fp->rx_calls++;
1714
1715         return rx_pkt;
1716 }
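
/*
 * A minimal sketch of how a NAPI poll routine would typically consume
 * bnx2x_rx_int() (hypothetical example only - the driver's real poll
 * routine lives elsewhere in this file): process up to 'budget' packets
 * and complete NAPI only once the ring is drained.
 */
#if 0   /* example only */
static int example_poll(struct napi_struct *napi, int budget)
{
        struct bnx2x_fastpath *fp =
                container_of(napi, struct bnx2x_fastpath, napi);
        int work_done = bnx2x_rx_int(fp, budget);

        if (work_done < budget) {
                napi_complete(napi);
                /* interrupts would be re-enabled here via bnx2x_ack_sb() */
        }
        return work_done;
}
#endif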
1717
1718 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1719 {
1720         struct bnx2x_fastpath *fp = fp_cookie;
1721         struct bnx2x *bp = fp->bp;
1722
1723         /* Return here if interrupt is disabled */
1724         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1725                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1726                 return IRQ_HANDLED;
1727         }
1728
1729         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1730            fp->index, fp->sb_id);
1731         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1732
1733 #ifdef BNX2X_STOP_ON_ERROR
1734         if (unlikely(bp->panic))
1735                 return IRQ_HANDLED;
1736 #endif
1737         /* Handle Rx or Tx according to MSI-X vector */
1738         if (fp->is_rx_queue) {
1739                 prefetch(fp->rx_cons_sb);
1740                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1741
1742                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1743
1744         } else {
1745                 prefetch(fp->tx_cons_sb);
1746                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1747
1748                 bnx2x_update_fpsb_idx(fp);
1749                 rmb();
1750                 bnx2x_tx_int(fp);
1751
1752                 /* Re-enable interrupts */
1753                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1754                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1755                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1756                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1757         }
1758
1759         return IRQ_HANDLED;
1760 }
1761
1762 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1763 {
1764         struct bnx2x *bp = netdev_priv(dev_instance);
1765         u16 status = bnx2x_ack_int(bp);
1766         u16 mask;
1767         int i;
1768
1769         /* Return here if interrupt is shared and it's not for us */
1770         if (unlikely(status == 0)) {
1771                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1772                 return IRQ_NONE;
1773         }
1774         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1775
1776         /* Return here if interrupt is disabled */
1777         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1778                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1779                 return IRQ_HANDLED;
1780         }
1781
1782 #ifdef BNX2X_STOP_ON_ERROR
1783         if (unlikely(bp->panic))
1784                 return IRQ_HANDLED;
1785 #endif
1786
1787         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1788                 struct bnx2x_fastpath *fp = &bp->fp[i];
1789
1790                 mask = 0x2 << fp->sb_id;
1791                 if (status & mask) {
1792                         /* Handle Rx or Tx according to SB id */
1793                         if (fp->is_rx_queue) {
1794                                 prefetch(fp->rx_cons_sb);
1795                                 prefetch(&fp->status_blk->u_status_block.
1796                                                         status_block_index);
1797
1798                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1799
1800                         } else {
1801                                 prefetch(fp->tx_cons_sb);
1802                                 prefetch(&fp->status_blk->c_status_block.
1803                                                         status_block_index);
1804
1805                                 bnx2x_update_fpsb_idx(fp);
1806                                 rmb();
1807                                 bnx2x_tx_int(fp);
1808
1809                                 /* Re-enable interrupts */
1810                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1811                                              le16_to_cpu(fp->fp_u_idx),
1812                                              IGU_INT_NOP, 1);
1813                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1814                                              le16_to_cpu(fp->fp_c_idx),
1815                                              IGU_INT_ENABLE, 1);
1816                         }
1817                         status &= ~mask;
1818                 }
1819         }
1820
1821 #ifdef BCM_CNIC
1822         mask = 0x2 << CNIC_SB_ID(bp);
1823         if (status & (mask | 0x1)) {
1824                 struct cnic_ops *c_ops = NULL;
1825
1826                 rcu_read_lock();
1827                 c_ops = rcu_dereference(bp->cnic_ops);
1828                 if (c_ops)
1829                         c_ops->cnic_handler(bp->cnic_data, NULL);
1830                 rcu_read_unlock();
1831
1832                 status &= ~mask;
1833         }
1834 #endif
1835
1836         if (unlikely(status & 0x1)) {
1837                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1838
1839                 status &= ~0x1;
1840                 if (!status)
1841                         return IRQ_HANDLED;
1842         }
1843
1844         if (status)
1845                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1846                    status);
1847
1848         return IRQ_HANDLED;
1849 }
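
/*
 * Status word layout as decoded by the handler above: bit 0 flags the
 * default (slowpath) status block, while fastpath status block 'sb_id'
 * contributes bit (sb_id + 1) - hence the "0x2 << fp->sb_id" masks.
 */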
1850
1851 /* end of fast path */
1852
1853 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1854
1855 /* Link */
1856
1857 /*
1858  * General service functions
1859  */
1860
1861 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1862 {
1863         u32 lock_status;
1864         u32 resource_bit = (1 << resource);
1865         int func = BP_FUNC(bp);
1866         u32 hw_lock_control_reg;
1867         int cnt;
1868
1869         /* Validating that the resource is within range */
1870         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1871                 DP(NETIF_MSG_HW,
1872                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1873                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1874                 return -EINVAL;
1875         }
1876
1877         if (func <= 5) {
1878                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1879         } else {
1880                 hw_lock_control_reg =
1881                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1882         }
1883
1884         /* Validating that the resource is not already taken */
1885         lock_status = REG_RD(bp, hw_lock_control_reg);
1886         if (lock_status & resource_bit) {
1887                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1888                    lock_status, resource_bit);
1889                 return -EEXIST;
1890         }
1891
1892         /* Try for 5 seconds, polling every 5ms */
1893         for (cnt = 0; cnt < 1000; cnt++) {
1894                 /* Try to acquire the lock */
1895                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1896                 lock_status = REG_RD(bp, hw_lock_control_reg);
1897                 if (lock_status & resource_bit)
1898                         return 0;
1899
1900                 msleep(5);
1901         }
1902         DP(NETIF_MSG_HW, "Timeout\n");
1903         return -EAGAIN;
1904 }
1905
1906 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1907 {
1908         u32 lock_status;
1909         u32 resource_bit = (1 << resource);
1910         int func = BP_FUNC(bp);
1911         u32 hw_lock_control_reg;
1912
1913         /* Validating that the resource is within range */
1914         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1915                 DP(NETIF_MSG_HW,
1916                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1917                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1918                 return -EINVAL;
1919         }
1920
1921         if (func <= 5) {
1922                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1923         } else {
1924                 hw_lock_control_reg =
1925                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1926         }
1927
1928         /* Validating that the resource is currently taken */
1929         lock_status = REG_RD(bp, hw_lock_control_reg);
1930         if (!(lock_status & resource_bit)) {
1931                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1932                    lock_status, resource_bit);
1933                 return -EFAULT;
1934         }
1935
1936         REG_WR(bp, hw_lock_control_reg, resource_bit);
1937         return 0;
1938 }
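
/*
 * A minimal usage sketch for the pair above (hypothetical example only):
 * the HW lock arbitrates a shared resource between all functions driving
 * the chip, so each access path brackets its register traffic with
 * acquire/release.
 */
#if 0   /* example only */
static int example_touch_gpio_block(struct bnx2x *bp)
{
        int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

        if (rc)
                return rc;
        /* ... access the shared GPIO registers here ... */
        return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
}
#endif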
1939
1940 /* HW Lock for shared dual port PHYs */
1941 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1942 {
1943         mutex_lock(&bp->port.phy_mutex);
1944
1945         if (bp->port.need_hw_lock)
1946                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1947 }
1948
1949 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1950 {
1951         if (bp->port.need_hw_lock)
1952                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1953
1954         mutex_unlock(&bp->port.phy_mutex);
1955 }
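
/*
 * Note on the two locking levels above: phy_mutex serializes PHY access
 * within this driver instance, while the MDIO HW lock (taken only when
 * need_hw_lock is set) additionally serializes against the other
 * functions sharing the PHY.
 */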
1956
1957 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1958 {
1959         /* The GPIO should be swapped if swap register is set and active */
1960         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1961                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1962         int gpio_shift = gpio_num +
1963                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1964         u32 gpio_mask = (1 << gpio_shift);
1965         u32 gpio_reg;
1966         int value;
1967
1968         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1969                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1970                 return -EINVAL;
1971         }
1972
1973         /* read GPIO value */
1974         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1975
1976         /* get the requested pin value */
1977         if ((gpio_reg & gpio_mask) == gpio_mask)
1978                 value = 1;
1979         else
1980                 value = 0;
1981
1982         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1983
1984         return value;
1985 }
1986
1987 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1988 {
1989         /* The GPIO should be swapped if swap register is set and active */
1990         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1991                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1992         int gpio_shift = gpio_num +
1993                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1994         u32 gpio_mask = (1 << gpio_shift);
1995         u32 gpio_reg;
1996
1997         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1998                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1999                 return -EINVAL;
2000         }
2001
2002         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2003         /* read GPIO, keeping only the float bits */
2004         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2005
2006         switch (mode) {
2007         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2008                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2009                    gpio_num, gpio_shift);
2010                 /* clear FLOAT and set CLR */
2011                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2012                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2013                 break;
2014
2015         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2016                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2017                    gpio_num, gpio_shift);
2018                 /* clear FLOAT and set SET */
2019                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2020                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2021                 break;
2022
2023         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2024                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2025                    gpio_num, gpio_shift);
2026                 /* set FLOAT */
2027                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2028                 break;
2029
2030         default:
2031                 break;
2032         }
2033
2034         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2035         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2036
2037         return 0;
2038 }
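
/*
 * The modes above map onto the per-pin register fields as follows: FLOAT
 * selects input/hi-Z, while driving the pin clears FLOAT and picks either
 * SET (drive high) or CLR (drive low); each call programs exactly one of
 * the three states.
 */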
2039
2040 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2041 {
2042         /* The GPIO should be swapped if swap register is set and active */
2043         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2044                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2045         int gpio_shift = gpio_num +
2046                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2047         u32 gpio_mask = (1 << gpio_shift);
2048         u32 gpio_reg;
2049
2050         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2051                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2052                 return -EINVAL;
2053         }
2054
2055         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2056         /* read GPIO int */
2057         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2058
2059         switch (mode) {
2060         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2061                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2062                                    "output low\n", gpio_num, gpio_shift);
2063                 /* clear SET and set CLR */
2064                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2065                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2066                 break;
2067
2068         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2069                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2070                                    "output high\n", gpio_num, gpio_shift);
2071                 /* clear CLR and set SET */
2072                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2073                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2074                 break;
2075
2076         default:
2077                 break;
2078         }
2079
2080         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2081         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2082
2083         return 0;
2084 }
2085
2086 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2087 {
2088         u32 spio_mask = (1 << spio_num);
2089         u32 spio_reg;
2090
2091         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2092             (spio_num > MISC_REGISTERS_SPIO_7)) {
2093                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2094                 return -EINVAL;
2095         }
2096
2097         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2098         /* read SPIO, keeping only the float bits */
2099         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2100
2101         switch (mode) {
2102         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2103                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2104                 /* clear FLOAT and set CLR */
2105                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2106                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2107                 break;
2108
2109         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2110                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2111                 /* clear FLOAT and set SET */
2112                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2113                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2114                 break;
2115
2116         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2117                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2118                 /* set FLOAT */
2119                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2120                 break;
2121
2122         default:
2123                 break;
2124         }
2125
2126         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2127         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2128
2129         return 0;
2130 }
2131
2132 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2133 {
2134         switch (bp->link_vars.ieee_fc &
2135                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2136         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2137                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2138                                           ADVERTISED_Pause);
2139                 break;
2140
2141         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2142                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2143                                          ADVERTISED_Pause);
2144                 break;
2145
2146         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2147                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2148                 break;
2149
2150         default:
2151                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2152                                           ADVERTISED_Pause);
2153                 break;
2154         }
2155 }
2156
2157 static void bnx2x_link_report(struct bnx2x *bp)
2158 {
2159         if (bp->flags & MF_FUNC_DIS) {
2160                 netif_carrier_off(bp->dev);
2161                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2162                 return;
2163         }
2164
2165         if (bp->link_vars.link_up) {
2166                 if (bp->state == BNX2X_STATE_OPEN)
2167                         netif_carrier_on(bp->dev);
2168                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2169
2170                 printk("%d Mbps ", bp->link_vars.line_speed);
2171
2172                 if (bp->link_vars.duplex == DUPLEX_FULL)
2173                         printk("full duplex");
2174                 else
2175                         printk("half duplex");
2176
2177                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2178                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2179                                 printk(", receive ");
2180                                 if (bp->link_vars.flow_ctrl &
2181                                     BNX2X_FLOW_CTRL_TX)
2182                                         printk("& transmit ");
2183                         } else {
2184                                 printk(", transmit ");
2185                         }
2186                         printk("flow control ON");
2187                 }
2188                 printk("\n");
2189
2190         } else { /* link_down */
2191                 netif_carrier_off(bp->dev);
2192                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2193         }
2194 }
2195
2196 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2197 {
2198         if (!BP_NOMCP(bp)) {
2199                 u8 rc;
2200
2201                 /* Initialize link parameters structure variables */
2202                 /* It is recommended to turn off RX FC for jumbo frames
2203                    for better performance */
2204                 if (bp->dev->mtu > 5000)
2205                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2206                 else
2207                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2208
2209                 bnx2x_acquire_phy_lock(bp);
2210
2211                 if (load_mode == LOAD_DIAG)
2212                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2213
2214                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2215
2216                 bnx2x_release_phy_lock(bp);
2217
2218                 bnx2x_calc_fc_adv(bp);
2219
2220                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2221                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2222                         bnx2x_link_report(bp);
2223                 }
2224
2225                 return rc;
2226         }
2227         BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
2228         return -EINVAL;
2229 }
2230
2231 static void bnx2x_link_set(struct bnx2x *bp)
2232 {
2233         if (!BP_NOMCP(bp)) {
2234                 bnx2x_acquire_phy_lock(bp);
2235                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2236                 bnx2x_release_phy_lock(bp);
2237
2238                 bnx2x_calc_fc_adv(bp);
2239         } else
2240                 BNX2X_ERR("Bootcode is missing - cannot set link\n");
2241 }
2242
2243 static void bnx2x__link_reset(struct bnx2x *bp)
2244 {
2245         if (!BP_NOMCP(bp)) {
2246                 bnx2x_acquire_phy_lock(bp);
2247                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2248                 bnx2x_release_phy_lock(bp);
2249         } else
2250                 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
2251 }
2252
2253 static u8 bnx2x_link_test(struct bnx2x *bp)
2254 {
2255         u8 rc;
2256
2257         bnx2x_acquire_phy_lock(bp);
2258         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2259         bnx2x_release_phy_lock(bp);
2260
2261         return rc;
2262 }
2263
2264 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2265 {
2266         u32 r_param = bp->link_vars.line_speed / 8;
2267         u32 fair_periodic_timeout_usec;
2268         u32 t_fair;
2269
2270         memset(&(bp->cmng.rs_vars), 0,
2271                sizeof(struct rate_shaping_vars_per_port));
2272         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2273
2274         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2275         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2276
2277         /* this is the threshold below which no timer arming will occur;
2278            the 1.25 coefficient makes the threshold a little bigger than
2279            the real time, to compensate for timer inaccuracy */
2280         bp->cmng.rs_vars.rs_threshold =
2281                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2282
2283         /* resolution of fairness timer */
2284         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2285         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2286         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2287
2288         /* this is the threshold below which we won't arm the timer anymore */
2289         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2290
2291         /* we multiply by 1e3/8 to get bytes/msec.
2292            We don't want the credit to exceed
2293            t_fair*FAIR_MEM (the algorithm resolution) */
2294         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2295         /* since each tick is 4 usec */
2296         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2297 }
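
/*
 * Worked example, assuming a 10G link (illustrative only): line_speed =
 * 10000 Mbps gives r_param = 10000/8 = 1250 bytes/usec, so with the
 * 100 usec period rs_threshold = 100 * 1250 * 5/4 = 156250 bytes, and
 * t_fair = T_FAIR_COEF/10000 - the "1000usec for 10G" case noted above.
 */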
2298
2299 /* Calculates the sum of vn_min_rates.
2300    It's needed for further normalizing of the min_rates.
2301    Stores in bp->vn_weight_sum:
2302      the sum of vn_min_rates,
2303        or
2304      0 - if all the min_rates are 0.
2305      In the latter case the fairness algorithm should be deactivated.
2306      If not all min_rates are zero then those that are zero will be set to DEF_MIN_RATE.
2307  */
2308 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2309 {
2310         int all_zero = 1;
2311         int port = BP_PORT(bp);
2312         int vn;
2313
2314         bp->vn_weight_sum = 0;
2315         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2316                 int func = 2*vn + port;
2317                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2318                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2319                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2320
2321                 /* Skip hidden vns */
2322                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2323                         continue;
2324
2325                 /* If min rate is zero - set it to DEF_MIN_RATE */
2326                 if (!vn_min_rate)
2327                         vn_min_rate = DEF_MIN_RATE;
2328                 else
2329                         all_zero = 0;
2330
2331                 bp->vn_weight_sum += vn_min_rate;
2332         }
2333
2334         /* ... only if all min rates are zero - disable fairness */
2335         if (all_zero) {
2336                 bp->cmng.flags.cmng_enables &=
2337                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2338                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
2339                    " fairness will be disabled\n");
2340         } else
2341                 bp->cmng.flags.cmng_enables |=
2342                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2343 }
2344
2345 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2346 {
2347         struct rate_shaping_vars_per_vn m_rs_vn;
2348         struct fairness_vars_per_vn m_fair_vn;
2349         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2350         u16 vn_min_rate, vn_max_rate;
2351         int i;
2352
2353         /* If function is hidden - set min and max to zeroes */
2354         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2355                 vn_min_rate = 0;
2356                 vn_max_rate = 0;
2357
2358         } else {
2359                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2360                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2361                 /* If min rate is zero - set it to DEF_MIN_RATE */
2362                 if (!vn_min_rate)
2363                         vn_min_rate = DEF_MIN_RATE;
2364                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2365                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2366         }
2367         DP(NETIF_MSG_IFUP,
2368            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2369            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2370
2371         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2372         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2373
2374         /* global vn counter - maximal Mbps for this vn */
2375         m_rs_vn.vn_counter.rate = vn_max_rate;
2376
2377         /* quota - number of bytes transmitted in this period */
2378         m_rs_vn.vn_counter.quota =
2379                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2380
2381         if (bp->vn_weight_sum) {
2382                 /* credit for each period of the fairness algorithm:
2383                    number of bytes in T_FAIR (the VNs share the port rate).
2384                    vn_weight_sum should not be larger than 10000, thus
2385                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2386                    than zero */
2387                 m_fair_vn.vn_credit_delta =
2388                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2389                                                  (8 * bp->vn_weight_sum))),
2390                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2391                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2392                    m_fair_vn.vn_credit_delta);
2393         }
2394
2395         /* Store it to internal memory */
2396         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2397                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2398                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2399                        ((u32 *)(&m_rs_vn))[i]);
2400
2401         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2402                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2403                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2404                        ((u32 *)(&m_fair_vn))[i]);
2405 }
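
/*
 * Worked example (illustrative only): with vn_max_rate = 10000 Mbps and
 * the 100 usec rate-shaping period, quota = 10000 * 100 / 8 = 125000,
 * i.e. the number of bytes this VN may send per period at line rate
 * (Mbps * usec / 8 = bytes).
 */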
2406
2407
2408 /* This function is called upon link interrupt */
2409 static void bnx2x_link_attn(struct bnx2x *bp)
2410 {
2411         /* Make sure that we are synced with the current statistics */
2412         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2413
2414         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2415
2416         if (bp->link_vars.link_up) {
2417
2418                 /* dropless flow control */
2419                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2420                         int port = BP_PORT(bp);
2421                         u32 pause_enabled = 0;
2422
2423                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2424                                 pause_enabled = 1;
2425
2426                         REG_WR(bp, BAR_USTRORM_INTMEM +
2427                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2428                                pause_enabled);
2429                 }
2430
2431                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2432                         struct host_port_stats *pstats;
2433
2434                         pstats = bnx2x_sp(bp, port_stats);
2435                         /* reset old bmac stats */
2436                         memset(&(pstats->mac_stx[0]), 0,
2437                                sizeof(struct mac_stx));
2438                 }
2439                 if (bp->state == BNX2X_STATE_OPEN)
2440                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2441         }
2442
2443         /* indicate link status */
2444         bnx2x_link_report(bp);
2445
2446         if (IS_E1HMF(bp)) {
2447                 int port = BP_PORT(bp);
2448                 int func;
2449                 int vn;
2450
2451                 /* Set the attention towards other drivers on the same port */
2452                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2453                         if (vn == BP_E1HVN(bp))
2454                                 continue;
2455
2456                         func = ((vn << 1) | port);
2457                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2458                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2459                 }
2460
2461                 if (bp->link_vars.link_up) {
2462                         int i;
2463
2464                         /* Init rate shaping and fairness contexts */
2465                         bnx2x_init_port_minmax(bp);
2466
2467                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2468                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2469
2470                         /* Store it to internal memory */
2471                         for (i = 0;
2472                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2473                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2474                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2475                                        ((u32 *)(&bp->cmng))[i]);
2476                 }
2477         }
2478 }
2479
2480 static void bnx2x__link_status_update(struct bnx2x *bp)
2481 {
2482         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2483                 return;
2484
2485         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2486
2487         if (bp->link_vars.link_up)
2488                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2489         else
2490                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2491
2492         bnx2x_calc_vn_weight_sum(bp);
2493
2494         /* indicate link status */
2495         bnx2x_link_report(bp);
2496 }
2497
2498 static void bnx2x_pmf_update(struct bnx2x *bp)
2499 {
2500         int port = BP_PORT(bp);
2501         u32 val;
2502
2503         bp->port.pmf = 1;
2504         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2505
2506         /* enable nig attention */
2507         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2508         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2509         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2510
2511         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2512 }
2513
2514 /* end of Link */
2515
2516 /* slow path */
2517
2518 /*
2519  * General service functions
2520  */
2521
2522 /* send the MCP a request, block until there is a reply */
2523 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2524 {
2525         int func = BP_FUNC(bp);
2526         u32 seq = ++bp->fw_seq;
2527         u32 rc = 0;
2528         u32 cnt = 1;
2529         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2530
2531         mutex_lock(&bp->fw_mb_mutex);
2532         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2533         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2534
2535         do {
2536                 /* let the FW do its magic ... */
2537                 msleep(delay);
2538
2539                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2540
2541                 /* Give the FW up to 5 seconds (500*10ms) */
2542         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2543
2544         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2545            cnt*delay, rc, seq);
2546
2547         /* is this a reply to our command? */
2548         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2549                 rc &= FW_MSG_CODE_MASK;
2550         else {
2551                 /* FW BUG! */
2552                 BNX2X_ERR("FW failed to respond!\n");
2553                 bnx2x_fw_dump(bp);
2554                 rc = 0;
2555         }
2556         mutex_unlock(&bp->fw_mb_mutex);
2557
2558         return rc;
2559 }
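
/*
 * A minimal usage sketch (hypothetical example only): fw_mb_mutex lets
 * concurrent callers each claim their own sequence number and match only
 * the reply carrying it, while a return value of 0 means the FW never
 * answered.
 */
#if 0   /* example only */
static void example_fw_ack(struct bnx2x *bp)
{
        u32 reply = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);

        if (!reply)
                BNX2X_ERR("FW mailbox command timed out\n");
}
#endif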
2560
2561 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2562 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2563 static void bnx2x_set_rx_mode(struct net_device *dev);
2564
2565 static void bnx2x_e1h_disable(struct bnx2x *bp)
2566 {
2567         int port = BP_PORT(bp);
2568         int i;
2569
2570         bp->rx_mode = BNX2X_RX_MODE_NONE;
2571         bnx2x_set_storm_rx_mode(bp);
2572
2573         netif_tx_disable(bp->dev);
2574         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2575
2576         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2577
2578         bnx2x_set_eth_mac_addr_e1h(bp, 0);
2579
2580         for (i = 0; i < MC_HASH_SIZE; i++)
2581                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2582
2583         netif_carrier_off(bp->dev);
2584 }
2585
2586 static void bnx2x_e1h_enable(struct bnx2x *bp)
2587 {
2588         int port = BP_PORT(bp);
2589
2590         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2591
2592         bnx2x_set_eth_mac_addr_e1h(bp, 1);
2593
2594         /* Tx queues should only be re-enabled */
2595         netif_tx_wake_all_queues(bp->dev);
2596
2597         /* Initialize the receive filter. */
2598         bnx2x_set_rx_mode(bp->dev);
2599 }
2600
2601 static void bnx2x_update_min_max(struct bnx2x *bp)
2602 {
2603         int port = BP_PORT(bp);
2604         int vn, i;
2605
2606         /* Init rate shaping and fairness contexts */
2607         bnx2x_init_port_minmax(bp);
2608
2609         bnx2x_calc_vn_weight_sum(bp);
2610
2611         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2612                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2613
2614         if (bp->port.pmf) {
2615                 int func;
2616
2617                 /* Set the attention towards other drivers on the same port */
2618                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2619                         if (vn == BP_E1HVN(bp))
2620                                 continue;
2621
2622                         func = ((vn << 1) | port);
2623                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2624                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2625                 }
2626
2627                 /* Store it to internal memory */
2628                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2629                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2630                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2631                                ((u32 *)(&bp->cmng))[i]);
2632         }
2633 }
2634
2635 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2636 {
2637         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2638
2639         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2640
2641                 /*
2642                  * This is the only place besides the function initialization
2643                  * where bp->flags can change, so it is done without any
2644                  * locks
2645                  */
2646                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2647                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2648                         bp->flags |= MF_FUNC_DIS;
2649
2650                         bnx2x_e1h_disable(bp);
2651                 } else {
2652                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2653                         bp->flags &= ~MF_FUNC_DIS;
2654
2655                         bnx2x_e1h_enable(bp);
2656                 }
2657                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2658         }
2659         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2660
2661                 bnx2x_update_min_max(bp);
2662                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2663         }
2664
2665         /* Report results to MCP */
2666         if (dcc_event)
2667                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2668         else
2669                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2670 }
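
/*
 * Any dcc_event bits still set at this point were not handled above, so
 * the MCP is told the DCC request failed; a fully-consumed event mask is
 * acknowledged with DCC_OK instead.
 */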
2671
2672 /* must be called under the spq lock */
2673 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2674 {
2675         struct eth_spe *next_spe = bp->spq_prod_bd;
2676
2677         if (bp->spq_prod_bd == bp->spq_last_bd) {
2678                 bp->spq_prod_bd = bp->spq;
2679                 bp->spq_prod_idx = 0;
2680                 DP(NETIF_MSG_TIMER, "end of spq\n");
2681         } else {
2682                 bp->spq_prod_bd++;
2683                 bp->spq_prod_idx++;
2684         }
2685         return next_spe;
2686 }
2687
2688 /* must be called under the spq lock */
2689 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2690 {
2691         int func = BP_FUNC(bp);
2692
2693         /* Make sure that BD data is updated before writing the producer */
2694         wmb();
2695
2696         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2697                bp->spq_prod_idx);
2698         mmiowb();
2699 }
2700
2701 /* the slow path queue is odd since completions arrive on the fastpath ring */
2702 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2703                          u32 data_hi, u32 data_lo, int common)
2704 {
2705         struct eth_spe *spe;
2706
2707         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2708            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2709            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2710            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2711            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2712
2713 #ifdef BNX2X_STOP_ON_ERROR
2714         if (unlikely(bp->panic))
2715                 return -EIO;
2716 #endif
2717
2718         spin_lock_bh(&bp->spq_lock);
2719
2720         if (!bp->spq_left) {
2721                 BNX2X_ERR("BUG! SPQ ring full!\n");
2722                 spin_unlock_bh(&bp->spq_lock);
2723                 bnx2x_panic();
2724                 return -EBUSY;
2725         }
2726
2727         spe = bnx2x_sp_get_next(bp);
2728
2729         /* CID needs the port number to be encoded in it */
2730         spe->hdr.conn_and_cmd_data =
2731                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2732                                      HW_CID(bp, cid)));
2733         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2734         if (common)
2735                 spe->hdr.type |=
2736                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2737
2738         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2739         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2740
2741         bp->spq_left--;
2742
2743         bnx2x_sp_prod_update(bp);
2744         spin_unlock_bh(&bp->spq_lock);
2745         return 0;
2746 }
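
/*
 * Note on the accounting above: spq_left is decremented under spq_lock
 * when an element is posted and is credited back when its completion
 * arrives on the fastpath ring (handled elsewhere in this file), which is
 * why a full ring is treated as a driver bug rather than a transient
 * condition.
 */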
2747
2748 /* acquire split MCP access lock register */
2749 static int bnx2x_acquire_alr(struct bnx2x *bp)
2750 {
2751         u32 i, j, val;
2752         int rc = 0;
2753
2754         might_sleep();
2755         i = 100;
2756         for (j = 0; j < i*10; j++) {
2757                 val = (1UL << 31);
2758                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2759                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2760                 if (val & (1UL << 31))
2761                         break;
2762
2763                 msleep(5);
2764         }
2765         if (!(val & (1UL << 31))) {
2766                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2767                 rc = -EBUSY;
2768         }
2769
2770         return rc;
2771 }
2772
2773 /* release split MCP access lock register */
2774 static void bnx2x_release_alr(struct bnx2x *bp)
2775 {
2776         u32 val = 0;
2777
2778         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2779 }
2780
2781 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2782 {
2783         struct host_def_status_block *def_sb = bp->def_status_blk;
2784         u16 rc = 0;
2785
2786         barrier(); /* status block is written to by the chip */
2787         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2788                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2789                 rc |= 1;
2790         }
2791         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2792                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2793                 rc |= 2;
2794         }
2795         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2796                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2797                 rc |= 4;
2798         }
2799         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2800                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2801                 rc |= 8;
2802         }
2803         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2804                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2805                 rc |= 16;
2806         }
2807         return rc;
2808 }
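
/*
 * The return value above is a bitmask of which default status block
 * indices moved: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM; the caller uses it to decide which
 * events to service.
 */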
2809
2810 /*
2811  * slow path service functions
2812  */
2813
2814 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2815 {
2816         int port = BP_PORT(bp);
2817         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2818                        COMMAND_REG_ATTN_BITS_SET);
2819         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2820                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2821         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2822                                        NIG_REG_MASK_INTERRUPT_PORT0;
2823         u32 aeu_mask;
2824         u32 nig_mask = 0;
2825
2826         if (bp->attn_state & asserted)
2827                 BNX2X_ERR("IGU ERROR\n");
2828
2829         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2830         aeu_mask = REG_RD(bp, aeu_addr);
2831
2832         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2833            aeu_mask, asserted);
2834         aeu_mask &= ~(asserted & 0xff);
2835         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2836
2837         REG_WR(bp, aeu_addr, aeu_mask);
2838         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2839
2840         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2841         bp->attn_state |= asserted;
2842         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2843
2844         if (asserted & ATTN_HARD_WIRED_MASK) {
2845                 if (asserted & ATTN_NIG_FOR_FUNC) {
2846
2847                         bnx2x_acquire_phy_lock(bp);
2848
2849                         /* save nig interrupt mask */
2850                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2851                         REG_WR(bp, nig_int_mask_addr, 0);
2852
2853                         bnx2x_link_attn(bp);
2854
2855                         /* handle unicore attn? */
2856                 }
2857                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2858                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2859
2860                 if (asserted & GPIO_2_FUNC)
2861                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2862
2863                 if (asserted & GPIO_3_FUNC)
2864                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2865
2866                 if (asserted & GPIO_4_FUNC)
2867                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2868
2869                 if (port == 0) {
2870                         if (asserted & ATTN_GENERAL_ATTN_1) {
2871                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2872                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2873                         }
2874                         if (asserted & ATTN_GENERAL_ATTN_2) {
2875                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2876                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2877                         }
2878                         if (asserted & ATTN_GENERAL_ATTN_3) {
2879                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2880                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2881                         }
2882                 } else {
2883                         if (asserted & ATTN_GENERAL_ATTN_4) {
2884                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2885                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2886                         }
2887                         if (asserted & ATTN_GENERAL_ATTN_5) {
2888                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2889                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2890                         }
2891                         if (asserted & ATTN_GENERAL_ATTN_6) {
2892                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2893                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2894                         }
2895                 }
2896
2897         } /* if hardwired */
2898
2899         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2900            asserted, hc_addr);
2901         REG_WR(bp, hc_addr, asserted);
2902
2903         /* now set back the mask */
2904         if (asserted & ATTN_NIG_FOR_FUNC) {
2905                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2906                 bnx2x_release_phy_lock(bp);
2907         }
2908 }
2909
2910 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2911 {
2912         int port = BP_PORT(bp);
2913
2914         /* mark the failure */
2915         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2916         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2917         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2918                  bp->link_params.ext_phy_config);
2919
2920         /* log the failure */
2921         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2922                " the driver to shut down the card to prevent permanent"
2923                " damage.  Please contact Dell Support for assistance\n",
2924                bp->dev->name);
2925 }
2926
2927 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2928 {
2929         int port = BP_PORT(bp);
2930         int reg_offset;
2931         u32 val, swap_val, swap_override;
2932
2933         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2934                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2935
2936         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2937
2938                 val = REG_RD(bp, reg_offset);
2939                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2940                 REG_WR(bp, reg_offset, val);
2941
2942                 BNX2X_ERR("SPIO5 hw attention\n");
2943
2944                 /* Fan failure attention */
2945                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2946                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2947                         /* Low power mode is controlled by GPIO 2 */
2948                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2949                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2950                         /* The PHY reset is controlled by GPIO 1 */
2951                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2952                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2953                         break;
2954
2955                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2956                         /* The PHY reset is controlled by GPIO 1 */
2957                         /* fake the port number to cancel the swap done in
2958                            set_gpio() */
2959                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2960                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2961                         port = (swap_val && swap_override) ^ 1;
2962                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2963                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2964                         break;
2965
2966                 default:
2967                         break;
2968                 }
2969                 bnx2x_fan_failure(bp);
2970         }
2971
2972         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2973                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2974                 bnx2x_acquire_phy_lock(bp);
2975                 bnx2x_handle_module_detect_int(&bp->link_params);
2976                 bnx2x_release_phy_lock(bp);
2977         }
2978
2979         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2980
2981                 val = REG_RD(bp, reg_offset);
2982                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2983                 REG_WR(bp, reg_offset, val);
2984
2985                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2986                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2987                 bnx2x_panic();
2988         }
2989 }
2990
2991 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2992 {
2993         u32 val;
2994
2995         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2996
2997                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2998                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2999                 /* DORQ discard attention */
3000                 if (val & 0x2)
3001                         BNX2X_ERR("FATAL error from DORQ\n");
3002         }
3003
3004         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3005
3006                 int port = BP_PORT(bp);
3007                 int reg_offset;
3008
3009                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3010                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3011
3012                 val = REG_RD(bp, reg_offset);
3013                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3014                 REG_WR(bp, reg_offset, val);
3015
3016                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3017                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3018                 bnx2x_panic();
3019         }
3020 }
3021
3022 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3023 {
3024         u32 val;
3025
3026         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3027
3028                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3029                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3030                 /* CFC error attention */
3031                 if (val & 0x2)
3032                         BNX2X_ERR("FATAL error from CFC\n");
3033         }
3034
3035         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3036
3037                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3038                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3039                 /* RQ_USDMDP_FIFO_OVERFLOW */
3040                 if (val & 0x18000)
3041                         BNX2X_ERR("FATAL error from PXP\n");
3042         }
3043
3044         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3045
3046                 int port = BP_PORT(bp);
3047                 int reg_offset;
3048
3049                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3050                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3051
3052                 val = REG_RD(bp, reg_offset);
3053                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3054                 REG_WR(bp, reg_offset, val);
3055
3056                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3057                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3058                 bnx2x_panic();
3059         }
3060 }
3061
3062 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3063 {
3064         u32 val;
3065
3066         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3067
3068                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3069                         int func = BP_FUNC(bp);
3070
3071                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3072                         bp->mf_config = SHMEM_RD(bp,
3073                                            mf_cfg.func_mf_config[func].config);
3074                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3075                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3076                                 bnx2x_dcc_event(bp,
3077                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3078                         bnx2x__link_status_update(bp);
3079                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3080                                 bnx2x_pmf_update(bp);
3081
3082                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3083
3084                         BNX2X_ERR("MC assert!\n");
3085                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3086                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3087                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3088                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3089                         bnx2x_panic();
3090
3091                 } else if (attn & BNX2X_MCP_ASSERT) {
3092
3093                         BNX2X_ERR("MCP assert!\n");
3094                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3095                         bnx2x_fw_dump(bp);
3096
3097                 } else
3098                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3099         }
3100
3101         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3102                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3103                 if (attn & BNX2X_GRC_TIMEOUT) {
3104                         val = CHIP_IS_E1H(bp) ?
3105                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3106                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3107                 }
3108                 if (attn & BNX2X_GRC_RSV) {
3109                         val = CHIP_IS_E1H(bp) ?
3110                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3111                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3112                 }
3113                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3114         }
3115 }
3116
3117 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3118 {
3119         struct attn_route attn;
3120         struct attn_route group_mask;
3121         int port = BP_PORT(bp);
3122         int index;
3123         u32 reg_addr;
3124         u32 val;
3125         u32 aeu_mask;
3126
3127         /* need to take HW lock because MCP or other port might also
3128            try to handle this event */
3129         bnx2x_acquire_alr(bp);
3130
3131         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3132         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3133         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3134         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3135         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3136            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3137
3138         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3139                 if (deasserted & (1 << index)) {
3140                         group_mask = bp->attn_group[index];
3141
3142                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3143                            index, group_mask.sig[0], group_mask.sig[1],
3144                            group_mask.sig[2], group_mask.sig[3]);
3145
3146                         bnx2x_attn_int_deasserted3(bp,
3147                                         attn.sig[3] & group_mask.sig[3]);
3148                         bnx2x_attn_int_deasserted1(bp,
3149                                         attn.sig[1] & group_mask.sig[1]);
3150                         bnx2x_attn_int_deasserted2(bp,
3151                                         attn.sig[2] & group_mask.sig[2]);
3152                         bnx2x_attn_int_deasserted0(bp,
3153                                         attn.sig[0] & group_mask.sig[0]);
3154
3155                         if ((attn.sig[0] & group_mask.sig[0] &
3156                                                 HW_PRTY_ASSERT_SET_0) ||
3157                             (attn.sig[1] & group_mask.sig[1] &
3158                                                 HW_PRTY_ASSERT_SET_1) ||
3159                             (attn.sig[2] & group_mask.sig[2] &
3160                                                 HW_PRTY_ASSERT_SET_2))
3161                                 BNX2X_ERR("FATAL HW block parity attention\n");
3162                 }
3163         }
3164
3165         bnx2x_release_alr(bp);
3166
3167         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3168
3169         val = ~deasserted;
3170         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3171            val, reg_addr);
3172         REG_WR(bp, reg_addr, val);
3173
3174         if (~bp->attn_state & deasserted)
3175                 BNX2X_ERR("IGU ERROR\n");
3176
3177         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3178                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3179
3180         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3181         aeu_mask = REG_RD(bp, reg_addr);
3182
3183         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3184            aeu_mask, deasserted);
3185         aeu_mask |= (deasserted & 0xff);
3186         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3187
3188         REG_WR(bp, reg_addr, aeu_mask);
3189         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3190
3191         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3192         bp->attn_state &= ~deasserted;
3193         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3194 }
3195
3196 static void bnx2x_attn_int(struct bnx2x *bp)
3197 {
3198         /* read local copy of bits */
3199         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3200                                                                 attn_bits);
3201         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3202                                                                 attn_bits_ack);
3203         u32 attn_state = bp->attn_state;
3204
3205         /* look for changed bits */
3206         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3207         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3208
3209         DP(NETIF_MSG_HW,
3210            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3211            attn_bits, attn_ack, asserted, deasserted);
3212
3213         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3214                 BNX2X_ERR("BAD attention state\n");
3215
3216         /* handle bits that were raised */
3217         if (asserted)
3218                 bnx2x_attn_int_asserted(bp, asserted);
3219
3220         if (deasserted)
3221                 bnx2x_attn_int_deasserted(bp, deasserted);
3222 }
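
/*
 * Worked example of the derivation above: with attn_bits = 0110,
 * attn_ack = 0100 and attn_state = 0100 (binary),
 *   asserted   =  0110 & ~0100 & ~0100 = 0010  (newly raised)
 *   deasserted = ~0110 &  0100 &  0100 = 0000  (nothing cleared)
 * A state is "BAD" when attn_bits and attn_ack agree with each other
 * but disagree with attn_state, which is exactly the
 * ~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state) test.
 */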
3223
3224 static void bnx2x_sp_task(struct work_struct *work)
3225 {
3226         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3227         u16 status;
3228
3230         /* Return here if interrupt is disabled */
3231         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3232                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3233                 return;
3234         }
3235
3236         status = bnx2x_update_dsb_idx(bp);
3237 /*      if (status == 0)                                     */
3238 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3239
3240         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3241
3242         /* HW attentions */
3243         if (status & 0x1)
3244                 bnx2x_attn_int(bp);
3245
3246         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3247                      IGU_INT_NOP, 1);
3248         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3249                      IGU_INT_NOP, 1);
3250         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3251                      IGU_INT_NOP, 1);
3252         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3253                      IGU_INT_NOP, 1);
3254         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3255                      IGU_INT_ENABLE, 1);
3256
3257 }
3258
3259 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3260 {
3261         struct net_device *dev = dev_instance;
3262         struct bnx2x *bp = netdev_priv(dev);
3263
3264         /* Return here if interrupt is disabled */
3265         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3266                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3267                 return IRQ_HANDLED;
3268         }
3269
3270         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3271
3272 #ifdef BNX2X_STOP_ON_ERROR
3273         if (unlikely(bp->panic))
3274                 return IRQ_HANDLED;
3275 #endif
3276
3277 #ifdef BCM_CNIC
3278         {
3279                 struct cnic_ops *c_ops;
3280
3281                 rcu_read_lock();
3282                 c_ops = rcu_dereference(bp->cnic_ops);
3283                 if (c_ops)
3284                         c_ops->cnic_handler(bp->cnic_data, NULL);
3285                 rcu_read_unlock();
3286         }
3287 #endif
3288         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3289
3290         return IRQ_HANDLED;
3291 }
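
/*
 * Note that the slowpath MSI-X vector only masks further slowpath
 * interrupts (IGU_INT_DISABLE) and defers to sp_task on bnx2x_wq;
 * the attention handling itself runs in process context, where
 * bnx2x_acquire_alr() and the HW locks are allowed to sleep.
 */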
3292
3293 /* end of slow path */
3294
3295 /* Statistics */
3296
3297 /****************************************************************************
3298 * Macros
3299 ****************************************************************************/
3300
3301 /* sum[hi:lo] += add[hi:lo] */
3302 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3303         do { \
3304                 s_lo += a_lo; \
3305                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3306         } while (0)
3307
3308 /* difference = minuend - subtrahend */
3309 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3310         do { \
3311                 if (m_lo < s_lo) { \
3312                         /* underflow */ \
3313                         d_hi = m_hi - s_hi; \
3314                         if (d_hi > 0) { \
3315                                 /* we can 'loan' 1 */ \
3316                                 d_hi--; \
3317                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3318                         } else { \
3319                                 /* m_hi <= s_hi */ \
3320                                 d_hi = 0; \
3321                                 d_lo = 0; \
3322                         } \
3323                 } else { \
3324                         /* m_lo >= s_lo */ \
3325                         if (m_hi < s_hi) { \
3326                                 d_hi = 0; \
3327                                 d_lo = 0; \
3328                         } else { \
3329                                 /* m_hi >= s_hi */ \
3330                                 d_hi = m_hi - s_hi; \
3331                                 d_lo = m_lo - s_lo; \
3332                         } \
3333                 } \
3334         } while (0)
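
/*
 * Minimal sketch (example only, not driver code) of the carry/borrow
 * behaviour above.  Note that with u32 operands the "loan" branch of
 * DIFF_64 is taken whenever the hi words differ, so the 0:0 clamp
 * only fires when the hi words are equal and the lo subtrahend is
 * larger.
 */
#if 0	/* example only */
static void example_split64(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;
	u32 d_hi, d_lo;

	ADD_64(s_hi, 0, s_lo, 1);	 /* carry: s_hi:s_lo == 1:0 */
	DIFF_64(d_hi, 8, 7, d_lo, 5, 9); /* borrow: 0:0xfffffffc */
	DIFF_64(d_hi, 7, 7, d_lo, 5, 9); /* clamped: 0:0 */
}
#endif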
3335
3336 #define UPDATE_STAT64(s, t) \
3337         do { \
3338                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3339                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3340                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3341                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3342                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3343                        pstats->mac_stx[1].t##_lo, diff.lo); \
3344         } while (0)
3345
3346 #define UPDATE_STAT64_NIG(s, t) \
3347         do { \
3348                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3349                         diff.lo, new->s##_lo, old->s##_lo); \
3350                 ADD_64(estats->t##_hi, diff.hi, \
3351                        estats->t##_lo, diff.lo); \
3352         } while (0)
3353
3354 /* sum[hi:lo] += add */
3355 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3356         do { \
3357                 s_lo += a; \
3358                 s_hi += (s_lo < a) ? 1 : 0; \
3359         } while (0)
3360
3361 #define UPDATE_EXTEND_STAT(s) \
3362         do { \
3363                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3364                               pstats->mac_stx[1].s##_lo, \
3365                               new->s); \
3366         } while (0)
3367
3368 #define UPDATE_EXTEND_TSTAT(s, t) \
3369         do { \
3370                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3371                 old_tclient->s = tclient->s; \
3372                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3373         } while (0)
3374
3375 #define UPDATE_EXTEND_USTAT(s, t) \
3376         do { \
3377                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3378                 old_uclient->s = uclient->s; \
3379                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3380         } while (0)
3381
3382 #define UPDATE_EXTEND_XSTAT(s, t) \
3383         do { \
3384                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3385                 old_xclient->s = xclient->s; \
3386                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3387         } while (0)
3388
3389 /* minuend -= subtrahend */
3390 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3391         do { \
3392                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3393         } while (0)
3394
3395 /* minuend[hi:lo] -= subtrahend */
3396 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3397         do { \
3398                 SUB_64(m_hi, 0, m_lo, s); \
3399         } while (0)
3400
3401 #define SUB_EXTEND_USTAT(s, t) \
3402         do { \
3403                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3404                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3405         } while (0)
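
/*
 * The UPDATE_EXTEND_* and SUB_EXTEND_* helpers rely on unsigned 32-bit
 * wraparound: "new - old" yields the correct delta even when the
 * firmware counter wrapped past zero between samples, e.g.
 * (u32)(0x00000002 - 0xfffffffe) == 4.  The delta is then folded into
 * the 64-bit accumulator with ADD_EXTEND_64 / SUB_EXTEND_64.
 */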
3406
3407 /*
3408  * General service functions
3409  */
3410
3411 static inline long bnx2x_hilo(u32 *hiref)
3412 {
3413         u32 lo = *(hiref + 1);
3414 #if (BITS_PER_LONG == 64)
3415         u32 hi = *hiref;
3416
3417         return HILO_U64(hi, lo);
3418 #else
3419         return lo;
3420 #endif
3421 }
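
/*
 * Statistics dwords are laid out hi-word first, so hiref points at the
 * hi dword and hiref + 1 at the lo dword.  On 32-bit kernels only the
 * low 32 bits fit in the returned long, matching the unsigned long
 * counters in struct net_device_stats.
 */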
3422
3423 /*
3424  * Init service functions
3425  */
3426
3427 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3428 {
3429         if (!bp->stats_pending) {
3430                 struct eth_query_ramrod_data ramrod_data = {0};
3431                 int i, rc;
3432
3433                 ramrod_data.drv_counter = bp->stats_counter++;
3434                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3435                 for_each_queue(bp, i)
3436                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3437
3438                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3439                                    ((u32 *)&ramrod_data)[1],
3440                                    ((u32 *)&ramrod_data)[0], 0);
3441                 if (rc == 0) {
3442                         /* stats ramrod has its own slot on the spq */
3443                         bp->spq_left++;
3444                         bp->stats_pending = 1;
3445                 }
3446         }
3447 }
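
/*
 * Note: eth_query_ramrod_data is small enough to travel by value in
 * the SPE data union (dword 1 as data_hi, dword 0 as data_lo), so no
 * DMA mapping is needed, and since the stats ramrod has a reserved
 * slot the spq_left credit taken by bnx2x_sp_post() is given back.
 */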
3448
3449 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3450 {
3451         struct dmae_command *dmae = &bp->stats_dmae;
3452         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3453
3454         *stats_comp = DMAE_COMP_VAL;
3455         if (CHIP_REV_IS_SLOW(bp))
3456                 return;
3457
3458         /* loader */
3459         if (bp->executer_idx) {
3460                 int loader_idx = PMF_DMAE_C(bp);
3461
3462                 memset(dmae, 0, sizeof(struct dmae_command));
3463
3464                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3465                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3466                                 DMAE_CMD_DST_RESET |
3467 #ifdef __BIG_ENDIAN
3468                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3469 #else
3470                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3471 #endif
3472                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3473                                                DMAE_CMD_PORT_0) |
3474                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3475                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3476                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3477                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3478                                      sizeof(struct dmae_command) *
3479                                      (loader_idx + 1)) >> 2;
3480                 dmae->dst_addr_hi = 0;
3481                 dmae->len = sizeof(struct dmae_command) >> 2;
3482                 if (CHIP_IS_E1(bp))
3483                         dmae->len--;
3484                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3485                 dmae->comp_addr_hi = 0;
3486                 dmae->comp_val = 1;
3487
3488                 *stats_comp = 0;
3489                 bnx2x_post_dmae(bp, dmae, loader_idx);
3490
3491         } else if (bp->func_stx) {
3492                 *stats_comp = 0;
3493                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3494         }
3495 }
3496
3497 static int bnx2x_stats_comp(struct bnx2x *bp)
3498 {
3499         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3500         int cnt = 10;
3501
3502         might_sleep();
3503         while (*stats_comp != DMAE_COMP_VAL) {
3504                 if (!cnt) {
3505                         BNX2X_ERR("timeout waiting for stats to finish\n");
3506                         break;
3507                 }
3508                 cnt--;
3509                 msleep(1);
3510         }
3511         return 1;
3512 }
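
/*
 * Note: the poll above is bounded at roughly 10 x msleep(1); on
 * timeout the error is logged but the function still returns 1, so
 * callers always proceed.
 */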
3513
3514 /*
3515  * Statistics service functions
3516  */
3517
3518 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3519 {
3520         struct dmae_command *dmae;
3521         u32 opcode;
3522         int loader_idx = PMF_DMAE_C(bp);
3523         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3524
3525         /* sanity */
3526         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3527                 BNX2X_ERR("BUG!\n");
3528                 return;
3529         }
3530
3531         bp->executer_idx = 0;
3532
3533         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3534                   DMAE_CMD_C_ENABLE |
3535                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3536 #ifdef __BIG_ENDIAN
3537                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3538 #else
3539                   DMAE_CMD_ENDIANITY_DW_SWAP |
3540 #endif
3541                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3542                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3543
3544         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3545         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3546         dmae->src_addr_lo = bp->port.port_stx >> 2;
3547         dmae->src_addr_hi = 0;
3548         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3549         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3550         dmae->len = DMAE_LEN32_RD_MAX;
3551         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3552         dmae->comp_addr_hi = 0;
3553         dmae->comp_val = 1;
3554
3555         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3556         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3557         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3558         dmae->src_addr_hi = 0;
3559         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3560                                    DMAE_LEN32_RD_MAX * 4);
3561         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3562                                    DMAE_LEN32_RD_MAX * 4);
3563         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3564         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3565         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3566         dmae->comp_val = DMAE_COMP_VAL;
3567
3568         *stats_comp = 0;
3569         bnx2x_hw_stats_post(bp);
3570         bnx2x_stats_comp(bp);
3571 }
3572
3573 static void bnx2x_port_stats_init(struct bnx2x *bp)
3574 {
3575         struct dmae_command *dmae;
3576         int port = BP_PORT(bp);
3577         int vn = BP_E1HVN(bp);
3578         u32 opcode;
3579         int loader_idx = PMF_DMAE_C(bp);
3580         u32 mac_addr;
3581         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3582
3583         /* sanity */
3584         if (!bp->link_vars.link_up || !bp->port.pmf) {
3585                 BNX2X_ERR("BUG!\n");
3586                 return;
3587         }
3588
3589         bp->executer_idx = 0;
3590
3591         /* MCP */
3592         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3593                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3594                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3595 #ifdef __BIG_ENDIAN
3596                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3597 #else
3598                   DMAE_CMD_ENDIANITY_DW_SWAP |
3599 #endif
3600                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3601                   (vn << DMAE_CMD_E1HVN_SHIFT));
3602
3603         if (bp->port.port_stx) {
3604
3605                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3606                 dmae->opcode = opcode;
3607                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3608                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3609                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3610                 dmae->dst_addr_hi = 0;
3611                 dmae->len = sizeof(struct host_port_stats) >> 2;
3612                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3613                 dmae->comp_addr_hi = 0;
3614                 dmae->comp_val = 1;
3615         }
3616
3617         if (bp->func_stx) {
3618
3619                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3620                 dmae->opcode = opcode;
3621                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3622                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3623                 dmae->dst_addr_lo = bp->func_stx >> 2;
3624                 dmae->dst_addr_hi = 0;
3625                 dmae->len = sizeof(struct host_func_stats) >> 2;
3626                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3627                 dmae->comp_addr_hi = 0;
3628                 dmae->comp_val = 1;
3629         }
3630
3631         /* MAC */
3632         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3633                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3634                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3635 #ifdef __BIG_ENDIAN
3636                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3637 #else
3638                   DMAE_CMD_ENDIANITY_DW_SWAP |
3639 #endif
3640                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3641                   (vn << DMAE_CMD_E1HVN_SHIFT));
3642
3643         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3644
3645                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3646                                    NIG_REG_INGRESS_BMAC0_MEM);
3647
3648                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3649                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3650                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3651                 dmae->opcode = opcode;
3652                 dmae->src_addr_lo = (mac_addr +
3653                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3654                 dmae->src_addr_hi = 0;
3655                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3656                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3657                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3658                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3659                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3660                 dmae->comp_addr_hi = 0;
3661                 dmae->comp_val = 1;
3662
3663                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3664                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3665                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3666                 dmae->opcode = opcode;
3667                 dmae->src_addr_lo = (mac_addr +
3668                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3669                 dmae->src_addr_hi = 0;
3670                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3671                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3672                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3673                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3674                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3675                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3676                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3677                 dmae->comp_addr_hi = 0;
3678                 dmae->comp_val = 1;
3679
3680         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3681
3682                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3683
3684                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3685                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3686                 dmae->opcode = opcode;
3687                 dmae->src_addr_lo = (mac_addr +
3688                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3689                 dmae->src_addr_hi = 0;
3690                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3691                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3692                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3693                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3694                 dmae->comp_addr_hi = 0;
3695                 dmae->comp_val = 1;
3696
3697                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3698                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3699                 dmae->opcode = opcode;
3700                 dmae->src_addr_lo = (mac_addr +
3701                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3702                 dmae->src_addr_hi = 0;
3703                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3704                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3705                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3706                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3707                 dmae->len = 1;
3708                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3709                 dmae->comp_addr_hi = 0;
3710                 dmae->comp_val = 1;
3711
3712                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3713                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3714                 dmae->opcode = opcode;
3715                 dmae->src_addr_lo = (mac_addr +
3716                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3717                 dmae->src_addr_hi = 0;
3718                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3719                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3720                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3721                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3722                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3723                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3724                 dmae->comp_addr_hi = 0;
3725                 dmae->comp_val = 1;
3726         }
3727
3728         /* NIG */
3729         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3730         dmae->opcode = opcode;
3731         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3732                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3733         dmae->src_addr_hi = 0;
3734         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3735         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3736         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3737         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3738         dmae->comp_addr_hi = 0;
3739         dmae->comp_val = 1;
3740
3741         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3742         dmae->opcode = opcode;
3743         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3744                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3745         dmae->src_addr_hi = 0;
3746         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3747                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3748         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3749                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3750         dmae->len = (2*sizeof(u32)) >> 2;
3751         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3752         dmae->comp_addr_hi = 0;
3753         dmae->comp_val = 1;
3754
3755         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3756         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3757                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3758                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3759 #ifdef __BIG_ENDIAN
3760                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3761 #else
3762                         DMAE_CMD_ENDIANITY_DW_SWAP |
3763 #endif
3764                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3765                         (vn << DMAE_CMD_E1HVN_SHIFT));
3766         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3767                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3768         dmae->src_addr_hi = 0;
3769         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3770                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3771         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3772                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3773         dmae->len = (2*sizeof(u32)) >> 2;
3774         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3775         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3776         dmae->comp_val = DMAE_COMP_VAL;
3777
3778         *stats_comp = 0;
3779 }
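
/*
 * All but the last DMAE command built above complete to the next "go"
 * register (comp_val == 1), so the loader in bnx2x_hw_stats_post()
 * chains the transfers in hardware; only the final command completes
 * to the stats_comp word with DMAE_COMP_VAL, which bnx2x_stats_comp()
 * then polls for.
 */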
3780
3781 static void bnx2x_func_stats_init(struct bnx2x *bp)
3782 {
3783         struct dmae_command *dmae = &bp->stats_dmae;
3784         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3785
3786         /* sanity */
3787         if (!bp->func_stx) {
3788                 BNX2X_ERR("BUG!\n");
3789                 return;
3790         }
3791
3792         bp->executer_idx = 0;
3793         memset(dmae, 0, sizeof(struct dmae_command));
3794
3795         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3796                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3797                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3798 #ifdef __BIG_ENDIAN
3799                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3800 #else
3801                         DMAE_CMD_ENDIANITY_DW_SWAP |
3802 #endif
3803                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3804                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3805         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3806         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3807         dmae->dst_addr_lo = bp->func_stx >> 2;
3808         dmae->dst_addr_hi = 0;
3809         dmae->len = sizeof(struct host_func_stats) >> 2;
3810         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3811         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3812         dmae->comp_val = DMAE_COMP_VAL;
3813
3814         *stats_comp = 0;
3815 }
3816
3817 static void bnx2x_stats_start(struct bnx2x *bp)
3818 {
3819         if (bp->port.pmf)
3820                 bnx2x_port_stats_init(bp);
3821
3822         else if (bp->func_stx)
3823                 bnx2x_func_stats_init(bp);
3824
3825         bnx2x_hw_stats_post(bp);
3826         bnx2x_storm_stats_post(bp);
3827 }
3828
3829 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3830 {
3831         bnx2x_stats_comp(bp);
3832         bnx2x_stats_pmf_update(bp);
3833         bnx2x_stats_start(bp);
3834 }
3835
3836 static void bnx2x_stats_restart(struct bnx2x *bp)
3837 {
3838         bnx2x_stats_comp(bp);
3839         bnx2x_stats_start(bp);
3840 }
3841
3842 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3843 {
3844         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3845         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3846         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3847         struct {
3848                 u32 lo;
3849                 u32 hi;
3850         } diff;
3851
3852         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3853         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3854         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3855         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3856         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3857         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3858         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3859         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3860         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3861         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3862         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3863         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3864         UPDATE_STAT64(tx_stat_gt127,
3865                                 tx_stat_etherstatspkts65octetsto127octets);
3866         UPDATE_STAT64(tx_stat_gt255,
3867                                 tx_stat_etherstatspkts128octetsto255octets);
3868         UPDATE_STAT64(tx_stat_gt511,
3869                                 tx_stat_etherstatspkts256octetsto511octets);
3870         UPDATE_STAT64(tx_stat_gt1023,
3871                                 tx_stat_etherstatspkts512octetsto1023octets);
3872         UPDATE_STAT64(tx_stat_gt1518,
3873                                 tx_stat_etherstatspkts1024octetsto1522octets);
3874         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3875         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3876         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3877         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3878         UPDATE_STAT64(tx_stat_gterr,
3879                                 tx_stat_dot3statsinternalmactransmiterrors);
3880         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3881
3882         estats->pause_frames_received_hi =
3883                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3884         estats->pause_frames_received_lo =
3885                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3886
3887         estats->pause_frames_sent_hi =
3888                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3889         estats->pause_frames_sent_lo =
3890                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3891 }
3892
3893 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3894 {
3895         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3896         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3897         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3898
3899         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3900         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3901         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3902         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3903         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3904         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3905         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3906         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3907         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3908         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3909         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3910         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3911         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3912         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3913         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3914         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3915         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3916         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3917         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3918         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3919         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3920         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3921         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3922         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3923         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3924         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3925         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3926         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3927         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3928         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3929         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3930
3931         estats->pause_frames_received_hi =
3932                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3933         estats->pause_frames_received_lo =
3934                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3935         ADD_64(estats->pause_frames_received_hi,
3936                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3937                estats->pause_frames_received_lo,
3938                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3939
3940         estats->pause_frames_sent_hi =
3941                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3942         estats->pause_frames_sent_lo =
3943                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3944         ADD_64(estats->pause_frames_sent_hi,
3945                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3946                estats->pause_frames_sent_lo,
3947                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3948 }
3949
3950 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3951 {
3952         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3953         struct nig_stats *old = &(bp->port.old_nig_stats);
3954         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3955         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3956         struct {
3957                 u32 lo;
3958                 u32 hi;
3959         } diff;
3960         u32 nig_timer_max;
3961
3962         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3963                 bnx2x_bmac_stats_update(bp);
3964
3965         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3966                 bnx2x_emac_stats_update(bp);
3967
3968         else { /* unreached */
3969                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3970                 return -1;
3971         }
3972
3973         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3974                       new->brb_discard - old->brb_discard);
3975         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3976                       new->brb_truncate - old->brb_truncate);
3977
3978         UPDATE_STAT64_NIG(egress_mac_pkt0,
3979                                         etherstatspkts1024octetsto1522octets);
3980         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3981
3982         memcpy(old, new, sizeof(struct nig_stats));
3983
3984         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3985                sizeof(struct mac_stx));
3986         estats->brb_drop_hi = pstats->brb_drop_hi;
3987         estats->brb_drop_lo = pstats->brb_drop_lo;
3988
3989         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3990
3991         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3992         if (nig_timer_max != estats->nig_timer_max) {
3993                 estats->nig_timer_max = nig_timer_max;
3994                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3995         }
3996
3997         return 0;
3998 }
3999
4000 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4001 {
4002         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4003         struct tstorm_per_port_stats *tport =
4004                                         &stats->tstorm_common.port_statistics;
4005         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4006         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4007         int i;
4008
4009         memcpy(&(fstats->total_bytes_received_hi),
4010                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4011                sizeof(struct host_func_stats) - 2*sizeof(u32));
4012         estats->error_bytes_received_hi = 0;
4013         estats->error_bytes_received_lo = 0;
4014         estats->etherstatsoverrsizepkts_hi = 0;
4015         estats->etherstatsoverrsizepkts_lo = 0;
4016         estats->no_buff_discard_hi = 0;
4017         estats->no_buff_discard_lo = 0;
4018
4019         for_each_rx_queue(bp, i) {
4020                 struct bnx2x_fastpath *fp = &bp->fp[i];
4021                 int cl_id = fp->cl_id;
4022                 struct tstorm_per_client_stats *tclient =
4023                                 &stats->tstorm_common.client_statistics[cl_id];
4024                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4025                 struct ustorm_per_client_stats *uclient =
4026                                 &stats->ustorm_common.client_statistics[cl_id];
4027                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4028                 struct xstorm_per_client_stats *xclient =
4029                                 &stats->xstorm_common.client_statistics[cl_id];
4030                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4031                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4032                 u32 diff;
4033
4034                 /* are storm stats valid? */
4035                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4036                                                         bp->stats_counter) {
4037                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4038                            "  xstorm counter (%d) != stats_counter (%d)\n",
4039                            i, xclient->stats_counter, bp->stats_counter);
4040                         return -1;
4041                 }
4042                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4043                                                         bp->stats_counter) {
4044                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4045                            "  tstorm counter (%d) != stats_counter (%d)\n",
4046                            i, tclient->stats_counter, bp->stats_counter);
4047                         return -2;
4048                 }
4049                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4050                                                         bp->stats_counter) {
4051                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4052                            "  ustorm counter (%d) != stats_counter (%d)\n",
4053                            i, uclient->stats_counter, bp->stats_counter);
4054                         return -4;
4055                 }
4056
4057                 qstats->total_bytes_received_hi =
4058                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4059                 qstats->total_bytes_received_lo =
4060                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4061
4062                 ADD_64(qstats->total_bytes_received_hi,
4063                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4064                        qstats->total_bytes_received_lo,
4065                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4066
4067                 ADD_64(qstats->total_bytes_received_hi,
4068                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4069                        qstats->total_bytes_received_lo,
4070                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4071
4072                 qstats->valid_bytes_received_hi =
4073                                         qstats->total_bytes_received_hi;
4074                 qstats->valid_bytes_received_lo =
4075                                         qstats->total_bytes_received_lo;
4076
4077                 qstats->error_bytes_received_hi =
4078                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4079                 qstats->error_bytes_received_lo =
4080                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4081
4082                 ADD_64(qstats->total_bytes_received_hi,
4083                        qstats->error_bytes_received_hi,
4084                        qstats->total_bytes_received_lo,
4085                        qstats->error_bytes_received_lo);
4086
4087                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4088                                         total_unicast_packets_received);
4089                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4090                                         total_multicast_packets_received);
4091                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4092                                         total_broadcast_packets_received);
4093                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4094                                         etherstatsoverrsizepkts);
4095                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4096
4097                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4098                                         total_unicast_packets_received);
4099                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4100                                         total_multicast_packets_received);
4101                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4102                                         total_broadcast_packets_received);
4103                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4104                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4105                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4106
4107                 qstats->total_bytes_transmitted_hi =
4108                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4109                 qstats->total_bytes_transmitted_lo =
4110                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4111
4112                 ADD_64(qstats->total_bytes_transmitted_hi,
4113                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4114                        qstats->total_bytes_transmitted_lo,
4115                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4116
4117                 ADD_64(qstats->total_bytes_transmitted_hi,
4118                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4119                        qstats->total_bytes_transmitted_lo,
4120                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4121
4122                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4123                                         total_unicast_packets_transmitted);
4124                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4125                                         total_multicast_packets_transmitted);
4126                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4127                                         total_broadcast_packets_transmitted);
4128
4129                 old_tclient->checksum_discard = tclient->checksum_discard;
4130                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4131
4132                 ADD_64(fstats->total_bytes_received_hi,
4133                        qstats->total_bytes_received_hi,
4134                        fstats->total_bytes_received_lo,
4135                        qstats->total_bytes_received_lo);
4136                 ADD_64(fstats->total_bytes_transmitted_hi,
4137                        qstats->total_bytes_transmitted_hi,
4138                        fstats->total_bytes_transmitted_lo,
4139                        qstats->total_bytes_transmitted_lo);
4140                 ADD_64(fstats->total_unicast_packets_received_hi,
4141                        qstats->total_unicast_packets_received_hi,
4142                        fstats->total_unicast_packets_received_lo,
4143                        qstats->total_unicast_packets_received_lo);
4144                 ADD_64(fstats->total_multicast_packets_received_hi,
4145                        qstats->total_multicast_packets_received_hi,
4146                        fstats->total_multicast_packets_received_lo,
4147                        qstats->total_multicast_packets_received_lo);
4148                 ADD_64(fstats->total_broadcast_packets_received_hi,
4149                        qstats->total_broadcast_packets_received_hi,
4150                        fstats->total_broadcast_packets_received_lo,
4151                        qstats->total_broadcast_packets_received_lo);
4152                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4153                        qstats->total_unicast_packets_transmitted_hi,
4154                        fstats->total_unicast_packets_transmitted_lo,
4155                        qstats->total_unicast_packets_transmitted_lo);
4156                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4157                        qstats->total_multicast_packets_transmitted_hi,
4158                        fstats->total_multicast_packets_transmitted_lo,
4159                        qstats->total_multicast_packets_transmitted_lo);
4160                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4161                        qstats->total_broadcast_packets_transmitted_hi,
4162                        fstats->total_broadcast_packets_transmitted_lo,
4163                        qstats->total_broadcast_packets_transmitted_lo);
4164                 ADD_64(fstats->valid_bytes_received_hi,
4165                        qstats->valid_bytes_received_hi,
4166                        fstats->valid_bytes_received_lo,
4167                        qstats->valid_bytes_received_lo);
4168
4169                 ADD_64(estats->error_bytes_received_hi,
4170                        qstats->error_bytes_received_hi,
4171                        estats->error_bytes_received_lo,
4172                        qstats->error_bytes_received_lo);
4173                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4174                        qstats->etherstatsoverrsizepkts_hi,
4175                        estats->etherstatsoverrsizepkts_lo,
4176                        qstats->etherstatsoverrsizepkts_lo);
4177                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4178                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4179         }
4180
4181         ADD_64(fstats->total_bytes_received_hi,
4182                estats->rx_stat_ifhcinbadoctets_hi,
4183                fstats->total_bytes_received_lo,
4184                estats->rx_stat_ifhcinbadoctets_lo);
4185
4186         memcpy(estats, &(fstats->total_bytes_received_hi),
4187                sizeof(struct host_func_stats) - 2*sizeof(u32));
4188
4189         ADD_64(estats->etherstatsoverrsizepkts_hi,
4190                estats->rx_stat_dot3statsframestoolong_hi,
4191                estats->etherstatsoverrsizepkts_lo,
4192                estats->rx_stat_dot3statsframestoolong_lo);
4193         ADD_64(estats->error_bytes_received_hi,
4194                estats->rx_stat_ifhcinbadoctets_hi,
4195                estats->error_bytes_received_lo,
4196                estats->rx_stat_ifhcinbadoctets_lo);
4197
4198         if (bp->port.pmf) {
4199                 estats->mac_filter_discard =
4200                                 le32_to_cpu(tport->mac_filter_discard);
4201                 estats->xxoverflow_discard =
4202                                 le32_to_cpu(tport->xxoverflow_discard);
4203                 estats->brb_truncate_discard =
4204                                 le32_to_cpu(tport->brb_truncate_discard);
4205                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4206         }
4207
4208         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4209
4210         bp->stats_pending = 0;
4211
4212         return 0;
4213 }
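
/*
 * Illustrative sketch, not a driver symbol: every 64-bit counter above is
 * kept as a {hi, lo} pair of u32 fields, and ADD_64() (a macro in bnx2x.h)
 * must propagate the carry from the low word into the high one.  After an
 * unsigned 32-bit add, wraparound happened iff the sum came out smaller
 * than the addend, which is all the carry test needs:
 */
static inline void demo_add64(u32 *s_hi, u32 a_hi, u32 *s_lo, u32 a_lo)
{
	*s_lo += a_lo;				/* may wrap modulo 2^32 */
	*s_hi += a_hi + (*s_lo < a_lo ? 1 : 0);	/* carry into high word */
}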
4214
4215 static void bnx2x_net_stats_update(struct bnx2x *bp)
4216 {
4217         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4218         struct net_device_stats *nstats = &bp->dev->stats;
4219         int i;
4220
4221         nstats->rx_packets =
4222                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4223                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4224                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4225
4226         nstats->tx_packets =
4227                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4228                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4229                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4230
4231         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4232
4233         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4234
4235         nstats->rx_dropped = estats->mac_discard;
4236         for_each_rx_queue(bp, i)
4237                 nstats->rx_dropped +=
4238                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4239
4240         nstats->tx_dropped = 0;
4241
4242         nstats->multicast =
4243                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4244
4245         nstats->collisions =
4246                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4247
4248         nstats->rx_length_errors =
4249                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4250                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4251         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4252                                  bnx2x_hilo(&estats->brb_truncate_hi);
4253         nstats->rx_crc_errors =
4254                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4255         nstats->rx_frame_errors =
4256                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4257         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4258         nstats->rx_missed_errors = estats->xxoverflow_discard;
4259
4260         nstats->rx_errors = nstats->rx_length_errors +
4261                             nstats->rx_over_errors +
4262                             nstats->rx_crc_errors +
4263                             nstats->rx_frame_errors +
4264                             nstats->rx_fifo_errors +
4265                             nstats->rx_missed_errors;
4266
4267         nstats->tx_aborted_errors =
4268                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4269                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4270         nstats->tx_carrier_errors =
4271                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4272         nstats->tx_fifo_errors = 0;
4273         nstats->tx_heartbeat_errors = 0;
4274         nstats->tx_window_errors = 0;
4275
4276         nstats->tx_errors = nstats->tx_aborted_errors +
4277                             nstats->tx_carrier_errors +
4278             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4279 }
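
/*
 * Illustrative sketch (demo_hilo is not a driver symbol): bnx2x_hilo(),
 * used throughout bnx2x_net_stats_update(), does the inverse of ADD_64 --
 * it folds a {hi, lo} counter pair back into a single scalar.  It relies
 * on every *_hi field being immediately followed by its *_lo twin in
 * struct bnx2x_eth_stats, so the low word sits one u32 past the address
 * it is handed:
 */
static inline u64 demo_hilo(const u32 *hi_ref)
{
	return ((u64)hi_ref[0] << 32) | hi_ref[1];	/* hi_ref[1] == *_lo */
}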
4280
4281 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4282 {
4283         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4284         int i;
4285
4286         estats->driver_xoff = 0;
4287         estats->rx_err_discard_pkt = 0;
4288         estats->rx_skb_alloc_failed = 0;
4289         estats->hw_csum_err = 0;
4290         for_each_rx_queue(bp, i) {
4291                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4292
4293                 estats->driver_xoff += qstats->driver_xoff;
4294                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4295                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4296                 estats->hw_csum_err += qstats->hw_csum_err;
4297         }
4298 }
4299
4300 static void bnx2x_stats_update(struct bnx2x *bp)
4301 {
4302         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4303
4304         if (*stats_comp != DMAE_COMP_VAL)
4305                 return;
4306
4307         if (bp->port.pmf)
4308                 bnx2x_hw_stats_update(bp);
4309
4310         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4311                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4312                 bnx2x_panic();
4313                 return;
4314         }
4315
4316         bnx2x_net_stats_update(bp);
4317         bnx2x_drv_stats_update(bp);
4318
4319         if (bp->msglevel & NETIF_MSG_TIMER) {
4320                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4321                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4322                 struct tstorm_per_client_stats *old_tclient =
4323                                                         &bp->fp->old_tclient;
4324                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4325                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4326                 struct net_device_stats *nstats = &bp->dev->stats;
4327                 int i;
4328
4329                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4330                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4331                                   "  tx pkt (%lx)\n",
4332                        bnx2x_tx_avail(fp0_tx),
4333                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4334                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4335                                   "  rx pkt (%lx)\n",
4336                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4337                              fp0_rx->rx_comp_cons),
4338                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4339                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4340                                   "brb truncate %u\n",
4341                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4342                        qstats->driver_xoff,
4343                        estats->brb_drop_lo, estats->brb_truncate_lo);
4344                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4345                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4346                         "mac_discard %u  mac_filter_discard %u  "
4347                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4348                         "ttl0_discard %u\n",
4349                        le32_to_cpu(old_tclient->checksum_discard),
4350                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4351                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4352                        estats->mac_discard, estats->mac_filter_discard,
4353                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4354                        le32_to_cpu(old_tclient->ttl0_discard));
4355
4356                 for_each_queue(bp, i) {
4357                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4358                                bnx2x_fp(bp, i, tx_pkt),
4359                                bnx2x_fp(bp, i, rx_pkt),
4360                                bnx2x_fp(bp, i, rx_calls));
4361                 }
4362         }
4363
4364         bnx2x_hw_stats_post(bp);
4365         bnx2x_storm_stats_post(bp);
4366 }
4367
4368 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4369 {
4370         struct dmae_command *dmae;
4371         u32 opcode;
4372         int loader_idx = PMF_DMAE_C(bp);
4373         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4374
4375         bp->executer_idx = 0;
4376
4377         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4378                   DMAE_CMD_C_ENABLE |
4379                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4380 #ifdef __BIG_ENDIAN
4381                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4382 #else
4383                   DMAE_CMD_ENDIANITY_DW_SWAP |
4384 #endif
4385                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4386                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4387
4388         if (bp->port.port_stx) {
4389
4390                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4391                 if (bp->func_stx)
4392                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4393                 else
4394                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4395                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4396                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4397                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4398                 dmae->dst_addr_hi = 0;
4399                 dmae->len = sizeof(struct host_port_stats) >> 2;
4400                 if (bp->func_stx) {
4401                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4402                         dmae->comp_addr_hi = 0;
4403                         dmae->comp_val = 1;
4404                 } else {
4405                         dmae->comp_addr_lo =
4406                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4407                         dmae->comp_addr_hi =
4408                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4409                         dmae->comp_val = DMAE_COMP_VAL;
4410
4411                         *stats_comp = 0;
4412                 }
4413         }
4414
4415         if (bp->func_stx) {
4416
4417                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4418                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4419                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4420                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4421                 dmae->dst_addr_lo = bp->func_stx >> 2;
4422                 dmae->dst_addr_hi = 0;
4423                 dmae->len = sizeof(struct host_func_stats) >> 2;
4424                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4425                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4426                 dmae->comp_val = DMAE_COMP_VAL;
4427
4428                 *stats_comp = 0;
4429         }
4430 }
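
/*
 * Sketch only (demo_stats_dmae_opcode is not a driver symbol): the opcode
 * assembled at the top of bnx2x_port_stats_stop() is a plain bit-OR of
 * direction, completion, byte-swap and routing flags; factored out, the
 * layout is easier to see.  The completion destination (GRC to kick the
 * next command in a chain, PCI for the final one) is the only part that
 * varies between the two commands built above:
 */
static inline u32 demo_stats_dmae_opcode(struct bnx2x *bp, u32 comp_dst)
{
	return DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
	       DMAE_CMD_C_ENABLE | comp_dst |
	       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
	       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
	       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
	       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
	       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT);
}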
4431
4432 static void bnx2x_stats_stop(struct bnx2x *bp)
4433 {
4434         int update = 0;
4435
4436         bnx2x_stats_comp(bp);
4437
4438         if (bp->port.pmf)
4439                 update = (bnx2x_hw_stats_update(bp) == 0);
4440
4441         update |= (bnx2x_storm_stats_update(bp) == 0);
4442
4443         if (update) {
4444                 bnx2x_net_stats_update(bp);
4445
4446                 if (bp->port.pmf)
4447                         bnx2x_port_stats_stop(bp);
4448
4449                 bnx2x_hw_stats_post(bp);
4450                 bnx2x_stats_comp(bp);
4451         }
4452 }
4453
4454 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4455 {
4456 }
4457
4458 static const struct {
4459         void (*action)(struct bnx2x *bp);
4460         enum bnx2x_stats_state next_state;
4461 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4462 /* state        event   */
4463 {
4464 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4465 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4466 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4467 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4468 },
4469 {
4470 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4471 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4472 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4473 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4474 }
4475 };
4476
4477 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4478 {
4479         enum bnx2x_stats_state state = bp->stats_state;
4480
4481         bnx2x_stats_stm[state][event].action(bp);
4482         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4483
4484         /* Make sure the state has been "changed" */
4485         smp_wmb();
4486
4487         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4488                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4489                    state, event, bp->stats_state);
4490 }
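
/*
 * Illustrative only: bnx2x_stats_stm[][] is a classic table-driven state
 * machine -- every (state, event) cell names an action plus the successor
 * state.  For example, a STOP event while in STATS_STATE_ENABLED runs
 * bnx2x_stats_stop() and leaves the machine DISABLED:
 */
static inline enum bnx2x_stats_state
demo_stats_next_state(enum bnx2x_stats_state state,
		      enum bnx2x_stats_event event)
{
	return bnx2x_stats_stm[state][event].next_state;
}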
4491
4492 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4493 {
4494         struct dmae_command *dmae;
4495         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4496
4497         /* sanity */
4498         if (!bp->port.pmf || !bp->port.port_stx) {
4499                 BNX2X_ERR("BUG!\n");
4500                 return;
4501         }
4502
4503         bp->executer_idx = 0;
4504
4505         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4506         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4507                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4508                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4509 #ifdef __BIG_ENDIAN
4510                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4511 #else
4512                         DMAE_CMD_ENDIANITY_DW_SWAP |
4513 #endif
4514                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4515                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4516         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4517         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4518         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4519         dmae->dst_addr_hi = 0;
4520         dmae->len = sizeof(struct host_port_stats) >> 2;
4521         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4522         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4523         dmae->comp_val = DMAE_COMP_VAL;
4524
4525         *stats_comp = 0;
4526         bnx2x_hw_stats_post(bp);
4527         bnx2x_stats_comp(bp);
4528 }
4529
4530 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4531 {
4532         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4533         int port = BP_PORT(bp);
4534         int func;
4535         u32 func_stx;
4536
4537         /* sanity */
4538         if (!bp->port.pmf || !bp->func_stx) {
4539                 BNX2X_ERR("BUG!\n");
4540                 return;
4541         }
4542
4543         /* save our func_stx */
4544         func_stx = bp->func_stx;
4545
4546         for (vn = VN_0; vn < vn_max; vn++) {
4547                 func = 2*vn + port;
4548
4549                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4550                 bnx2x_func_stats_init(bp);
4551                 bnx2x_hw_stats_post(bp);
4552                 bnx2x_stats_comp(bp);
4553         }
4554
4555         /* restore our func_stx */
4556         bp->func_stx = func_stx;
4557 }
4558
4559 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4560 {
4561         struct dmae_command *dmae = &bp->stats_dmae;
4562         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4563
4564         /* sanity */
4565         if (!bp->func_stx) {
4566                 BNX2X_ERR("BUG!\n");
4567                 return;
4568         }
4569
4570         bp->executer_idx = 0;
4571         memset(dmae, 0, sizeof(struct dmae_command));
4572
4573         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4574                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4575                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4576 #ifdef __BIG_ENDIAN
4577                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4578 #else
4579                         DMAE_CMD_ENDIANITY_DW_SWAP |
4580 #endif
4581                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4582                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4583         dmae->src_addr_lo = bp->func_stx >> 2;
4584         dmae->src_addr_hi = 0;
4585         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4586         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4587         dmae->len = sizeof(struct host_func_stats) >> 2;
4588         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4589         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4590         dmae->comp_val = DMAE_COMP_VAL;
4591
4592         *stats_comp = 0;
4593         bnx2x_hw_stats_post(bp);
4594         bnx2x_stats_comp(bp);
4595 }
4596
4597 static void bnx2x_stats_init(struct bnx2x *bp)
4598 {
4599         int port = BP_PORT(bp);
4600         int func = BP_FUNC(bp);
4601         int i;
4602
4603         bp->stats_pending = 0;
4604         bp->executer_idx = 0;
4605         bp->stats_counter = 0;
4606
4607         /* port and func stats for management */
4608         if (!BP_NOMCP(bp)) {
4609                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4610                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4611
4612         } else {
4613                 bp->port.port_stx = 0;
4614                 bp->func_stx = 0;
4615         }
4616         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4617            bp->port.port_stx, bp->func_stx);
4618
4619         /* port stats */
4620         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4621         bp->port.old_nig_stats.brb_discard =
4622                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4623         bp->port.old_nig_stats.brb_truncate =
4624                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4625         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4626                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4627         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4628                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4629
4630         /* function stats */
4631         for_each_queue(bp, i) {
4632                 struct bnx2x_fastpath *fp = &bp->fp[i];
4633
4634                 memset(&fp->old_tclient, 0,
4635                        sizeof(struct tstorm_per_client_stats));
4636                 memset(&fp->old_uclient, 0,
4637                        sizeof(struct ustorm_per_client_stats));
4638                 memset(&fp->old_xclient, 0,
4639                        sizeof(struct xstorm_per_client_stats));
4640                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4641         }
4642
4643         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4644         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4645
4646         bp->stats_state = STATS_STATE_DISABLED;
4647
4648         if (bp->port.pmf) {
4649                 if (bp->port.port_stx)
4650                         bnx2x_port_stats_base_init(bp);
4651
4652                 if (bp->func_stx)
4653                         bnx2x_func_stats_base_init(bp);
4654
4655         } else if (bp->func_stx)
4656                 bnx2x_func_stats_base_update(bp);
4657 }
4658
4659 static void bnx2x_timer(unsigned long data)
4660 {
4661         struct bnx2x *bp = (struct bnx2x *) data;
4662
4663         if (!netif_running(bp->dev))
4664                 return;
4665
4666         if (atomic_read(&bp->intr_sem) != 0)
4667                 goto timer_restart;
4668
4669         if (poll) {
4670                 struct bnx2x_fastpath *fp = &bp->fp[0];
4672 
4673                 bnx2x_tx_int(fp);
4674                 bnx2x_rx_int(fp, 1000);
4675         }
4676
4677         if (!BP_NOMCP(bp)) {
4678                 int func = BP_FUNC(bp);
4679                 u32 drv_pulse;
4680                 u32 mcp_pulse;
4681
4682                 ++bp->fw_drv_pulse_wr_seq;
4683                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4684                 /* TBD - add SYSTEM_TIME */
4685                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4686                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4687
4688                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4689                              MCP_PULSE_SEQ_MASK);
4690                 /* The delta between driver pulse and mcp response
4691                  * should be 1 (before mcp response) or 0 (after mcp response)
4692                  */
4693                 if ((drv_pulse != mcp_pulse) &&
4694                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4695                         /* someone lost a heartbeat... */
4696                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4697                                   drv_pulse, mcp_pulse);
4698                 }
4699         }
4700
4701         if (bp->state == BNX2X_STATE_OPEN)
4702                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4703
4704 timer_restart:
4705         mod_timer(&bp->timer, jiffies + bp->current_interval);
4706 }
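
/*
 * Sketch (demo_pulse_in_sync is not a driver symbol): the heartbeat test
 * in bnx2x_timer() is a wrap-safe sequence comparison under
 * MCP_PULSE_SEQ_MASK -- the driver pulse may legitimately equal the MCP
 * echo (MCP already answered) or run exactly one ahead of it (answer
 * still pending); anything else means a lost heartbeat:
 */
static inline int demo_pulse_in_sync(u32 drv_pulse, u32 mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}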
4707
4708 /* end of Statistics */
4709
4710 /* nic init */
4711
4712 /*
4713  * nic init service functions
4714  */
4715
4716 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4717 {
4718         int port = BP_PORT(bp);
4719
4720         /* "CSTORM" */
4721         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4722                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4723                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4724         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4725                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4726                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4727 }
4728
4729 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4730                           dma_addr_t mapping, int sb_id)
4731 {
4732         int port = BP_PORT(bp);
4733         int func = BP_FUNC(bp);
4734         int index;
4735         u64 section;
4736
4737         /* USTORM */
4738         section = ((u64)mapping) + offsetof(struct host_status_block,
4739                                             u_status_block);
4740         sb->u_status_block.status_block_id = sb_id;
4741
4742         REG_WR(bp, BAR_CSTRORM_INTMEM +
4743                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4744         REG_WR(bp, BAR_CSTRORM_INTMEM +
4745                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4746                U64_HI(section));
4747         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4748                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4749
4750         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4751                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4752                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4753
4754         /* CSTORM */
4755         section = ((u64)mapping) + offsetof(struct host_status_block,
4756                                             c_status_block);
4757         sb->c_status_block.status_block_id = sb_id;
4758
4759         REG_WR(bp, BAR_CSTRORM_INTMEM +
4760                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4761         REG_WR(bp, BAR_CSTRORM_INTMEM +
4762                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4763                U64_HI(section));
4764         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4765                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4766
4767         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4768                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4769                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4770
4771         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4772 }
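
/*
 * Sketch (demo_ustorm_section is illustrative only): each per-storm
 * "section" address programmed above is nothing more than the status
 * block's DMA base plus the C offset of the member inside
 * struct host_status_block, e.g. for the USTORM part:
 */
static inline u64 demo_ustorm_section(dma_addr_t mapping)
{
	return (u64)mapping + offsetof(struct host_status_block,
				       u_status_block);
}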
4773
4774 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4775 {
4776         int func = BP_FUNC(bp);
4777
4778         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4779                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4780                         sizeof(struct tstorm_def_status_block)/4);
4781         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4782                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4783                         sizeof(struct cstorm_def_status_block_u)/4);
4784         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4785                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4786                         sizeof(struct cstorm_def_status_block_c)/4);
4787         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4788                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4789                         sizeof(struct xstorm_def_status_block)/4);
4790 }
4791
4792 static void bnx2x_init_def_sb(struct bnx2x *bp,
4793                               struct host_def_status_block *def_sb,
4794                               dma_addr_t mapping, int sb_id)
4795 {
4796         int port = BP_PORT(bp);
4797         int func = BP_FUNC(bp);
4798         int index, val, reg_offset;
4799         u64 section;
4800
4801         /* ATTN */
4802         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4803                                             atten_status_block);
4804         def_sb->atten_status_block.status_block_id = sb_id;
4805
4806         bp->attn_state = 0;
4807
4808         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4809                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4810
4811         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4812                 bp->attn_group[index].sig[0] = REG_RD(bp,
4813                                                      reg_offset + 0x10*index);
4814                 bp->attn_group[index].sig[1] = REG_RD(bp,
4815                                                reg_offset + 0x4 + 0x10*index);
4816                 bp->attn_group[index].sig[2] = REG_RD(bp,
4817                                                reg_offset + 0x8 + 0x10*index);
4818                 bp->attn_group[index].sig[3] = REG_RD(bp,
4819                                                reg_offset + 0xc + 0x10*index);
4820         }
4821
4822         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4823                              HC_REG_ATTN_MSG0_ADDR_L);
4824
4825         REG_WR(bp, reg_offset, U64_LO(section));
4826         REG_WR(bp, reg_offset + 4, U64_HI(section));
4827
4828         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4829
4830         val = REG_RD(bp, reg_offset);
4831         val |= sb_id;
4832         REG_WR(bp, reg_offset, val);
4833
4834         /* USTORM */
4835         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4836                                             u_def_status_block);
4837         def_sb->u_def_status_block.status_block_id = sb_id;
4838
4839         REG_WR(bp, BAR_CSTRORM_INTMEM +
4840                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4841         REG_WR(bp, BAR_CSTRORM_INTMEM +
4842                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4843                U64_HI(section));
4844         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4845                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4846
4847         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4848                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4849                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4850
4851         /* CSTORM */
4852         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4853                                             c_def_status_block);
4854         def_sb->c_def_status_block.status_block_id = sb_id;
4855
4856         REG_WR(bp, BAR_CSTRORM_INTMEM +
4857                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4858         REG_WR(bp, BAR_CSTRORM_INTMEM +
4859                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4860                U64_HI(section));
4861         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4862                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4863
4864         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4865                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4866                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4867
4868         /* TSTORM */
4869         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4870                                             t_def_status_block);
4871         def_sb->t_def_status_block.status_block_id = sb_id;
4872
4873         REG_WR(bp, BAR_TSTRORM_INTMEM +
4874                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4875         REG_WR(bp, BAR_TSTRORM_INTMEM +
4876                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4877                U64_HI(section));
4878         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4879                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4880
4881         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4882                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4883                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4884
4885         /* XSTORM */
4886         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4887                                             x_def_status_block);
4888         def_sb->x_def_status_block.status_block_id = sb_id;
4889
4890         REG_WR(bp, BAR_XSTRORM_INTMEM +
4891                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4892         REG_WR(bp, BAR_XSTRORM_INTMEM +
4893                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4894                U64_HI(section));
4895         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4896                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4897
4898         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4899                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4900                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4901
4902         bp->stats_pending = 0;
4903         bp->set_mac_pending = 0;
4904
4905         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4906 }
4907
4908 static void bnx2x_update_coalesce(struct bnx2x *bp)
4909 {
4910         int port = BP_PORT(bp);
4911         int i;
4912
4913         for_each_queue(bp, i) {
4914                 int sb_id = bp->fp[i].sb_id;
4915
4916                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4917                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4918                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4919                                                       U_SB_ETH_RX_CQ_INDEX),
4920                         bp->rx_ticks/12);
4921                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4922                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4923                                                        U_SB_ETH_RX_CQ_INDEX),
4924                          (bp->rx_ticks/12) ? 0 : 1);
4925
4926                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4927                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4928                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4929                                                       C_SB_ETH_TX_CQ_INDEX),
4930                         bp->tx_ticks/12);
4931                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4932                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4933                                                        C_SB_ETH_TX_CQ_INDEX),
4934                          (bp->tx_ticks/12) ? 0 : 1);
4935         }
4936 }
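
/*
 * Sketch (demo_hc_coalesce is illustrative): rx_ticks/tx_ticks are kept
 * in microseconds, and the /12 above converts them to host-coalescing
 * timeout units (implying one HC unit is 12 usec).  A zero timeout does
 * not mean "fire immediately" -- the companion HC_DISABLE write turns
 * coalescing off for that index instead:
 */
static inline void demo_hc_coalesce(u16 usec, u8 *timeout, u16 *disable)
{
	*timeout = usec / 12;		/* value for the HC timeout register */
	*disable = *timeout ? 0 : 1;	/* 0 usec -> disable the index */
}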
4937
4938 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4939                                        struct bnx2x_fastpath *fp, int last)
4940 {
4941         int i;
4942
4943         for (i = 0; i < last; i++) {
4944                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4945                 struct sk_buff *skb = rx_buf->skb;
4946
4947                 if (skb == NULL) {
4948                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4949                         continue;
4950                 }
4951
4952                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4953                         pci_unmap_single(bp->pdev,
4954                                          pci_unmap_addr(rx_buf, mapping),
4955                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4956
4957                 dev_kfree_skb(skb);
4958                 rx_buf->skb = NULL;
4959         }
4960 }
4961
4962 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4963 {
4964         int func = BP_FUNC(bp);
4965         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4966                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4967         u16 ring_prod, cqe_ring_prod;
4968         int i, j;
4969
4970         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4971         DP(NETIF_MSG_IFUP,
4972            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4973
4974         if (bp->flags & TPA_ENABLE_FLAG) {
4975
4976                 for_each_rx_queue(bp, j) {
4977                         struct bnx2x_fastpath *fp = &bp->fp[j];
4978
4979                         for (i = 0; i < max_agg_queues; i++) {
4980                                 fp->tpa_pool[i].skb =
4981                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4982                                 if (!fp->tpa_pool[i].skb) {
4983                                         BNX2X_ERR("Failed to allocate TPA "
4984                                                   "skb pool for queue[%d] - "
4985                                                   "disabling TPA on this "
4986                                                   "queue!\n", j);
4987                                         bnx2x_free_tpa_pool(bp, fp, i);
4988                                         fp->disable_tpa = 1;
4989                                         break;
4990                                 }
4991                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4992                                                    mapping, 0);
4994                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4995                         }
4996                 }
4997         }
4998
4999         for_each_rx_queue(bp, j) {
5000                 struct bnx2x_fastpath *fp = &bp->fp[j];
5001
5002                 fp->rx_bd_cons = 0;
5003                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5004                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5005
5006                 /* Mark queue as Rx */
5007                 fp->is_rx_queue = 1;
5008
5009                 /* "next page" elements initialization */
5010                 /* SGE ring */
5011                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5012                         struct eth_rx_sge *sge;
5013
5014                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5015                         sge->addr_hi =
5016                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5017                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5018                         sge->addr_lo =
5019                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5020                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5021                 }
5022
5023                 bnx2x_init_sge_ring_bit_mask(fp);
5024
5025                 /* RX BD ring */
5026                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5027                         struct eth_rx_bd *rx_bd;
5028
5029                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5030                         rx_bd->addr_hi =
5031                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5032                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5033                         rx_bd->addr_lo =
5034                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5035                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5036                 }
5037
5038                 /* CQ ring */
5039                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5040                         struct eth_rx_cqe_next_page *nextpg;
5041
5042                         nextpg = (struct eth_rx_cqe_next_page *)
5043                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5044                         nextpg->addr_hi =
5045                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5046                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5047                         nextpg->addr_lo =
5048                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5049                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5050                 }
5051
5052                 /* Allocate SGEs and initialize the ring elements */
5053                 for (i = 0, ring_prod = 0;
5054                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5055
5056                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5057                                 BNX2X_ERR("was only able to allocate "
5058                                           "%d rx sges\n", i);
5059                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5060                                 /* Cleanup already allocated elements */
5061                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5062                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5063                                 fp->disable_tpa = 1;
5064                                 ring_prod = 0;
5065                                 break;
5066                         }
5067                         ring_prod = NEXT_SGE_IDX(ring_prod);
5068                 }
5069                 fp->rx_sge_prod = ring_prod;
5070
5071                 /* Allocate BDs and initialize BD ring */
5072                 fp->rx_comp_cons = 0;
5073                 cqe_ring_prod = ring_prod = 0;
5074                 for (i = 0; i < bp->rx_ring_size; i++) {
5075                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5076                                 BNX2X_ERR("was only able to allocate "
5077                                           "%d rx skbs on queue[%d]\n", i, j);
5078                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5079                                 break;
5080                         }
5081                         ring_prod = NEXT_RX_IDX(ring_prod);
5082                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5083                         WARN_ON(ring_prod <= i);
5084                 }
5085
5086                 fp->rx_bd_prod = ring_prod;
5087                 /* must not have more available CQEs than BDs */
5088                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5089                                        cqe_ring_prod);
5090                 fp->rx_pkt = fp->rx_calls = 0;
5091
5092                 /* Warning!
5093                  * this will generate an interrupt (to the TSTORM),
5094                  * so it must only be done after the chip is initialized
5095                  */
5096                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5097                                      fp->rx_sge_prod);
5098                 if (j != 0)
5099                         continue;
5100
5101                 REG_WR(bp, BAR_USTRORM_INTMEM +
5102                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5103                        U64_LO(fp->rx_comp_mapping));
5104                 REG_WR(bp, BAR_USTRORM_INTMEM +
5105                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5106                        U64_HI(fp->rx_comp_mapping));
5107         }
5108 }
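
/*
 * Sketch of the "next page" linkage used above (demo_link_rx_pages is not
 * a driver symbol): the tail of every ring page is reserved for a pointer
 * to the DMA address of the following page (the RX BD ring keeps two
 * reserved slots, hence the "- 2"; the CQ and TX rings keep one), and the
 * last page wraps back to page 0, so the hardware can chase the whole
 * ring across its physically separate pages:
 */
static inline void demo_link_rx_pages(struct eth_rx_bd *ring,
				      dma_addr_t base, int num_pages,
				      int desc_per_page)
{
	int i;

	for (i = 1; i <= num_pages; i++) {
		struct eth_rx_bd *link = &ring[desc_per_page * i - 2];
		dma_addr_t next = base + BCM_PAGE_SIZE * (i % num_pages);

		link->addr_hi = cpu_to_le32(U64_HI(next));
		link->addr_lo = cpu_to_le32(U64_LO(next));
	}
}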
5109
5110 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5111 {
5112         int i, j;
5113
5114         for_each_tx_queue(bp, j) {
5115                 struct bnx2x_fastpath *fp = &bp->fp[j];
5116
5117                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5118                         struct eth_tx_next_bd *tx_next_bd =
5119                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5120
5121                         tx_next_bd->addr_hi =
5122                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5123                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5124                         tx_next_bd->addr_lo =
5125                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5126                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5127                 }
5128
5129                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5130                 fp->tx_db.data.zero_fill1 = 0;
5131                 fp->tx_db.data.prod = 0;
5132
5133                 fp->tx_pkt_prod = 0;
5134                 fp->tx_pkt_cons = 0;
5135                 fp->tx_bd_prod = 0;
5136                 fp->tx_bd_cons = 0;
5137                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5138                 fp->tx_pkt = 0;
5139         }
5140
5141         /* clean tx statistics */
5142         for_each_rx_queue(bp, i)
5143                 bnx2x_fp(bp, i, tx_pkt) = 0;
5144 }
5145
5146 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5147 {
5148         int func = BP_FUNC(bp);
5149
5150         spin_lock_init(&bp->spq_lock);
5151
5152         bp->spq_left = MAX_SPQ_PENDING;
5153         bp->spq_prod_idx = 0;
5154         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5155         bp->spq_prod_bd = bp->spq;
5156         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5157
5158         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5159                U64_LO(bp->spq_mapping));
5160         REG_WR(bp,
5161                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5162                U64_HI(bp->spq_mapping));
5163
5164         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5165                bp->spq_prod_idx);
5166 }
5167
5168 static void bnx2x_init_context(struct bnx2x *bp)
5169 {
5170         int i;
5171
5172         for_each_rx_queue(bp, i) {
5173                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5174                 struct bnx2x_fastpath *fp = &bp->fp[i];
5175                 u8 cl_id = fp->cl_id;
5176
5177                 context->ustorm_st_context.common.sb_index_numbers =
5178                                                 BNX2X_RX_SB_INDEX_NUM;
5179                 context->ustorm_st_context.common.clientId = cl_id;
5180                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5181                 context->ustorm_st_context.common.flags =
5182                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5183                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5184                 context->ustorm_st_context.common.statistics_counter_id =
5185                                                 cl_id;
5186                 context->ustorm_st_context.common.mc_alignment_log_size =
5187                                                 BNX2X_RX_ALIGN_SHIFT;
5188                 context->ustorm_st_context.common.bd_buff_size =
5189                                                 bp->rx_buf_size;
5190                 context->ustorm_st_context.common.bd_page_base_hi =
5191                                                 U64_HI(fp->rx_desc_mapping);
5192                 context->ustorm_st_context.common.bd_page_base_lo =
5193                                                 U64_LO(fp->rx_desc_mapping);
5194                 if (!fp->disable_tpa) {
5195                         context->ustorm_st_context.common.flags |=
5196                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5197                         context->ustorm_st_context.common.sge_buff_size =
5198                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5199                                          (u32)0xffff);
5200                         context->ustorm_st_context.common.sge_page_base_hi =
5201                                                 U64_HI(fp->rx_sge_mapping);
5202                         context->ustorm_st_context.common.sge_page_base_lo =
5203                                                 U64_LO(fp->rx_sge_mapping);
5204
5205                         context->ustorm_st_context.common.max_sges_for_packet =
5206                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5207                         context->ustorm_st_context.common.max_sges_for_packet =
5208                                 ((context->ustorm_st_context.common.
5209                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5210                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5211                 }
5212
5213                 context->ustorm_ag_context.cdu_usage =
5214                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5215                                                CDU_REGION_NUMBER_UCM_AG,
5216                                                ETH_CONNECTION_TYPE);
5217
5218                 context->xstorm_ag_context.cdu_reserved =
5219                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5220                                                CDU_REGION_NUMBER_XCM_AG,
5221                                                ETH_CONNECTION_TYPE);
5222         }
5223
5224         for_each_tx_queue(bp, i) {
5225                 struct bnx2x_fastpath *fp = &bp->fp[i];
5226                 struct eth_context *context =
5227                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5228
5229                 context->cstorm_st_context.sb_index_number =
5230                                                 C_SB_ETH_TX_CQ_INDEX;
5231                 context->cstorm_st_context.status_block_id = fp->sb_id;
5232
5233                 context->xstorm_st_context.tx_bd_page_base_hi =
5234                                                 U64_HI(fp->tx_desc_mapping);
5235                 context->xstorm_st_context.tx_bd_page_base_lo =
5236                                                 U64_LO(fp->tx_desc_mapping);
5237                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5238                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5239         }
5240 }
5241
5242 static void bnx2x_init_ind_table(struct bnx2x *bp)
5243 {
5244         int func = BP_FUNC(bp);
5245         int i;
5246
5247         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5248                 return;
5249
5250         DP(NETIF_MSG_IFUP,
5251            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5252         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5253                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5254                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5255                         bp->fp->cl_id + (i % bp->num_rx_queues));
5256 }
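
/*
 * Sketch (demo_fill_ind_table is illustrative): the loop above simply
 * spreads the RSS hash buckets round-robin over the Rx client IDs, so
 * bucket i lands on client (base_cl_id + i % num_rx_queues):
 */
static inline void demo_fill_ind_table(u8 *table, int size, u8 base_cl_id,
				       int num_rx_queues)
{
	int i;

	for (i = 0; i < size; i++)
		table[i] = base_cl_id + (i % num_rx_queues);
}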
5257
5258 static void bnx2x_set_client_config(struct bnx2x *bp)
5259 {
5260         struct tstorm_eth_client_config tstorm_client = {0};
5261         int port = BP_PORT(bp);
5262         int i;
5263
5264         tstorm_client.mtu = bp->dev->mtu;
5265         tstorm_client.config_flags =
5266                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5267                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5268 #ifdef BCM_VLAN
5269         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5270                 tstorm_client.config_flags |=
5271                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5272                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5273         }
5274 #endif
5275
5276         for_each_queue(bp, i) {
5277                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5278
5279                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5280                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5281                        ((u32 *)&tstorm_client)[0]);
5282                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5283                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5284                        ((u32 *)&tstorm_client)[1]);
5285         }
5286
5287         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5288            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5289 }
5290
5291 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5292 {
5293         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5294         int mode = bp->rx_mode;
5295         int mask = bp->rx_mode_cl_mask;
5296         int func = BP_FUNC(bp);
5297         int port = BP_PORT(bp);
5298         int i;
5299         /* All but management unicast packets should pass to the host as well */
5300         u32 llh_mask =
5301                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5302                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5303                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5304                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5305
5306         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5307
5308         switch (mode) {
5309         case BNX2X_RX_MODE_NONE: /* no Rx */
5310                 tstorm_mac_filter.ucast_drop_all = mask;
5311                 tstorm_mac_filter.mcast_drop_all = mask;
5312                 tstorm_mac_filter.bcast_drop_all = mask;
5313                 break;
5314
5315         case BNX2X_RX_MODE_NORMAL:
5316                 tstorm_mac_filter.bcast_accept_all = mask;
5317                 break;
5318
5319         case BNX2X_RX_MODE_ALLMULTI:
5320                 tstorm_mac_filter.mcast_accept_all = mask;
5321                 tstorm_mac_filter.bcast_accept_all = mask;
5322                 break;
5323
5324         case BNX2X_RX_MODE_PROMISC:
5325                 tstorm_mac_filter.ucast_accept_all = mask;
5326                 tstorm_mac_filter.mcast_accept_all = mask;
5327                 tstorm_mac_filter.bcast_accept_all = mask;
5328                 /* pass management unicast packets as well */
5329                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5330                 break;
5331
5332         default:
5333                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5334                 break;
5335         }
5336
5337         REG_WR(bp,
5338                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5339                llh_mask);
5340
5341         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5342                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5343                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5344                        ((u32 *)&tstorm_mac_filter)[i]);
5345
5346 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5347                    ((u32 *)&tstorm_mac_filter)[i]); */
5348         }
5349
5350         if (mode != BNX2X_RX_MODE_NONE)
5351                 bnx2x_set_client_config(bp);
5352 }
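/*
 * Summary of the mode -> filter mapping programmed above (applied per the
 * client mask): NONE drops all unicast, multicast and broadcast; NORMAL
 * accepts all broadcast (other traffic presumably passes only on a CAM
 * match); ALLMULTI additionally accepts all multicast; PROMISC accepts
 * everything and also opens the NIG LLH mask so management-bound unicast
 * reaches the host.
 */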
5353
5354 static void bnx2x_init_internal_common(struct bnx2x *bp)
5355 {
5356         int i;
5357
5358         /* Zero this manually as its initialization is
5359            currently missing in the initTool */
5360         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5361                 REG_WR(bp, BAR_USTRORM_INTMEM +
5362                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5363 }
5364
5365 static void bnx2x_init_internal_port(struct bnx2x *bp)
5366 {
5367         int port = BP_PORT(bp);
5368
5369         REG_WR(bp,
5370                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5371         REG_WR(bp,
5372                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5373         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5374         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5375 }
5376
5377 static void bnx2x_init_internal_func(struct bnx2x *bp)
5378 {
5379         struct tstorm_eth_function_common_config tstorm_config = {0};
5380         struct stats_indication_flags stats_flags = {0};
5381         int port = BP_PORT(bp);
5382         int func = BP_FUNC(bp);
5383         int i, j;
5384         u32 offset;
5385         u16 max_agg_size;
5386
5387         if (is_multi(bp)) {
5388                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5389                 tstorm_config.rss_result_mask = MULTI_MASK;
5390         }
5391
5392         /* Enable TPA if needed */
5393         if (bp->flags & TPA_ENABLE_FLAG)
5394                 tstorm_config.config_flags |=
5395                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5396
5397         if (IS_E1HMF(bp))
5398                 tstorm_config.config_flags |=
5399                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5400
5401         tstorm_config.leading_client_id = BP_L_ID(bp);
5402
5403         REG_WR(bp, BAR_TSTRORM_INTMEM +
5404                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5405                (*(u32 *)&tstorm_config));
5406
5407         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5408         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5409         bnx2x_set_storm_rx_mode(bp);
5410
5411         for_each_queue(bp, i) {
5412                 u8 cl_id = bp->fp[i].cl_id;
5413
5414                 /* reset xstorm per client statistics */
5415                 offset = BAR_XSTRORM_INTMEM +
5416                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5417                 for (j = 0;
5418                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5419                         REG_WR(bp, offset + j*4, 0);
5420
5421                 /* reset tstorm per client statistics */
5422                 offset = BAR_TSTRORM_INTMEM +
5423                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5424                 for (j = 0;
5425                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5426                         REG_WR(bp, offset + j*4, 0);
5427
5428                 /* reset ustorm per client statistics */
5429                 offset = BAR_USTRORM_INTMEM +
5430                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5431                 for (j = 0;
5432                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5433                         REG_WR(bp, offset + j*4, 0);
5434         }
5435
5436         /* Init statistics related context */
5437         stats_flags.collect_eth = 1;
5438
5439         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5440                ((u32 *)&stats_flags)[0]);
5441         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5442                ((u32 *)&stats_flags)[1]);
5443
5444         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5445                ((u32 *)&stats_flags)[0]);
5446         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5447                ((u32 *)&stats_flags)[1]);
5448
5449         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5450                ((u32 *)&stats_flags)[0]);
5451         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5452                ((u32 *)&stats_flags)[1]);
5453
5454         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5455                ((u32 *)&stats_flags)[0]);
5456         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5457                ((u32 *)&stats_flags)[1]);
5458
5459         REG_WR(bp, BAR_XSTRORM_INTMEM +
5460                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5461                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5462         REG_WR(bp, BAR_XSTRORM_INTMEM +
5463                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5464                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5465
5466         REG_WR(bp, BAR_TSTRORM_INTMEM +
5467                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5468                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5469         REG_WR(bp, BAR_TSTRORM_INTMEM +
5470                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5471                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5472
5473         REG_WR(bp, BAR_USTRORM_INTMEM +
5474                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5475                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5476         REG_WR(bp, BAR_USTRORM_INTMEM +
5477                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5478                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5479
5480         if (CHIP_IS_E1H(bp)) {
5481                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5482                         IS_E1HMF(bp));
5483                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5484                         IS_E1HMF(bp));
5485                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5486                         IS_E1HMF(bp));
5487                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5488                         IS_E1HMF(bp));
5489
5490                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5491                          bp->e1hov);
5492         }
5493
5494         /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5495         max_agg_size =
5496                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5497                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5498                     (u32)0xffff);
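/*
 * Worked example with illustrative values: assuming 4 KiB SGE pages and
 * two pages per SGE, min(8, MAX_SKB_FRAGS) * 4096 * 2 = 65536, which the
 * 0xffff clamp trims to 65535 so the result fits the 16-bit
 * USTORM_MAX_AGG_SIZE location written below.
 */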
5499         for_each_rx_queue(bp, i) {
5500                 struct bnx2x_fastpath *fp = &bp->fp[i];
5501
5502                 REG_WR(bp, BAR_USTRORM_INTMEM +
5503                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5504                        U64_LO(fp->rx_comp_mapping));
5505                 REG_WR(bp, BAR_USTRORM_INTMEM +
5506                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5507                        U64_HI(fp->rx_comp_mapping));
5508
5509                 /* Next page */
5510                 REG_WR(bp, BAR_USTRORM_INTMEM +
5511                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5512                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5513                 REG_WR(bp, BAR_USTRORM_INTMEM +
5514                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5515                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5516
5517                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5518                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5519                          max_agg_size);
5520         }
5521
5522         /* dropless flow control */
5523         if (CHIP_IS_E1H(bp)) {
5524                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5525
5526                 rx_pause.bd_thr_low = 250;
5527                 rx_pause.cqe_thr_low = 250;
5528                 rx_pause.cos = 1;
5529                 rx_pause.sge_thr_low = 0;
5530                 rx_pause.bd_thr_high = 350;
5531                 rx_pause.cqe_thr_high = 350;
5532                 rx_pause.sge_thr_high = 0;
5533
5534                 for_each_rx_queue(bp, i) {
5535                         struct bnx2x_fastpath *fp = &bp->fp[i];
5536
5537                         if (!fp->disable_tpa) {
5538                                 rx_pause.sge_thr_low = 150;
5539                                 rx_pause.sge_thr_high = 250;
5540                         }
5541
5542
5543                         offset = BAR_USTRORM_INTMEM +
5544                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5545                                                                    fp->cl_id);
5546                         for (j = 0;
5547                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5548                              j++)
5549                                 REG_WR(bp, offset + j*4,
5550                                        ((u32 *)&rx_pause)[j]);
5551                 }
5552         }
5553
5554         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5555
5556         /* Init rate shaping and fairness contexts */
5557         if (IS_E1HMF(bp)) {
5558                 int vn;
5559
5560                 /* During init there is no active link
5561                    Until link is up, set link rate to 10Gbps */
5562                 bp->link_vars.line_speed = SPEED_10000;
5563                 bnx2x_init_port_minmax(bp);
5564
5565                 if (!BP_NOMCP(bp))
5566                         bp->mf_config =
5567                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5568                 bnx2x_calc_vn_weight_sum(bp);
5569
5570                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5571                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5572
5573                 /* Enable rate shaping and fairness */
5574                 bp->cmng.flags.cmng_enables |=
5575                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5576
5577         } else {
5578                 /* rate shaping and fairness are disabled */
5579                 DP(NETIF_MSG_IFUP,
5580                    "single function mode  minmax will be disabled\n");
5581         }
5582
5583
5584         /* Store it to internal memory */
5585         if (bp->port.pmf)
5586                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5587                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5588                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5589                                ((u32 *)(&bp->cmng))[i]);
5590 }
5591
5592 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5593 {
5594         switch (load_code) {
5595         case FW_MSG_CODE_DRV_LOAD_COMMON:
5596                 bnx2x_init_internal_common(bp);
5597                 /* no break */
5598
5599         case FW_MSG_CODE_DRV_LOAD_PORT:
5600                 bnx2x_init_internal_port(bp);
5601                 /* no break */
5602
5603         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5604                 bnx2x_init_internal_func(bp);
5605                 break;
5606
5607         default:
5608                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5609                 break;
5610         }
5611 }
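/*
 * The fall-throughs above are deliberate: a COMMON load also runs the
 * PORT and FUNCTION stages, and a PORT load also runs the FUNCTION stage,
 * so the init scope narrows with the load_code.
 */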
5612
5613 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5614 {
5615         int i;
5616
5617         for_each_queue(bp, i) {
5618                 struct bnx2x_fastpath *fp = &bp->fp[i];
5619
5620                 fp->bp = bp;
5621                 fp->state = BNX2X_FP_STATE_CLOSED;
5622                 fp->index = i;
5623                 fp->cl_id = BP_L_ID(bp) + i;
5624 #ifdef BCM_CNIC
5625                 fp->sb_id = fp->cl_id + 1;
5626 #else
5627                 fp->sb_id = fp->cl_id;
5628 #endif
5629                 /* Suitable Rx and Tx SBs are served by the same client */
5630                 if (i >= bp->num_rx_queues)
5631                         fp->cl_id -= bp->num_rx_queues;
5632                 DP(NETIF_MSG_IFUP,
5633                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5634                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5635                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5636                               fp->sb_id);
5637                 bnx2x_update_fpsb_idx(fp);
5638         }
5639
5640         /* ensure status block indices were read */
5641         rmb();
5642
5643
5644         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5645                           DEF_SB_ID);
5646         bnx2x_update_dsb_idx(bp);
5647         bnx2x_update_coalesce(bp);
5648         bnx2x_init_rx_rings(bp);
5649         bnx2x_init_tx_ring(bp);
5650         bnx2x_init_sp_ring(bp);
5651         bnx2x_init_context(bp);
5652         bnx2x_init_internal(bp, load_code);
5653         bnx2x_init_ind_table(bp);
5654         bnx2x_stats_init(bp);
5655
5656         /* At this point, we are ready for interrupts */
5657         atomic_set(&bp->intr_sem, 0);
5658
5659         /* flush all before enabling interrupts */
5660         mb();
5661         mmiowb();
5662
5663         bnx2x_int_enable(bp);
5664
5665         /* Check for SPIO5 */
5666         bnx2x_attn_int_deasserted0(bp,
5667                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5668                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5669 }
5670
5671 /* end of nic init */
5672
5673 /*
5674  * gzip service functions
5675  */
5676
5677 static int bnx2x_gunzip_init(struct bnx2x *bp)
5678 {
5679         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5680                                               &bp->gunzip_mapping);
5681         if (bp->gunzip_buf  == NULL)
5682                 goto gunzip_nomem1;
5683
5684         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5685         if (bp->strm  == NULL)
5686                 goto gunzip_nomem2;
5687
5688         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5689                                       GFP_KERNEL);
5690         if (bp->strm->workspace == NULL)
5691                 goto gunzip_nomem3;
5692
5693         return 0;
5694
5695 gunzip_nomem3:
5696         kfree(bp->strm);
5697         bp->strm = NULL;
5698
5699 gunzip_nomem2:
5700         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5701                             bp->gunzip_mapping);
5702         bp->gunzip_buf = NULL;
5703
5704 gunzip_nomem1:
5705         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5706                " decompression\n", bp->dev->name);
5707         return -ENOMEM;
5708 }
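/*
 * The gunzip_nomem labels follow the usual kernel unwind-in-reverse
 * idiom: each label releases exactly what was allocated before the
 * corresponding failure point, so a partial setup never leaks.
 */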
5709
5710 static void bnx2x_gunzip_end(struct bnx2x *bp)
5711 {
5712         kfree(bp->strm->workspace);
5713
5714         kfree(bp->strm);
5715         bp->strm = NULL;
5716
5717         if (bp->gunzip_buf) {
5718                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5719                                     bp->gunzip_mapping);
5720                 bp->gunzip_buf = NULL;
5721         }
5722 }
5723
5724 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5725 {
5726         int n, rc;
5727
5728         /* check gzip header */
5729         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5730                 BNX2X_ERR("Bad gzip header\n");
5731                 return -EINVAL;
5732         }
5733
5734         n = 10;                         /* fixed part of the gzip header */
5735
5736 #define FNAME                           0x8    /* gzip FLG.FNAME bit */
5737
5738         if (zbuf[3] & FNAME)
5739                 while ((zbuf[n++] != 0) && (n < len));
5740
5741         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5742         bp->strm->avail_in = len - n;
5743         bp->strm->next_out = bp->gunzip_buf;
5744         bp->strm->avail_out = FW_BUF_SIZE;
5745
5746         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5747         if (rc != Z_OK)
5748                 return rc;
5749
5750         rc = zlib_inflate(bp->strm, Z_FINISH);
5751         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5752                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5753                        bp->dev->name, bp->strm->msg);
5754
5755         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5756         if (bp->gunzip_outlen & 0x3)
5757                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5758                                     " gunzip_outlen (%d) not aligned\n",
5759                        bp->dev->name, bp->gunzip_outlen);
5760         bp->gunzip_outlen >>= 2;
5761
5762         zlib_inflateEnd(bp->strm);
5763
5764         if (rc == Z_STREAM_END)
5765                 return 0;
5766
5767         return rc;
5768 }
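#if 0
/*
 * User-space sketch of the same scheme: skip the gzip header by hand,
 * then raw-inflate with negative windowBits.  Illustrative only -
 * gunzip_raw is a hypothetical name and plain zlib is assumed instead of
 * the kernel's zlib_* wrappers.
 */
#include <string.h>
#include <zlib.h>

static int gunzip_raw(const unsigned char *zbuf, unsigned int len,
                      unsigned char *out, unsigned int out_len)
{
        z_stream strm;
        unsigned int n = 10;            /* fixed part of the gzip header */
        int rc;

        if ((len < n) || (zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) ||
            (zbuf[2] != Z_DEFLATED))
                return -1;

        if (zbuf[3] & 0x08)             /* FLG.FNAME: skip the stored name */
                while ((n < len) && (zbuf[n++] != 0))
                        ;

        memset(&strm, 0, sizeof(strm));
        strm.next_in = (unsigned char *)zbuf + n;
        strm.avail_in = len - n;
        strm.next_out = out;
        strm.avail_out = out_len;

        /* negative windowBits selects raw deflate (no gzip/zlib wrapper) */
        if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)
                return -1;
        rc = inflate(&strm, Z_FINISH);
        inflateEnd(&strm);

        return (rc == Z_STREAM_END) ? (int)(out_len - strm.avail_out) : -1;
}
#endif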
5769
5770 /* nic load/unload */
5771
5772 /*
5773  * General service functions
5774  */
5775
5776 /* send a NIG loopback debug packet */
5777 static void bnx2x_lb_pckt(struct bnx2x *bp)
5778 {
5779         u32 wb_write[3];
5780
5781         /* Ethernet source and destination addresses */
5782         wb_write[0] = 0x55555555;
5783         wb_write[1] = 0x55555555;
5784         wb_write[2] = 0x20;             /* SOP */
5785         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5786
5787         /* NON-IP protocol */
5788         wb_write[0] = 0x09000000;
5789         wb_write[1] = 0x55555555;
5790         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5791         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5792 }
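/*
 * Each REG_WR_DMAE above pushes 8 bytes of frame data plus a control word
 * (0x20 = start-of-packet, 0x10 = end-of-packet), so the two writes emit
 * one minimal 16-byte loopback frame - which is why the memory test below
 * expects the NIG octet counter to read 0x10 per packet.
 */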
5793
5794 /* Some of the internal memories are not directly
5795  * readable from the driver; to test them we
5796  * send debug packets.
5797  */
5798 static int bnx2x_int_mem_test(struct bnx2x *bp)
5799 {
5800         int factor;
5801         int count, i;
5802         u32 val = 0;
5803
5804         if (CHIP_REV_IS_FPGA(bp))
5805                 factor = 120;
5806         else if (CHIP_REV_IS_EMUL(bp))
5807                 factor = 200;
5808         else
5809                 factor = 1;
5810
5811         DP(NETIF_MSG_HW, "start part1\n");
5812
5813         /* Disable inputs of parser neighbor blocks */
5814         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5815         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5816         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5817         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5818
5819         /*  Write 0 to parser credits for CFC search request */
5820         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5821
5822         /* send Ethernet packet */
5823         bnx2x_lb_pckt(bp);
5824
5825         /* TODO: should the NIG statistics be reset here? */
5826         /* Wait until NIG register shows 1 packet of size 0x10 */
5827         count = 1000 * factor;
5828         while (count) {
5829
5830                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5831                 val = *bnx2x_sp(bp, wb_data[0]);
5832                 if (val == 0x10)
5833                         break;
5834
5835                 msleep(10);
5836                 count--;
5837         }
5838         if (val != 0x10) {
5839                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5840                 return -1;
5841         }
5842
5843         /* Wait until PRS register shows 1 packet */
5844         count = 1000 * factor;
5845         while (count) {
5846                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5847                 if (val == 1)
5848                         break;
5849
5850                 msleep(10);
5851                 count--;
5852         }
5853         if (val != 0x1) {
5854                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5855                 return -2;
5856         }
5857
5858         /* Reset and init BRB, PRS */
5859         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5860         msleep(50);
5861         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5862         msleep(50);
5863         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5864         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5865
5866         DP(NETIF_MSG_HW, "part2\n");
5867
5868         /* Disable inputs of parser neighbor blocks */
5869         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5870         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5871         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5872         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5873
5874         /* Write 0 to parser credits for CFC search request */
5875         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5876
5877         /* send 10 Ethernet packets */
5878         for (i = 0; i < 10; i++)
5879                 bnx2x_lb_pckt(bp);
5880
5881         /* Wait until NIG register shows 10 + 1
5882            packets of size 11*0x10 = 0xb0 */
5883         count = 1000 * factor;
5884         while (count) {
5885
5886                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5887                 val = *bnx2x_sp(bp, wb_data[0]);
5888                 if (val == 0xb0)
5889                         break;
5890
5891                 msleep(10);
5892                 count--;
5893         }
5894         if (val != 0xb0) {
5895                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5896                 return -3;
5897         }
5898
5899         /* Wait until PRS register shows 2 packets */
5900         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5901         if (val != 2)
5902                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5903
5904         /* Write 1 to parser credits for CFC search request */
5905         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5906
5907         /* Wait until PRS register shows 3 packets */
5908         msleep(10 * factor);
5909         /* the extra CFC credit should let the parser pass one more packet */
5910         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5911         if (val != 3)
5912                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5913
5914         /* clear NIG EOP FIFO */
5915         for (i = 0; i < 11; i++)
5916                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5917         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5918         if (val != 1) {
5919                 BNX2X_ERR("clear of NIG failed\n");
5920                 return -4;
5921         }
5922
5923         /* Reset and init BRB, PRS, NIG */
5924         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5925         msleep(50);
5926         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5927         msleep(50);
5928         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5929         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5930 #ifndef BCM_CNIC
5931         /* set NIC mode */
5932         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5933 #endif
5934
5935         /* Enable inputs of parser neighbor blocks */
5936         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5937         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5938         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5939         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5940
5941         DP(NETIF_MSG_HW, "done\n");
5942
5943         return 0; /* OK */
5944 }
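/*
 * The REG_RD wait loops above share one shape; a minimal sketch of a
 * common helper (bnx2x_poll_reg_eq is a hypothetical name, assuming the
 * driver's REG_RD and msleep):
 */
static inline int bnx2x_poll_reg_eq(struct bnx2x *bp, u32 reg, u32 expected,
                                    int count)
{
        while (count-- > 0) {
                if (REG_RD(bp, reg) == expected)
                        return 0;
                msleep(10);
        }
        return -ETIMEDOUT;
}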
5945
5946 static void enable_blocks_attention(struct bnx2x *bp)
5947 {
5948         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5949         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5950         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5951         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5952         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5953         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5954         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5955         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5956         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5957 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5958 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5959         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5960         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5961         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5962 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5963 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5964         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5965         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5966         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5967         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5968 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5969 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5970         if (CHIP_REV_IS_FPGA(bp))
5971                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5972         else
5973                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5974         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5975         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5976         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5977 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5978 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5979         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5980         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5981 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5982         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5983 }
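/*
 * Writing 0 to a block's *_INT_MASK register unmasks every attention
 * source in that block; PBF is the one exception above, keeping bits 3-4
 * masked (0x18).
 */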
5984
5985
5986 static void bnx2x_reset_common(struct bnx2x *bp)
5987 {
5988         /* reset_common */
5989         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5990                0xd3ffff7f);
5991         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5992 }
5993
5994 static void bnx2x_init_pxp(struct bnx2x *bp)
5995 {
5996         u16 devctl;
5997         int r_order, w_order;
5998
5999         pci_read_config_word(bp->pdev,
6000                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6001         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6002         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6003         if (bp->mrrs == -1)
6004                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6005         else {
6006                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6007                 r_order = bp->mrrs;
6008         }
6009
6010         bnx2x_init_pxp_arb(bp, r_order, w_order);
6011 }
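/*
 * Worked example of the DEVCTL decode above, using the standard PCIe
 * encoding size = 128 << field: a devctl of 0x2810 gives a payload field
 * of (0x2810 & PCI_EXP_DEVCTL_PAYLOAD) >> 5 = 0 (128 bytes) and a
 * read-request field of (0x2810 & PCI_EXP_DEVCTL_READRQ) >> 12 = 2
 * (512 bytes); those orders are what bnx2x_init_pxp_arb consumes.
 */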
6012
6013 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6014 {
6015         u32 val;
6016         u8 port;
6017         u8 is_required = 0;
6018
6019         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6020               SHARED_HW_CFG_FAN_FAILURE_MASK;
6021
6022         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6023                 is_required = 1;
6024
6025         /*
6026          * The fan failure mechanism is usually related to the PHY type since
6027          * the power consumption of the board is affected by the PHY. Currently,
6028          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6029          */
6030         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6031                 for (port = PORT_0; port < PORT_MAX; port++) {
6032                         u32 phy_type =
6033                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6034                                          external_phy_config) &
6035                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6036                         is_required |=
6037                                 ((phy_type ==
6038                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6039                                  (phy_type ==
6040                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6041                                  (phy_type ==
6042                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6043                 }
6044
6045         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6046
6047         if (is_required == 0)
6048                 return;
6049
6050         /* Fan failure is indicated by SPIO 5 */
6051         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6052                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6053
6054         /* set to active low mode */
6055         val = REG_RD(bp, MISC_REG_SPIO_INT);
6056         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6057                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6058         REG_WR(bp, MISC_REG_SPIO_INT, val);
6059
6060         /* enable interrupt to signal the IGU */
6061         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6062         val |= (1 << MISC_REGISTERS_SPIO_5);
6063         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6064 }
6065
6066 static int bnx2x_init_common(struct bnx2x *bp)
6067 {
6068         u32 val, i;
6069 #ifdef BCM_CNIC
6070         u32 wb_write[2];
6071 #endif
6072
6073         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6074
6075         bnx2x_reset_common(bp);
6076         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6077         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6078
6079         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6080         if (CHIP_IS_E1H(bp))
6081                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6082
6083         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6084         msleep(30);
6085         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6086
6087         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6088         if (CHIP_IS_E1(bp)) {
6089                 /* enable HW interrupt from PXP on USDM overflow
6090                    bit 16 on INT_MASK_0 */
6091                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6092         }
6093
6094         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6095         bnx2x_init_pxp(bp);
6096
6097 #ifdef __BIG_ENDIAN
6098         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6099         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6100         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6101         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6102         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6103         /* make sure this value is 0 */
6104         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6105
6106 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6107         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6108         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6109         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6110         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6111 #endif
6112
6113         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6114 #ifdef BCM_CNIC
6115         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6116         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6117         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6118 #endif
6119
6120         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6121                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6122
6123         /* let the HW do its magic ... */
6124         msleep(100);
6125         /* finish PXP init */
6126         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6127         if (val != 1) {
6128                 BNX2X_ERR("PXP2 CFG failed\n");
6129                 return -EBUSY;
6130         }
6131         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6132         if (val != 1) {
6133                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6134                 return -EBUSY;
6135         }
6136
6137         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6138         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6139
6140         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6141
6142         /* clean the DMAE memory */
6143         bp->dmae_ready = 1;
6144         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6145
6146         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6147         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6148         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6149         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6150
6151         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6152         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6153         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6154         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6155
6156         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6157
6158 #ifdef BCM_CNIC
6159         wb_write[0] = 0;
6160         wb_write[1] = 0;
6161         for (i = 0; i < 64; i++) {
6162                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6163                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6164
6165                 if (CHIP_IS_E1H(bp)) {
6166                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6167                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6168                                           wb_write, 2);
6169                 }
6170         }
6171 #endif
6172         /* soft reset pulse */
6173         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6174         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6175
6176 #ifdef BCM_CNIC
6177         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6178 #endif
6179
6180         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6181         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6182         if (!CHIP_REV_IS_SLOW(bp)) {
6183                 /* enable hw interrupt from doorbell Q */
6184                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6185         }
6186
6187         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6188         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6189         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6190 #ifndef BCM_CNIC
6191         /* set NIC mode */
6192         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6193 #endif
6194         if (CHIP_IS_E1H(bp))
6195                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6196
6197         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6198         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6199         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6200         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6201
6202         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6203         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6204         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6205         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6206
6207         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6208         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6209         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6210         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6211
6212         /* sync semi rtc */
6213         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6214                0x80000000);
6215         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6216                0x80000000);
6217
6218         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6219         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6220         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6221
6222         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6223         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6224                 REG_WR(bp, i, 0xc0cac01a);
6225                 /* TODO: replace with something meaningful */
6226         }
6227         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6228 #ifdef BCM_CNIC
6229         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6230         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6231         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6232         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6233         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6234         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6235         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6236         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6237         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6238         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6239 #endif
6240         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6241
6242         if (sizeof(union cdu_context) != 1024)
6243                 /* we currently assume that a context is 1024 bytes */
6244                 printk(KERN_ALERT PFX "please adjust the size of"
6245                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6246
6247         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6248         val = (4 << 24) + (0 << 12) + 1024;
6249         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6250
6251         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6252         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6253         /* enable context validation interrupt from CFC */
6254         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6255
6256         /* set the thresholds to prevent CFC/CDU race */
6257         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6258
6259         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6260         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6261
6262         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6263         /* Reset PCIE errors for debug */
6264         REG_WR(bp, 0x2814, 0xffffffff);
6265         REG_WR(bp, 0x3820, 0xffffffff);
6266
6267         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6268         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6269         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6270         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6271
6272         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6273         if (CHIP_IS_E1H(bp)) {
6274                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6275                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6276         }
6277
6278         if (CHIP_REV_IS_SLOW(bp))
6279                 msleep(200);
6280
6281         /* finish CFC init */
6282         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6283         if (val != 1) {
6284                 BNX2X_ERR("CFC LL_INIT failed\n");
6285                 return -EBUSY;
6286         }
6287         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6288         if (val != 1) {
6289                 BNX2X_ERR("CFC AC_INIT failed\n");
6290                 return -EBUSY;
6291         }
6292         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6293         if (val != 1) {
6294                 BNX2X_ERR("CFC CAM_INIT failed\n");
6295                 return -EBUSY;
6296         }
6297         REG_WR(bp, CFC_REG_DEBUG0, 0);
6298
6299         /* read NIG statistic
6300            to see if this is our first up since powerup */
6301         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6302         val = *bnx2x_sp(bp, wb_data[0]);
6303
6304         /* do internal memory self test */
6305         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6306                 BNX2X_ERR("internal mem self test failed\n");
6307                 return -EBUSY;
6308         }
6309
6310         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6311         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6312         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6313         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6314         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6315                 bp->port.need_hw_lock = 1;
6316                 break;
6317
6318         default:
6319                 break;
6320         }
6321
6322         bnx2x_setup_fan_failure_detection(bp);
6323
6324         /* clear PXP2 attentions */
6325         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6326
6327         enable_blocks_attention(bp);
6328
6329         if (!BP_NOMCP(bp)) {
6330                 bnx2x_acquire_phy_lock(bp);
6331                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6332                 bnx2x_release_phy_lock(bp);
6333         } else
6334                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6335
6336         return 0;
6337 }
6338
6339 static int bnx2x_init_port(struct bnx2x *bp)
6340 {
6341         int port = BP_PORT(bp);
6342         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6343         u32 low, high;
6344         u32 val;
6345
6346         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6347
6348         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6349
6350         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6351         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6352
6353         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6354         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6355         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6356         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6357
6358 #ifdef BCM_CNIC
6359         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6360
6361         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6362         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6363         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6364 #endif
6365         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6366
6367         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6368         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6369                 /* no pause for emulation and FPGA */
6370                 low = 0;
6371                 high = 513;
6372         } else {
6373                 if (IS_E1HMF(bp))
6374                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6375                 else if (bp->dev->mtu > 4096) {
6376                         if (bp->flags & ONE_PORT_FLAG)
6377                                 low = 160;
6378                         else {
6379                                 val = bp->dev->mtu;
6380                                 /* (24*1024 + val*4)/256 */
6381                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6382                         }
6383                 } else
6384                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6385                 high = low + 56;        /* 14*1024/256 */
6386         }
6387         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6388         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6389
6390
6391         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6392
6393         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6394         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6395         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6396         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6397
6398         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6399         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6400         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6401         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6402
6403         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6404         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6405
6406         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6407
6408         /* configure PBF to work without PAUSE mtu 9000 */
6409         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6410
6411         /* update threshold */
6412         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6413         /* update init credit */
6414         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6415
6416         /* probe changes */
6417         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6418         msleep(5);
6419         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6420
6421 #ifdef BCM_CNIC
6422         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6423 #endif
6424         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6425         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6426
6427         if (CHIP_IS_E1(bp)) {
6428                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6429                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6430         }
6431         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6432
6433         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6434         /* init aeu_mask_attn_func_0/1:
6435          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6436          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6437          *             bits 4-7 are used for "per vn group attention" */
6438         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6439                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6440
6441         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6442         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6443         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6444         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6445         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6446
6447         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6448
6449         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6450
6451         if (CHIP_IS_E1H(bp)) {
6452                 /* 0x2 disable e1hov, 0x1 enable */
6453                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6454                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6455
6456                 {
6457                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6458                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6459                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6460                 }
6461         }
6462
6463         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6464         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6465
6466         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6467         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6468                 {
6469                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6470
6471                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6472                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6473
6474                 /* The GPIO should be swapped if the swap register is
6475                    set and active */
6476                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6477                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6478
6479                 /* Select function upon port-swap configuration */
6480                 if (port == 0) {
6481                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6482                         aeu_gpio_mask = (swap_val && swap_override) ?
6483                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6484                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6485                 } else {
6486                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6487                         aeu_gpio_mask = (swap_val && swap_override) ?
6488                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6489                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6490                 }
6491                 val = REG_RD(bp, offset);
6492                 /* add GPIO3 to group */
6493                 val |= aeu_gpio_mask;
6494                 REG_WR(bp, offset, val);
6495                 }
6496                 break;
6497
6498         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6499         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6500                 /* add SPIO 5 to group 0 */
6501                 {
6502                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6503                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6504                 val = REG_RD(bp, reg_addr);
6505                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6506                 REG_WR(bp, reg_addr, val);
6507                 }
6508                 break;
6509
6510         default:
6511                 break;
6512         }
6513
6514         bnx2x__link_reset(bp);
6515
6516         return 0;
6517 }
6518
6519 #define ILT_PER_FUNC            (768/2)
6520 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6521 /* the phys address is shifted right 12 bits and a 1=valid bit is
6522    added at the 53rd bit of the result;
6523    then, since this is a wide register(TM),
6524    we split it into two 32-bit writes
6525  */
6526 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6527 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6528 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6529 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
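/*
 * Worked example of the ILT address split (illustrative values): for a
 * DMA address of 0x1234567000,
 *      ONCHIP_ADDR1 -> (addr >> 12) & 0xFFFFFFFF = 0x01234567
 *      ONCHIP_ADDR2 -> (1 << 20) | (addr >> 44)  = 0x00100000
 * i.e. bits 12-43 go in the first write and bits 44-63 plus the valid bit
 * (bit 52 of the combined value - the "53rd bit" above) in the second.
 */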
6530
6531 #ifdef BCM_CNIC
6532 #define CNIC_ILT_LINES          127
6533 #define CNIC_CTX_PER_ILT        16
6534 #else
6535 #define CNIC_ILT_LINES          0
6536 #endif
6537
6538 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6539 {
6540         int reg;
6541
6542         if (CHIP_IS_E1H(bp))
6543                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6544         else /* E1 */
6545                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6546
6547         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6548 }
6549
6550 static int bnx2x_init_func(struct bnx2x *bp)
6551 {
6552         int port = BP_PORT(bp);
6553         int func = BP_FUNC(bp);
6554         u32 addr, val;
6555         int i;
6556
6557         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6558
6559         /* set MSI reconfigure capability */
6560         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6561         val = REG_RD(bp, addr);
6562         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6563         REG_WR(bp, addr, val);
6564
6565         i = FUNC_ILT_BASE(func);
6566
6567         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6568         if (CHIP_IS_E1H(bp)) {
6569                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6570                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6571         } else /* E1 */
6572                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6573                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6574
6575 #ifdef BCM_CNIC
6576         i += 1 + CNIC_ILT_LINES;
6577         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6578         if (CHIP_IS_E1(bp))
6579                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6580         else {
6581                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6582                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6583         }
6584
6585         i++;
6586         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6587         if (CHIP_IS_E1(bp))
6588                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6589         else {
6590                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6591                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6592         }
6593
6594         i++;
6595         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6596         if (CHIP_IS_E1(bp))
6597                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6598         else {
6599                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6600                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6601         }
6602
6603         /* tell the searcher where the T2 table is */
6604         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6605
6606         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6607                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6608
6609         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6610                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6611                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6612
6613         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6614 #endif
6615
6616         if (CHIP_IS_E1H(bp)) {
6617                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6618                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6619                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6620                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6621                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6622                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6623                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6624                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6625                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6626
6627                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6628                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6629         }
6630
6631         /* HC init per function */
6632         if (CHIP_IS_E1H(bp)) {
6633                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6634
6635                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6636                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6637         }
6638         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6639
6640         /* Reset PCIE errors for debug */
6641         REG_WR(bp, 0x2114, 0xffffffff);
6642         REG_WR(bp, 0x2120, 0xffffffff);
6643
6644         return 0;
6645 }
6646
6647 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6648 {
6649         int i, rc = 0;
6650
6651         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6652            BP_FUNC(bp), load_code);
6653
6654         bp->dmae_ready = 0;
6655         mutex_init(&bp->dmae_mutex);
6656         rc = bnx2x_gunzip_init(bp);
6657         if (rc)
6658                 return rc;
6659
6660         switch (load_code) {
6661         case FW_MSG_CODE_DRV_LOAD_COMMON:
6662                 rc = bnx2x_init_common(bp);
6663                 if (rc)
6664                         goto init_hw_err;
6665                 /* no break */
6666
6667         case FW_MSG_CODE_DRV_LOAD_PORT:
6668                 bp->dmae_ready = 1;
6669                 rc = bnx2x_init_port(bp);
6670                 if (rc)
6671                         goto init_hw_err;
6672                 /* no break */
6673
6674         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6675                 bp->dmae_ready = 1;
6676                 rc = bnx2x_init_func(bp);
6677                 if (rc)
6678                         goto init_hw_err;
6679                 break;
6680
6681         default:
6682                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6683                 break;
6684         }
6685
6686         if (!BP_NOMCP(bp)) {
6687                 int func = BP_FUNC(bp);
6688
6689                 bp->fw_drv_pulse_wr_seq =
6690                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6691                                  DRV_PULSE_SEQ_MASK);
6692                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6693         }
6694
6695         /* this needs to be done before gunzip end */
6696         bnx2x_zero_def_sb(bp);
6697         for_each_queue(bp, i)
6698                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6699 #ifdef BCM_CNIC
6700         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6701 #endif
6702
6703 init_hw_err:
6704         bnx2x_gunzip_end(bp);
6705
6706         return rc;
6707 }
6708
6709 static void bnx2x_free_mem(struct bnx2x *bp)
6710 {
6711
6712 #define BNX2X_PCI_FREE(x, y, size) \
6713         do { \
6714                 if (x) { \
6715                         pci_free_consistent(bp->pdev, size, x, y); \
6716                         x = NULL; \
6717                         y = 0; \
6718                 } \
6719         } while (0)
6720
6721 #define BNX2X_FREE(x) \
6722         do { \
6723                 if (x) { \
6724                         vfree(x); \
6725                         x = NULL; \
6726                 } \
6727         } while (0)
6728
6729         int i;
6730
6731         /* fastpath */
6732         /* Common */
6733         for_each_queue(bp, i) {
6734
6735                 /* status blocks */
6736                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6737                                bnx2x_fp(bp, i, status_blk_mapping),
6738                                sizeof(struct host_status_block));
6739         }
6740         /* Rx */
6741         for_each_rx_queue(bp, i) {
6742
6743                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6744                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6745                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6746                                bnx2x_fp(bp, i, rx_desc_mapping),
6747                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6748
6749                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6750                                bnx2x_fp(bp, i, rx_comp_mapping),
6751                                sizeof(struct eth_fast_path_rx_cqe) *
6752                                NUM_RCQ_BD);
6753
6754                 /* SGE ring */
6755                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6756                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6757                                bnx2x_fp(bp, i, rx_sge_mapping),
6758                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6759         }
6760         /* Tx */
6761         for_each_tx_queue(bp, i) {
6762
6763                 /* fastpath tx rings: tx_buf tx_desc */
6764                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6765                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6766                                bnx2x_fp(bp, i, tx_desc_mapping),
6767                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6768         }
6769         /* end of fastpath */
6770
6771         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6772                        sizeof(struct host_def_status_block));
6773
6774         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6775                        sizeof(struct bnx2x_slowpath));
6776
6777 #ifdef BCM_CNIC
6778         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6779         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6780         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6781         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6782         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6783                        sizeof(struct host_status_block));
6784 #endif
6785         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6786
6787 #undef BNX2X_PCI_FREE
6788 #undef BNX2X_FREE
6789 }
6790
6791 static int bnx2x_alloc_mem(struct bnx2x *bp)
6792 {
6793
6794 #define BNX2X_PCI_ALLOC(x, y, size) \
6795         do { \
6796                 x = pci_alloc_consistent(bp->pdev, size, y); \
6797                 if (x == NULL) \
6798                         goto alloc_mem_err; \
6799                 memset(x, 0, size); \
6800         } while (0)
6801
6802 #define BNX2X_ALLOC(x, size) \
6803         do { \
6804                 x = vmalloc(size); \
6805                 if (x == NULL) \
6806                         goto alloc_mem_err; \
6807                 memset(x, 0, size); \
6808         } while (0)
6809
6810         int i;
6811
6812         /* fastpath */
6813         /* Common */
6814         for_each_queue(bp, i) {
6815                 bnx2x_fp(bp, i, bp) = bp;
6816
6817                 /* status blocks */
6818                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6819                                 &bnx2x_fp(bp, i, status_blk_mapping),
6820                                 sizeof(struct host_status_block));
6821         }
6822         /* Rx */
6823         for_each_rx_queue(bp, i) {
6824
6825                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6826                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6827                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6828                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6829                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6830                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6831
6832                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6833                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6834                                 sizeof(struct eth_fast_path_rx_cqe) *
6835                                 NUM_RCQ_BD);
6836
6837                 /* SGE ring */
6838                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6839                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6840                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6841                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6842                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6843         }
6844         /* Tx */
6845         for_each_tx_queue(bp, i) {
6846
6847                 /* fastpath tx rings: tx_buf tx_desc */
6848                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6849                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6850                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6851                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6852                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6853         }
6854         /* end of fastpath */
6855
6856         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6857                         sizeof(struct host_def_status_block));
6858
6859         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6860                         sizeof(struct bnx2x_slowpath));
6861
6862 #ifdef BCM_CNIC
6863         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6864
6865         /* allocate searcher T2 table
6866            we allocate 1/4 of alloc num for T2
6867            (which is not entered into the ILT) */
6868         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6869
6870         /* Initialize T2 (for 1024 connections) */
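        /* chain the entries: the last 8 bytes of each 64-byte T2 entry
         * hold the physical address of the next entry
         */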
6871         for (i = 0; i < 16*1024; i += 64)
6872                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6873
6874         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6875         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6876
6877         /* QM queues (128*MAX_CONN) */
6878         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6879
6880         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6881                         sizeof(struct host_status_block));
6882 #endif
6883
6884         /* Slow path ring */
6885         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6886
6887         return 0;
6888
6889 alloc_mem_err:
6890         bnx2x_free_mem(bp);
6891         return -ENOMEM;
6892
6893 #undef BNX2X_PCI_ALLOC
6894 #undef BNX2X_ALLOC
6895 }
6896
6897 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6898 {
6899         int i;
6900
6901         for_each_tx_queue(bp, i) {
6902                 struct bnx2x_fastpath *fp = &bp->fp[i];
6903
6904                 u16 bd_cons = fp->tx_bd_cons;
6905                 u16 sw_prod = fp->tx_pkt_prod;
6906                 u16 sw_cons = fp->tx_pkt_cons;
6907
6908                 while (sw_cons != sw_prod) {
6909                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6910                         sw_cons++;
6911                 }
6912         }
6913 }
6914
6915 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6916 {
6917         int i, j;
6918
6919         for_each_rx_queue(bp, j) {
6920                 struct bnx2x_fastpath *fp = &bp->fp[j];
6921
6922                 for (i = 0; i < NUM_RX_BD; i++) {
6923                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6924                         struct sk_buff *skb = rx_buf->skb;
6925
6926                         if (skb == NULL)
6927                                 continue;
6928
6929                         pci_unmap_single(bp->pdev,
6930                                          pci_unmap_addr(rx_buf, mapping),
6931                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6932
6933                         rx_buf->skb = NULL;
6934                         dev_kfree_skb(skb);
6935                 }
6936                 if (!fp->disable_tpa)
6937                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6938                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6939                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6940         }
6941 }
6942
6943 static void bnx2x_free_skbs(struct bnx2x *bp)
6944 {
6945         bnx2x_free_tx_skbs(bp);
6946         bnx2x_free_rx_skbs(bp);
6947 }
6948
6949 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6950 {
6951         int i, offset = 1;
6952
6953         free_irq(bp->msix_table[0].vector, bp->dev);
6954         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6955            bp->msix_table[0].vector);
6956
6957 #ifdef BCM_CNIC
6958         offset++;
6959 #endif
6960         for_each_queue(bp, i) {
6961                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6962                    "state %x\n", i, bp->msix_table[i + offset].vector,
6963                    bnx2x_fp(bp, i, state));
6964
6965                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6966         }
6967 }
6968
6969 static void bnx2x_free_irq(struct bnx2x *bp)
6970 {
6971         if (bp->flags & USING_MSIX_FLAG) {
6972                 bnx2x_free_msix_irqs(bp);
6973                 pci_disable_msix(bp->pdev);
6974                 bp->flags &= ~USING_MSIX_FLAG;
6975
6976         } else if (bp->flags & USING_MSI_FLAG) {
6977                 free_irq(bp->pdev->irq, bp->dev);
6978                 pci_disable_msi(bp->pdev);
6979                 bp->flags &= ~USING_MSI_FLAG;
6980
6981         } else
6982                 free_irq(bp->pdev->irq, bp->dev);
6983 }
6984
6985 static int bnx2x_enable_msix(struct bnx2x *bp)
6986 {
6987         int i, rc, offset = 1;
6988         int igu_vec = 0;
6989
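        /* MSI-X vector layout: entry 0 is the slowpath interrupt, entry 1
         * is CNIC (when BCM_CNIC is set) and the remaining entries serve
         * the fastpath queues
         */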
6990         bp->msix_table[0].entry = igu_vec;
6991         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6992
6993 #ifdef BCM_CNIC
6994         igu_vec = BP_L_ID(bp) + offset;
6995         bp->msix_table[1].entry = igu_vec;
6996         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6997         offset++;
6998 #endif
6999         for_each_queue(bp, i) {
7000                 igu_vec = BP_L_ID(bp) + offset + i;
7001                 bp->msix_table[i + offset].entry = igu_vec;
7002                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7003                    "(fastpath #%u)\n", i + offset, igu_vec, i);
7004         }
7005
7006         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7007                              BNX2X_NUM_QUEUES(bp) + offset);
7008         if (rc) {
7009                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
7010                 return rc;
7011         }
7012
7013         bp->flags |= USING_MSIX_FLAG;
7014
7015         return 0;
7016 }
7017
7018 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7019 {
7020         int i, rc, offset = 1;
7021
7022         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7023                          bp->dev->name, bp->dev);
7024         if (rc) {
7025                 BNX2X_ERR("request sp irq failed\n");
7026                 return -EBUSY;
7027         }
7028
7029 #ifdef BCM_CNIC
7030         offset++;
7031 #endif
7032         for_each_queue(bp, i) {
7033                 struct bnx2x_fastpath *fp = &bp->fp[i];
7034
7035                 if (i < bp->num_rx_queues)
7036                         sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
7037                 else
7038                         sprintf(fp->name, "%s-tx-%d",
7039                                 bp->dev->name, i - bp->num_rx_queues);
7040
7041                 rc = request_irq(bp->msix_table[i + offset].vector,
7042                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7043                 if (rc) {
7044                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7045                         bnx2x_free_msix_irqs(bp);
7046                         return -EBUSY;
7047                 }
7048
7049                 fp->state = BNX2X_FP_STATE_IRQ;
7050         }
7051
7052         i = BNX2X_NUM_QUEUES(bp);
7053         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
7054                " ... fp[%d] %d\n",
7055                bp->dev->name, bp->msix_table[0].vector,
7056                0, bp->msix_table[offset].vector,
7057                i - 1, bp->msix_table[offset + i - 1].vector);
7058
7059         return 0;
7060 }
7061
7062 static int bnx2x_enable_msi(struct bnx2x *bp)
7063 {
7064         int rc;
7065
7066         rc = pci_enable_msi(bp->pdev);
7067         if (rc) {
7068                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7069                 return -1;
7070         }
7071         bp->flags |= USING_MSI_FLAG;
7072
7073         return 0;
7074 }
7075
7076 static int bnx2x_req_irq(struct bnx2x *bp)
7077 {
7078         unsigned long flags;
7079         int rc;
7080
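        /* an MSI vector is exclusive to this device, so a shared IRQ is
         * only needed for the legacy INTx case
         */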
7081         if (bp->flags & USING_MSI_FLAG)
7082                 flags = 0;
7083         else
7084                 flags = IRQF_SHARED;
7085
7086         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7087                          bp->dev->name, bp->dev);
7088         if (!rc)
7089                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7090
7091         return rc;
7092 }
7093
7094 static void bnx2x_napi_enable(struct bnx2x *bp)
7095 {
7096         int i;
7097
7098         for_each_rx_queue(bp, i)
7099                 napi_enable(&bnx2x_fp(bp, i, napi));
7100 }
7101
7102 static void bnx2x_napi_disable(struct bnx2x *bp)
7103 {
7104         int i;
7105
7106         for_each_rx_queue(bp, i)
7107                 napi_disable(&bnx2x_fp(bp, i, napi));
7108 }
7109
7110 static void bnx2x_netif_start(struct bnx2x *bp)
7111 {
7112         int intr_sem;
7113
7114         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7115         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7116
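        /* atomic_dec_and_test() returns true only when intr_sem dropped
         * to zero, i.e. nobody else is still keeping interrupts disabled
         */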
7117         if (intr_sem) {
7118                 if (netif_running(bp->dev)) {
7119                         bnx2x_napi_enable(bp);
7120                         bnx2x_int_enable(bp);
7121                         if (bp->state == BNX2X_STATE_OPEN)
7122                                 netif_tx_wake_all_queues(bp->dev);
7123                 }
7124         }
7125 }
7126
7127 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7128 {
7129         bnx2x_int_disable_sync(bp, disable_hw);
7130         bnx2x_napi_disable(bp);
7131         netif_tx_disable(bp->dev);
7132         bp->dev->trans_start = jiffies; /* prevent tx timeout */
7133 }
7134
7135 /*
7136  * Init service functions
7137  */
7138
7139 /**
7140  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7141  *
7142  * @param bp driver descriptor
7143  * @param set set or clear an entry (1 or 0)
7144  * @param mac pointer to a buffer containing a MAC
7145  * @param cl_bit_vec bit vector of clients to register a MAC for
7146  * @param cam_offset offset in a CAM to use
7147  * @param with_bcast set broadcast MAC as well
7148  */
7149 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7150                                       u32 cl_bit_vec, u8 cam_offset,
7151                                       u8 with_bcast)
7152 {
7153         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7154         int port = BP_PORT(bp);
7155
7156         /* CAM allocation
7157          * unicasts 0-31:port0 32-63:port1
7158          * multicast 64-127:port0 128-191:port1
7159          */
7160         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7161         config->hdr.offset = cam_offset;
7162         config->hdr.client_id = 0xff;
7163         config->hdr.reserved1 = 0;
7164
7165         /* primary MAC */
7166         config->config_table[0].cam_entry.msb_mac_addr =
7167                                         swab16(*(u16 *)&mac[0]);
7168         config->config_table[0].cam_entry.middle_mac_addr =
7169                                         swab16(*(u16 *)&mac[2]);
7170         config->config_table[0].cam_entry.lsb_mac_addr =
7171                                         swab16(*(u16 *)&mac[4]);
7172         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7173         if (set)
7174                 config->config_table[0].target_table_entry.flags = 0;
7175         else
7176                 CAM_INVALIDATE(config->config_table[0]);
7177         config->config_table[0].target_table_entry.clients_bit_vector =
7178                                                 cpu_to_le32(cl_bit_vec);
7179         config->config_table[0].target_table_entry.vlan_id = 0;
7180
7181         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7182            (set ? "setting" : "clearing"),
7183            config->config_table[0].cam_entry.msb_mac_addr,
7184            config->config_table[0].cam_entry.middle_mac_addr,
7185            config->config_table[0].cam_entry.lsb_mac_addr);
7186
7187         /* broadcast */
7188         if (with_bcast) {
7189                 config->config_table[1].cam_entry.msb_mac_addr =
7190                         cpu_to_le16(0xffff);
7191                 config->config_table[1].cam_entry.middle_mac_addr =
7192                         cpu_to_le16(0xffff);
7193                 config->config_table[1].cam_entry.lsb_mac_addr =
7194                         cpu_to_le16(0xffff);
7195                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7196                 if (set)
7197                         config->config_table[1].target_table_entry.flags =
7198                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7199                 else
7200                         CAM_INVALIDATE(config->config_table[1]);
7201                 config->config_table[1].target_table_entry.clients_bit_vector =
7202                                                         cpu_to_le32(cl_bit_vec);
7203                 config->config_table[1].target_table_entry.vlan_id = 0;
7204         }
7205
7206         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7207                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7208                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7209 }
7210
7211 /**
7212  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7213  *
7214  * @param bp driver descriptor
7215  * @param set set or clear an entry (1 or 0)
7216  * @param mac pointer to a buffer containing a MAC
7217  * @param cl_bit_vec bit vector of clients to register a MAC for
7218  * @param cam_offset offset in a CAM to use
7219  */
7220 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7221                                        u32 cl_bit_vec, u8 cam_offset)
7222 {
7223         struct mac_configuration_cmd_e1h *config =
7224                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7225
7226         config->hdr.length = 1;
7227         config->hdr.offset = cam_offset;
7228         config->hdr.client_id = 0xff;
7229         config->hdr.reserved1 = 0;
7230
7231         /* primary MAC */
7232         config->config_table[0].msb_mac_addr =
7233                                         swab16(*(u16 *)&mac[0]);
7234         config->config_table[0].middle_mac_addr =
7235                                         swab16(*(u16 *)&mac[2]);
7236         config->config_table[0].lsb_mac_addr =
7237                                         swab16(*(u16 *)&mac[4]);
7238         config->config_table[0].clients_bit_vector =
7239                                         cpu_to_le32(cl_bit_vec);
7240         config->config_table[0].vlan_id = 0;
7241         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7242         if (set)
7243                 config->config_table[0].flags = BP_PORT(bp);
7244         else
7245                 config->config_table[0].flags =
7246                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7247
7248         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7249            (set ? "setting" : "clearing"),
7250            config->config_table[0].msb_mac_addr,
7251            config->config_table[0].middle_mac_addr,
7252            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7253
7254         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7255                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7256                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7257 }
7258
7259 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7260                              int *state_p, int poll)
7261 {
7262         /* can take a while if any port is running */
7263         int cnt = 5000;
7264
7265         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7266            poll ? "polling" : "waiting", state, idx);
7267
7268         might_sleep();
7269         while (cnt--) {
7270                 if (poll) {
7271                         bnx2x_rx_int(bp->fp, 10);
7272                         /* if the index is different from 0,
7273                          * the reply for some commands will
7274                          * be on the non-default queue
7275                          */
7276                         if (idx)
7277                                 bnx2x_rx_int(&bp->fp[idx], 10);
7278                 }
7279
7280                 mb(); /* state is changed by bnx2x_sp_event() */
7281                 if (*state_p == state) {
7282 #ifdef BNX2X_STOP_ON_ERROR
7283                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7284 #endif
7285                         return 0;
7286                 }
7287
7288                 msleep(1);
7289
7290                 if (bp->panic)
7291                         return -EIO;
7292         }
7293
7294         /* timeout! */
7295         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7296                   poll ? "polling" : "waiting", state, idx);
7297 #ifdef BNX2X_STOP_ON_ERROR
7298         bnx2x_panic();
7299 #endif
7300
7301         return -EBUSY;
7302 }
7303
7304 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7305 {
7306         bp->set_mac_pending++;
7307         smp_wmb();
7308
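        /* the SET_MAC ramrod completion path decrements set_mac_pending;
         * bnx2x_wait_ramrod() below waits for it to drop back to zero
         */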
7309         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7310                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7311
7312         /* Wait for a completion */
7313         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7314 }
7315
7316 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7317 {
7318         bp->set_mac_pending++;
7319         smp_wmb();
7320
7321         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7322                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7323                                   1);
7324
7325         /* Wait for a completion */
7326         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7327 }
7328
7329 #ifdef BCM_CNIC
7330 /**
7331  * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7332  * MAC(s). This function will wait until the ramdord completion
7333  * returns.
7334  *
7335  * @param bp driver handle
7336  * @param set set or clear the CAM entry
7337  *
7338  * @return 0 on success, -ENODEV if the ramrod doesn't return.
7339  */
7340 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7341 {
7342         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7343
7344         bp->set_mac_pending++;
7345         smp_wmb();
7346
7347         /* Send a SET_MAC ramrod */
7348         if (CHIP_IS_E1(bp))
7349                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7350                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7351                                   1);
7352         else
7353                 /* CAM allocation for E1H
7354                  * unicasts: by func number
7355                  * multicast: 20+FUNC*20, 20 each
7356                  */
7357                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7358                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7359
7360         /* Wait for a completion when setting */
7361         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7362
7363         return 0;
7364 }
7365 #endif
7366
7367 static int bnx2x_setup_leading(struct bnx2x *bp)
7368 {
7369         int rc;
7370
7371         /* reset IGU state */
7372         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7373
7374         /* SETUP ramrod */
7375         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7376
7377         /* Wait for completion */
7378         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7379
7380         return rc;
7381 }
7382
7383 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7384 {
7385         struct bnx2x_fastpath *fp = &bp->fp[index];
7386
7387         /* reset IGU state */
7388         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7389
7390         /* SETUP ramrod */
7391         fp->state = BNX2X_FP_STATE_OPENING;
7392         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7393                       fp->cl_id, 0);
7394
7395         /* Wait for completion */
7396         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7397                                  &(fp->state), 0);
7398 }
7399
7400 static int bnx2x_poll(struct napi_struct *napi, int budget);
7401
7402 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7403                                     int *num_tx_queues_out)
7404 {
7405         int _num_rx_queues = 0, _num_tx_queues = 0;
7406
7407         switch (bp->multi_mode) {
7408         case ETH_RSS_MODE_DISABLED:
7409                 _num_rx_queues = 1;
7410                 _num_tx_queues = 1;
7411                 break;
7412
7413         case ETH_RSS_MODE_REGULAR:
7414                 if (num_rx_queues)
7415                         _num_rx_queues = min_t(u32, num_rx_queues,
7416                                                BNX2X_MAX_QUEUES(bp));
7417                 else
7418                         _num_rx_queues = min_t(u32, num_online_cpus(),
7419                                                BNX2X_MAX_QUEUES(bp));
7420
7421                 if (num_tx_queues)
7422                         _num_tx_queues = min_t(u32, num_tx_queues,
7423                                                BNX2X_MAX_QUEUES(bp));
7424                 else
7425                         _num_tx_queues = min_t(u32, num_online_cpus(),
7426                                                BNX2X_MAX_QUEUES(bp));
7427
7428                 /* There must not be more Tx queues than Rx queues */
7429                 if (_num_tx_queues > _num_rx_queues) {
7430                         BNX2X_ERR("number of tx queues (%d) > "
7431                                   "number of rx queues (%d)"
7432                                   "  defaulting to %d\n",
7433                                   _num_tx_queues, _num_rx_queues,
7434                                   _num_rx_queues);
7435                         _num_tx_queues = _num_rx_queues;
7436                 }
7437                 break;
7438
7440         default:
7441                 _num_rx_queues = 1;
7442                 _num_tx_queues = 1;
7443                 break;
7444         }
7445
7446         *num_rx_queues_out = _num_rx_queues;
7447         *num_tx_queues_out = _num_tx_queues;
7448 }
7449
7450 static int bnx2x_set_int_mode(struct bnx2x *bp)
7451 {
7452         int rc = 0;
7453
7454         switch (int_mode) {
7455         case INT_MODE_INTx:
7456         case INT_MODE_MSI:
7457                 bp->num_rx_queues = 1;
7458                 bp->num_tx_queues = 1;
7459                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7460                 break;
7461
7462         case INT_MODE_MSIX:
7463         default:
7464                 /* Set interrupt mode according to bp->multi_mode value */
7465                 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7466                                         &bp->num_tx_queues);
7467
7468                 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7469                    bp->num_rx_queues, bp->num_tx_queues);
7470
7471                 /* if we can't use MSI-X we only need one fp,
7472                  * so try to enable MSI-X with the requested number of fp's
7473                  * and fall back to MSI or legacy INTx with one fp
7474                  */
7475                 rc = bnx2x_enable_msix(bp);
7476                 if (rc) {
7477                         /* failed to enable MSI-X */
7478                         if (bp->multi_mode)
7479                                 BNX2X_ERR("Multi requested but failed to "
7480                                           "enable MSI-X (rx %d tx %d), "
7481                                           "set number of queues to 1\n",
7482                                           bp->num_rx_queues, bp->num_tx_queues);
7483                         bp->num_rx_queues = 1;
7484                         bp->num_tx_queues = 1;
7485                 }
7486                 break;
7487         }
7488         bp->dev->real_num_tx_queues = bp->num_tx_queues;
7489         return rc;
7490 }
7491
7492 #ifdef BCM_CNIC
7493 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7494 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7495 #endif
7496
7497 /* must be called with rtnl_lock */
7498 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7499 {
7500         u32 load_code;
7501         int i, rc;
7502
7503 #ifdef BNX2X_STOP_ON_ERROR
7504         if (unlikely(bp->panic))
7505                 return -EPERM;
7506 #endif
7507
7508         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7509
7510         rc = bnx2x_set_int_mode(bp);
7511
7512         if (bnx2x_alloc_mem(bp))
7513                 return -ENOMEM;
7514
7515         for_each_rx_queue(bp, i)
7516                 bnx2x_fp(bp, i, disable_tpa) =
7517                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7518
7519         for_each_rx_queue(bp, i)
7520                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7521                                bnx2x_poll, 128);
7522
7523         bnx2x_napi_enable(bp);
7524
7525         if (bp->flags & USING_MSIX_FLAG) {
7526                 rc = bnx2x_req_msix_irqs(bp);
7527                 if (rc) {
7528                         pci_disable_msix(bp->pdev);
7529                         goto load_error1;
7530                 }
7531         } else {
7532                 /* Fall back to INTx if we failed to enable MSI-X due to
7533                    lack of memory (in bnx2x_set_int_mode()) */
7534                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7535                         bnx2x_enable_msi(bp);
7536                 bnx2x_ack_int(bp);
7537                 rc = bnx2x_req_irq(bp);
7538                 if (rc) {
7539                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7540                         if (bp->flags & USING_MSI_FLAG)
7541                                 pci_disable_msi(bp->pdev);
7542                         goto load_error1;
7543                 }
7544                 if (bp->flags & USING_MSI_FLAG) {
7545                         bp->dev->irq = bp->pdev->irq;
7546                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7547                                bp->dev->name, bp->pdev->irq);
7548                 }
7549         }
7550
7551         /* Send LOAD_REQUEST command to the MCP.
7552            The response is the type of LOAD command:
7553            if this is the first port to be initialized,
7554            the common blocks should be initialized as well; otherwise not
7555         */
7556         if (!BP_NOMCP(bp)) {
7557                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7558                 if (!load_code) {
7559                         BNX2X_ERR("MCP response failure, aborting\n");
7560                         rc = -EBUSY;
7561                         goto load_error2;
7562                 }
7563                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7564                         rc = -EBUSY; /* other port in diagnostic mode */
7565                         goto load_error2;
7566                 }
7567
7568         } else {
7569                 int port = BP_PORT(bp);
7570
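                /* with no MCP, load_count[] emulates the load-type
                 * decision: load_count[0] counts all loaded functions and
                 * load_count[1 + port] counts those on this port
                 */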
7571                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7572                    load_count[0], load_count[1], load_count[2]);
7573                 load_count[0]++;
7574                 load_count[1 + port]++;
7575                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7576                    load_count[0], load_count[1], load_count[2]);
7577                 if (load_count[0] == 1)
7578                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7579                 else if (load_count[1 + port] == 1)
7580                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7581                 else
7582                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7583         }
7584
7585         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7586             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7587                 bp->port.pmf = 1;
7588         else
7589                 bp->port.pmf = 0;
7590         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7591
7592         /* Initialize HW */
7593         rc = bnx2x_init_hw(bp, load_code);
7594         if (rc) {
7595                 BNX2X_ERR("HW init failed, aborting\n");
7596                 goto load_error2;
7597         }
7598
7599         /* Setup NIC internals and enable interrupts */
7600         bnx2x_nic_init(bp, load_code);
7601
7602         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7603             (bp->common.shmem2_base))
7604                 SHMEM2_WR(bp, dcc_support,
7605                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7606                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7607
7608         /* Send LOAD_DONE command to MCP */
7609         if (!BP_NOMCP(bp)) {
7610                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7611                 if (!load_code) {
7612                         BNX2X_ERR("MCP response failure, aborting\n");
7613                         rc = -EBUSY;
7614                         goto load_error3;
7615                 }
7616         }
7617
7618         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7619
7620         rc = bnx2x_setup_leading(bp);
7621         if (rc) {
7622                 BNX2X_ERR("Setup leading failed!\n");
7623 #ifndef BNX2X_STOP_ON_ERROR
7624                 goto load_error3;
7625 #else
7626                 bp->panic = 1;
7627                 return -EBUSY;
7628 #endif
7629         }
7630
7631         if (CHIP_IS_E1H(bp))
7632                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7633                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7634                         bp->flags |= MF_FUNC_DIS;
7635                 }
7636
7637         if (bp->state == BNX2X_STATE_OPEN) {
7638 #ifdef BCM_CNIC
7639                 /* Enable Timer scan */
7640                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7641 #endif
7642                 for_each_nondefault_queue(bp, i) {
7643                         rc = bnx2x_setup_multi(bp, i);
7644                         if (rc)
7645 #ifdef BCM_CNIC
7646                                 goto load_error4;
7647 #else
7648                                 goto load_error3;
7649 #endif
7650                 }
7651
7652                 if (CHIP_IS_E1(bp))
7653                         bnx2x_set_eth_mac_addr_e1(bp, 1);
7654                 else
7655                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
7656 #ifdef BCM_CNIC
7657                 /* Set iSCSI L2 MAC */
7658                 mutex_lock(&bp->cnic_mutex);
7659                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7660                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7661                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7662                 }
7663                 mutex_unlock(&bp->cnic_mutex);
7664 #endif
7665         }
7666
7667         if (bp->port.pmf)
7668                 bnx2x_initial_phy_init(bp, load_mode);
7669
7670         /* Start fast path */
7671         switch (load_mode) {
7672         case LOAD_NORMAL:
7673                 if (bp->state == BNX2X_STATE_OPEN) {
7674                         /* Tx queues should only be re-enabled */
7675                         netif_tx_wake_all_queues(bp->dev);
7676                 }
7677                 /* Initialize the receive filter. */
7678                 bnx2x_set_rx_mode(bp->dev);
7679                 break;
7680
7681         case LOAD_OPEN:
7682                 netif_tx_start_all_queues(bp->dev);
7683                 if (bp->state != BNX2X_STATE_OPEN)
7684                         netif_tx_disable(bp->dev);
7685                 /* Initialize the receive filter. */
7686                 bnx2x_set_rx_mode(bp->dev);
7687                 break;
7688
7689         case LOAD_DIAG:
7690                 /* Initialize the receive filter. */
7691                 bnx2x_set_rx_mode(bp->dev);
7692                 bp->state = BNX2X_STATE_DIAG;
7693                 break;
7694
7695         default:
7696                 break;
7697         }
7698
7699         if (!bp->port.pmf)
7700                 bnx2x__link_status_update(bp);
7701
7702         /* start the timer */
7703         mod_timer(&bp->timer, jiffies + bp->current_interval);
7704
7705 #ifdef BCM_CNIC
7706         bnx2x_setup_cnic_irq_info(bp);
7707         if (bp->state == BNX2X_STATE_OPEN)
7708                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7709 #endif
7710
7711         return 0;
7712
7713 #ifdef BCM_CNIC
7714 load_error4:
7715         /* Disable Timer scan */
7716         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7717 #endif
7718 load_error3:
7719         bnx2x_int_disable_sync(bp, 1);
7720         if (!BP_NOMCP(bp)) {
7721                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7722                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7723         }
7724         bp->port.pmf = 0;
7725         /* Free SKBs, SGEs, TPA pool and driver internals */
7726         bnx2x_free_skbs(bp);
7727         for_each_rx_queue(bp, i)
7728                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7729 load_error2:
7730         /* Release IRQs */
7731         bnx2x_free_irq(bp);
7732 load_error1:
7733         bnx2x_napi_disable(bp);
7734         for_each_rx_queue(bp, i)
7735                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7736         bnx2x_free_mem(bp);
7737
7738         return rc;
7739 }
7740
7741 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7742 {
7743         struct bnx2x_fastpath *fp = &bp->fp[index];
7744         int rc;
7745
7746         /* halt the connection */
7747         fp->state = BNX2X_FP_STATE_HALTING;
7748         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7749
7750         /* Wait for completion */
7751         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7752                                &(fp->state), 1);
7753         if (rc) /* timeout */
7754                 return rc;
7755
7756         /* delete cfc entry */
7757         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7758
7759         /* Wait for completion */
7760         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7761                                &(fp->state), 1);
7762         return rc;
7763 }
7764
7765 static int bnx2x_stop_leading(struct bnx2x *bp)
7766 {
7767         __le16 dsb_sp_prod_idx;
7768         /* if the other port is handling traffic,
7769            this can take a lot of time */
7770         int cnt = 500;
7771         int rc;
7772
7773         might_sleep();
7774
7775         /* Send HALT ramrod */
7776         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7777         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7778
7779         /* Wait for completion */
7780         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7781                                &(bp->fp[0].state), 1);
7782         if (rc) /* timeout */
7783                 return rc;
7784
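        /* snapshot the default status block slowpath producer; the
         * PORT_DEL completion will advance it
         */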
7785         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7786
7787         /* Send PORT_DELETE ramrod */
7788         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7789
7790         /* Wait for the completion to arrive on the default status block;
7791            we are going to reset the chip anyway,
7792            so there is not much to do if this times out
7793          */
7794         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7795                 if (!cnt) {
7796                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7797                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7798                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7799 #ifdef BNX2X_STOP_ON_ERROR
7800                         bnx2x_panic();
7801 #endif
7802                         rc = -EBUSY;
7803                         break;
7804                 }
7805                 cnt--;
7806                 msleep(1);
7807                 rmb(); /* Refresh the dsb_sp_prod */
7808         }
7809         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7810         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7811
7812         return rc;
7813 }
7814
7815 static void bnx2x_reset_func(struct bnx2x *bp)
7816 {
7817         int port = BP_PORT(bp);
7818         int func = BP_FUNC(bp);
7819         int base, i;
7820
7821         /* Configure IGU */
7822         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7823         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7824
7825 #ifdef BCM_CNIC
7826         /* Disable Timer scan */
7827         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7828         /*
7829          * Wait for at least 10ms and up to 2 seconds for the timers scan to
7830          * complete
7831          */
7832         for (i = 0; i < 200; i++) {
7833                 msleep(10);
7834                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7835                         break;
7836         }
7837 #endif
7838         /* Clear ILT */
7839         base = FUNC_ILT_BASE(func);
7840         for (i = base; i < base + ILT_PER_FUNC; i++)
7841                 bnx2x_ilt_wr(bp, i, 0);
7842 }
7843
7844 static void bnx2x_reset_port(struct bnx2x *bp)
7845 {
7846         int port = BP_PORT(bp);
7847         u32 val;
7848
7849         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7850
7851         /* Do not rcv packets to BRB */
7852         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7853         /* Do not direct rcv packets that are not for MCP to the BRB */
7854         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7855                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7856
7857         /* Configure AEU */
7858         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7859
7860         msleep(100);
7861         /* Check for BRB port occupancy */
7862         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7863         if (val)
7864                 DP(NETIF_MSG_IFDOWN,
7865                    "BRB1 is not empty  %d blocks are occupied\n", val);
7866
7867         /* TODO: Close Doorbell port? */
7868 }
7869
7870 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7871 {
7872         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7873            BP_FUNC(bp), reset_code);
7874
7875         switch (reset_code) {
7876         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7877                 bnx2x_reset_port(bp);
7878                 bnx2x_reset_func(bp);
7879                 bnx2x_reset_common(bp);
7880                 break;
7881
7882         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7883                 bnx2x_reset_port(bp);
7884                 bnx2x_reset_func(bp);
7885                 break;
7886
7887         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7888                 bnx2x_reset_func(bp);
7889                 break;
7890
7891         default:
7892                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7893                 break;
7894         }
7895 }
7896
7897 /* must be called with rtnl_lock */
7898 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7899 {
7900         int port = BP_PORT(bp);
7901         u32 reset_code = 0;
7902         int i, cnt, rc;
7903
7904 #ifdef BCM_CNIC
7905         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7906 #endif
7907         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7908
7909         /* Set "drop all" */
7910         bp->rx_mode = BNX2X_RX_MODE_NONE;
7911         bnx2x_set_storm_rx_mode(bp);
7912
7913         /* Disable HW interrupts, NAPI and Tx */
7914         bnx2x_netif_stop(bp, 1);
7915
7916         del_timer_sync(&bp->timer);
7917         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7918                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7919         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7920
7921         /* Release IRQs */
7922         bnx2x_free_irq(bp);
7923
7924         /* Wait until tx fastpath tasks complete */
7925         for_each_tx_queue(bp, i) {
7926                 struct bnx2x_fastpath *fp = &bp->fp[i];
7927
7928                 cnt = 1000;
7929                 while (bnx2x_has_tx_work_unload(fp)) {
7930
7931                         bnx2x_tx_int(fp);
7932                         if (!cnt) {
7933                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7934                                           i);
7935 #ifdef BNX2X_STOP_ON_ERROR
7936                                 bnx2x_panic();
7937                                 return -EBUSY;
7938 #else
7939                                 break;
7940 #endif
7941                         }
7942                         cnt--;
7943                         msleep(1);
7944                 }
7945         }
7946         /* Give HW time to discard old tx messages */
7947         msleep(1);
7948
7949         if (CHIP_IS_E1(bp)) {
7950                 struct mac_configuration_cmd *config =
7951                                                 bnx2x_sp(bp, mcast_config);
7952
7953                 bnx2x_set_eth_mac_addr_e1(bp, 0);
7954
7955                 for (i = 0; i < config->hdr.length; i++)
7956                         CAM_INVALIDATE(config->config_table[i]);
7957
7958                 config->hdr.length = i;
7959                 if (CHIP_REV_IS_SLOW(bp))
7960                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7961                 else
7962                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7963                 config->hdr.client_id = bp->fp->cl_id;
7964                 config->hdr.reserved1 = 0;
7965
7966                 bp->set_mac_pending++;
7967                 smp_wmb();
7968
7969                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7970                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7971                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7972
7973         } else { /* E1H */
7974                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7975
7976                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7977
7978                 for (i = 0; i < MC_HASH_SIZE; i++)
7979                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7980
7981                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7982         }
7983 #ifdef BCM_CNIC
7984         /* Clear iSCSI L2 MAC */
7985         mutex_lock(&bp->cnic_mutex);
7986         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7987                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7988                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7989         }
7990         mutex_unlock(&bp->cnic_mutex);
7991 #endif
7992
7993         if (unload_mode == UNLOAD_NORMAL)
7994                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7995
7996         else if (bp->flags & NO_WOL_FLAG)
7997                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7998
7999         else if (bp->wol) {
8000                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8001                 u8 *mac_addr = bp->dev->dev_addr;
8002                 u32 val;
8003                 /* The mac address is written to entries 1-4 to
8004                    preserve entry 0 which is used by the PMF */
8005                 u8 entry = (BP_E1HVN(bp) + 1)*8;
8006
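                /* each MAC_MATCH entry spans two 32-bit registers
                 * (8 bytes), hence the (vn + 1) * 8 offset above
                 */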
8007                 val = (mac_addr[0] << 8) | mac_addr[1];
8008                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8009
8010                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8011                       (mac_addr[4] << 8) | mac_addr[5];
8012                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8013
8014                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8015
8016         } else
8017                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8018
8019         /* Close multi and leading connections;
8020            completions for ramrods are collected synchronously */
8021         for_each_nondefault_queue(bp, i)
8022                 if (bnx2x_stop_multi(bp, i))
8023                         goto unload_error;
8024
8025         rc = bnx2x_stop_leading(bp);
8026         if (rc) {
8027                 BNX2X_ERR("Stop leading failed!\n");
8028 #ifdef BNX2X_STOP_ON_ERROR
8029                 return -EBUSY;
8030 #else
8031                 goto unload_error;
8032 #endif
8033         }
8034
8035 unload_error:
8036         if (!BP_NOMCP(bp))
8037                 reset_code = bnx2x_fw_command(bp, reset_code);
8038         else {
8039                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
8040                    load_count[0], load_count[1], load_count[2]);
8041                 load_count[0]--;
8042                 load_count[1 + port]--;
8043                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
8044                    load_count[0], load_count[1], load_count[2]);
8045                 if (load_count[0] == 0)
8046                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8047                 else if (load_count[1 + port] == 0)
8048                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8049                 else
8050                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8051         }
8052
8053         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8054             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8055                 bnx2x__link_reset(bp);
8056
8057         /* Reset the chip */
8058         bnx2x_reset_chip(bp, reset_code);
8059
8060         /* Report UNLOAD_DONE to MCP */
8061         if (!BP_NOMCP(bp))
8062                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8063
8064         bp->port.pmf = 0;
8065
8066         /* Free SKBs, SGEs, TPA pool and driver internals */
8067         bnx2x_free_skbs(bp);
8068         for_each_rx_queue(bp, i)
8069                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8070         for_each_rx_queue(bp, i)
8071                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8072         bnx2x_free_mem(bp);
8073
8074         bp->state = BNX2X_STATE_CLOSED;
8075
8076         netif_carrier_off(bp->dev);
8077
8078         return 0;
8079 }
8080
8081 static void bnx2x_reset_task(struct work_struct *work)
8082 {
8083         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8084
8085 #ifdef BNX2X_STOP_ON_ERROR
8086         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8087                   " so reset not done to allow debug dump,\n"
8088                   " you will need to reboot when done\n");
8089         return;
8090 #endif
8091
8092         rtnl_lock();
8093
8094         if (!netif_running(bp->dev))
8095                 goto reset_task_exit;
8096
8097         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8098         bnx2x_nic_load(bp, LOAD_NORMAL);
8099
8100 reset_task_exit:
8101         rtnl_unlock();
8102 }
8103
8104 /* end of nic load/unload */
8105
8106 /* ethtool_ops */
8107
8108 /*
8109  * Init service functions
8110  */
8111
8112 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8113 {
8114         switch (func) {
8115         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8116         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8117         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8118         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8119         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8120         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8121         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8122         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8123         default:
8124                 BNX2X_ERR("Unsupported function index: %d\n", func);
8125                 return (u32)(-1);
8126         }
8127 }
8128
8129 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8130 {
8131         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8132
8133         /* Flush all outstanding writes */
8134         mmiowb();
8135
8136         /* Pretend to be function 0 */
8137         REG_WR(bp, reg, 0);
8138         /* Flush the GRC transaction (in the chip) */
8139         new_val = REG_RD(bp, reg);
8140         if (new_val != 0) {
8141                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8142                           new_val);
8143                 BUG();
8144         }
8145
8146         /* From now we are in the "like-E1" mode */
8147         bnx2x_int_disable(bp);
8148
8149         /* Flush all outstanding writes */
8150         mmiowb();
8151
8152         /* Restore the original function settings */
8153         REG_WR(bp, reg, orig_func);
8154         new_val = REG_RD(bp, reg);
8155         if (new_val != orig_func) {
8156                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8157                           orig_func, new_val);
8158                 BUG();
8159         }
8160 }
8161
8162 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8163 {
8164         if (CHIP_IS_E1H(bp))
8165                 bnx2x_undi_int_disable_e1h(bp, func);
8166         else
8167                 bnx2x_int_disable(bp);
8168 }
8169
8170 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8171 {
8172         u32 val;
8173
8174         /* Check if there is any driver already loaded */
8175         val = REG_RD(bp, MISC_REG_UNPREPARED);
8176         if (val == 0x1) {
8177                 /* Check if it is the UNDI driver
8178                  * UNDI driver initializes CID offset for normal bell to 0x7
8179                  */
8180                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8181                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8182                 if (val == 0x7) {
8183                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8184                         /* save our func */
8185                         int func = BP_FUNC(bp);
8186                         u32 swap_en;
8187                         u32 swap_val;
8188
8189                         /* clear the UNDI indication */
8190                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8191
8192                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
8193
8194                         /* try unload UNDI on port 0 */
8195                         bp->func = 0;
8196                         bp->fw_seq =
8197                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8198                                 DRV_MSG_SEQ_NUMBER_MASK);
8199                         reset_code = bnx2x_fw_command(bp, reset_code);
8200
8201                         /* if UNDI is loaded on the other port */
8202                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8203
8204                                 /* send "DONE" for previous unload */
8205                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8206
8207                                 /* unload UNDI on port 1 */
8208                                 bp->func = 1;
8209                                 bp->fw_seq =
8210                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8211                                         DRV_MSG_SEQ_NUMBER_MASK);
8212                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8213
8214                                 bnx2x_fw_command(bp, reset_code);
8215                         }
8216
8217                         /* now it's safe to release the lock */
8218                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8219
8220                         bnx2x_undi_int_disable(bp, func);
8221
8222                         /* close input traffic and wait for it to drain */
8223                         /* Do not receive packets into the BRB */
8224                         REG_WR(bp,
8225                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8226                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8227                         /* Do not direct packets that are not for the MCP
8228                          * to the BRB */
8229                         REG_WR(bp,
8230                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8231                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8232                         /* clear AEU */
8233                         REG_WR(bp,
8234                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8235                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8236                         msleep(10);
8237
8238                         /* save NIG port swap info */
8239                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8240                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8241                         /* reset device */
8242                         REG_WR(bp,
8243                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8244                                0xd3ffffff);
8245                         REG_WR(bp,
8246                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8247                                0x1403);
8248                         /* take the NIG out of reset and restore swap values */
8249                         REG_WR(bp,
8250                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8251                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
8252                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8253                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8254
8255                         /* send unload done to the MCP */
8256                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8257
8258                         /* restore our func and fw_seq */
8259                         bp->func = func;
8260                         bp->fw_seq =
8261                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8262                                 DRV_MSG_SEQ_NUMBER_MASK);
8263
8264                 } else
8265                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8266         }
8267 }
8268
8269 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8270 {
8271         u32 val, val2, val3, val4, id;
8272         u16 pmc;
8273
8274         /* Get the chip revision id and number. */
8275         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8276         val = REG_RD(bp, MISC_REG_CHIP_NUM);
8277         id = ((val & 0xffff) << 16);
8278         val = REG_RD(bp, MISC_REG_CHIP_REV);
8279         id |= ((val & 0xf) << 12);
8280         val = REG_RD(bp, MISC_REG_CHIP_METAL);
8281         id |= ((val & 0xff) << 4);
8282         val = REG_RD(bp, MISC_REG_BOND_ID);
8283         id |= (val & 0xf);
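        /* e.g. an id of 0x164e1014 decodes as chip num 0x164e, rev 0x1,
         * metal 0x01 and bond_id 0x4 (illustrative values only)
         */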
8284         bp->common.chip_id = id;
8285         bp->link_params.chip_id = bp->common.chip_id;
8286         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8287
8288         val = (REG_RD(bp, 0x2874) & 0x55);
8289         if ((bp->common.chip_id & 0x1) ||
8290             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8291                 bp->flags |= ONE_PORT_FLAG;
8292                 BNX2X_DEV_INFO("single port device\n");
8293         }
8294
8295         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8296         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8297                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
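        /* NVRAM_1MB_SIZE is one megabit expressed in bytes (0x20000), so a
         * FLASH_SIZE field of e.g. 2 yields 0x80000 bytes of flash
         */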
8298         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8299                        bp->common.flash_size, bp->common.flash_size);
8300
8301         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8302         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8303         bp->link_params.shmem_base = bp->common.shmem_base;
8304         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8305                        bp->common.shmem_base, bp->common.shmem2_base);
8306
8307         if (!bp->common.shmem_base ||
8308             (bp->common.shmem_base < 0xA0000) ||
8309             (bp->common.shmem_base >= 0xC0000)) {
8310                 BNX2X_DEV_INFO("MCP not active\n");
8311                 bp->flags |= NO_MCP_FLAG;
8312                 return;
8313         }
8314
8315         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8316         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8317                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8318                 BNX2X_ERR("BAD MCP validity signature\n");
8319
8320         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8321         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8322
8323         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8324                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8325                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8326
8327         bp->link_params.feature_config_flags = 0;
8328         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8329         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8330                 bp->link_params.feature_config_flags |=
8331                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8332         else
8333                 bp->link_params.feature_config_flags &=
8334                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8335
8336         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8337         bp->common.bc_ver = val;
8338         BNX2X_DEV_INFO("bc_ver %X\n", val);
8339         if (val < BNX2X_BC_VER) {
8340                         /* for now we only warn;
8341                          * later we might need to enforce this */
8342                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8343                           " please upgrade BC\n", BNX2X_BC_VER, val);
8344         }
8345         bp->link_params.feature_config_flags |=
8346                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8347                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8348
8349         if (BP_E1HVN(bp) == 0) {
8350                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8351                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8352         } else {
8353                 /* no WOL capability for E1HVN != 0 */
8354                 bp->flags |= NO_WOL_FLAG;
8355         }
8356         BNX2X_DEV_INFO("%sWoL capable\n",
8357                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8358
8359         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8360         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8361         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8362         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8363
8364         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8365                val, val2, val3, val4);
8366 }
8367
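/* Build the ethtool "supported" mask for the port: start from the
 * capabilities of the configured (internal or external) PHY, then prune it
 * according to the NVRAM speed_cap_mask.
 */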
8368 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8369                                                     u32 switch_cfg)
8370 {
8371         int port = BP_PORT(bp);
8372         u32 ext_phy_type;
8373
8374         switch (switch_cfg) {
8375         case SWITCH_CFG_1G:
8376                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8377
8378                 ext_phy_type =
8379                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8380                 switch (ext_phy_type) {
8381                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8382                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8383                                        ext_phy_type);
8384
8385                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8386                                                SUPPORTED_10baseT_Full |
8387                                                SUPPORTED_100baseT_Half |
8388                                                SUPPORTED_100baseT_Full |
8389                                                SUPPORTED_1000baseT_Full |
8390                                                SUPPORTED_2500baseX_Full |
8391                                                SUPPORTED_TP |
8392                                                SUPPORTED_FIBRE |
8393                                                SUPPORTED_Autoneg |
8394                                                SUPPORTED_Pause |
8395                                                SUPPORTED_Asym_Pause);
8396                         break;
8397
8398                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8399                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8400                                        ext_phy_type);
8401
8402                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8403                                                SUPPORTED_10baseT_Full |
8404                                                SUPPORTED_100baseT_Half |
8405                                                SUPPORTED_100baseT_Full |
8406                                                SUPPORTED_1000baseT_Full |
8407                                                SUPPORTED_TP |
8408                                                SUPPORTED_FIBRE |
8409                                                SUPPORTED_Autoneg |
8410                                                SUPPORTED_Pause |
8411                                                SUPPORTED_Asym_Pause);
8412                         break;
8413
8414                 default:
8415                         BNX2X_ERR("NVRAM config error. "
8416                                   "BAD SerDes ext_phy_config 0x%x\n",
8417                                   bp->link_params.ext_phy_config);
8418                         return;
8419                 }
8420
8421                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8422                                            port*0x10);
8423                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8424                 break;
8425
8426         case SWITCH_CFG_10G:
8427                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8428
8429                 ext_phy_type =
8430                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8431                 switch (ext_phy_type) {
8432                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8433                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8434                                        ext_phy_type);
8435
8436                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8437                                                SUPPORTED_10baseT_Full |
8438                                                SUPPORTED_100baseT_Half |
8439                                                SUPPORTED_100baseT_Full |
8440                                                SUPPORTED_1000baseT_Full |
8441                                                SUPPORTED_2500baseX_Full |
8442                                                SUPPORTED_10000baseT_Full |
8443                                                SUPPORTED_TP |
8444                                                SUPPORTED_FIBRE |
8445                                                SUPPORTED_Autoneg |
8446                                                SUPPORTED_Pause |
8447                                                SUPPORTED_Asym_Pause);
8448                         break;
8449
8450                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8451                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8452                                        ext_phy_type);
8453
8454                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8455                                                SUPPORTED_1000baseT_Full |
8456                                                SUPPORTED_FIBRE |
8457                                                SUPPORTED_Autoneg |
8458                                                SUPPORTED_Pause |
8459                                                SUPPORTED_Asym_Pause);
8460                         break;
8461
8462                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8463                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8464                                        ext_phy_type);
8465
8466                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8467                                                SUPPORTED_2500baseX_Full |
8468                                                SUPPORTED_1000baseT_Full |
8469                                                SUPPORTED_FIBRE |
8470                                                SUPPORTED_Autoneg |
8471                                                SUPPORTED_Pause |
8472                                                SUPPORTED_Asym_Pause);
8473                         break;
8474
8475                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8476                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8477                                        ext_phy_type);
8478
8479                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8480                                                SUPPORTED_FIBRE |
8481                                                SUPPORTED_Pause |
8482                                                SUPPORTED_Asym_Pause);
8483                         break;
8484
8485                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8486                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8487                                        ext_phy_type);
8488
8489                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8490                                                SUPPORTED_1000baseT_Full |
8491                                                SUPPORTED_FIBRE |
8492                                                SUPPORTED_Pause |
8493                                                SUPPORTED_Asym_Pause);
8494                         break;
8495
8496                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8497                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8498                                        ext_phy_type);
8499
8500                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8501                                                SUPPORTED_1000baseT_Full |
8502                                                SUPPORTED_Autoneg |
8503                                                SUPPORTED_FIBRE |
8504                                                SUPPORTED_Pause |
8505                                                SUPPORTED_Asym_Pause);
8506                         break;
8507
8508                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8509                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8510                                        ext_phy_type);
8511
8512                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8513                                                SUPPORTED_1000baseT_Full |
8514                                                SUPPORTED_Autoneg |
8515                                                SUPPORTED_FIBRE |
8516                                                SUPPORTED_Pause |
8517                                                SUPPORTED_Asym_Pause);
8518                         break;
8519
8520                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8521                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8522                                        ext_phy_type);
8523
8524                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8525                                                SUPPORTED_TP |
8526                                                SUPPORTED_Autoneg |
8527                                                SUPPORTED_Pause |
8528                                                SUPPORTED_Asym_Pause);
8529                         break;
8530
8531                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8532                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8533                                        ext_phy_type);
8534
8535                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8536                                                SUPPORTED_10baseT_Full |
8537                                                SUPPORTED_100baseT_Half |
8538                                                SUPPORTED_100baseT_Full |
8539                                                SUPPORTED_1000baseT_Full |
8540                                                SUPPORTED_10000baseT_Full |
8541                                                SUPPORTED_TP |
8542                                                SUPPORTED_Autoneg |
8543                                                SUPPORTED_Pause |
8544                                                SUPPORTED_Asym_Pause);
8545                         break;
8546
8547                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8548                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8549                                   bp->link_params.ext_phy_config);
8550                         break;
8551
8552                 default:
8553                         BNX2X_ERR("NVRAM config error. "
8554                                   "BAD XGXS ext_phy_config 0x%x\n",
8555                                   bp->link_params.ext_phy_config);
8556                         return;
8557                 }
8558
8559                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8560                                            port*0x18);
8561                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8562
8563                 break;
8564
8565         default:
8566                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8567                           bp->port.link_config);
8568                 return;
8569         }
8570         bp->link_params.phy_addr = bp->port.phy_addr;
8571
8572         /* mask what we support according to speed_cap_mask */
8573         if (!(bp->link_params.speed_cap_mask &
8574                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8575                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8576
8577         if (!(bp->link_params.speed_cap_mask &
8578                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8579                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8580
8581         if (!(bp->link_params.speed_cap_mask &
8582                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8583                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8584
8585         if (!(bp->link_params.speed_cap_mask &
8586                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8587                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8588
8589         if (!(bp->link_params.speed_cap_mask &
8590                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8591                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8592                                         SUPPORTED_1000baseT_Full);
8593
8594         if (!(bp->link_params.speed_cap_mask &
8595                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8596                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8597
8598         if (!(bp->link_params.speed_cap_mask &
8599                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8600                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8601
8602         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8603 }
8604
8605 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8606 {
8607         bp->link_params.req_duplex = DUPLEX_FULL;
8608
8609         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8610         case PORT_FEATURE_LINK_SPEED_AUTO:
8611                 if (bp->port.supported & SUPPORTED_Autoneg) {
8612                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8613                         bp->port.advertising = bp->port.supported;
8614                 } else {
8615                         u32 ext_phy_type =
8616                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8617
8618                         if ((ext_phy_type ==
8619                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8620                             (ext_phy_type ==
8621                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8622                                 /* force 10G, no AN */
8623                                 bp->link_params.req_line_speed = SPEED_10000;
8624                                 bp->port.advertising =
8625                                                 (ADVERTISED_10000baseT_Full |
8626                                                  ADVERTISED_FIBRE);
8627                                 break;
8628                         }
8629                         BNX2X_ERR("NVRAM config error. "
8630                                   "Invalid link_config 0x%x"
8631                                   "  Autoneg not supported\n",
8632                                   bp->port.link_config);
8633                         return;
8634                 }
8635                 break;
8636
8637         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8638                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8639                         bp->link_params.req_line_speed = SPEED_10;
8640                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8641                                                 ADVERTISED_TP);
8642                 } else {
8643                         BNX2X_ERR("NVRAM config error. "
8644                                   "Invalid link_config 0x%x"
8645                                   "  speed_cap_mask 0x%x\n",
8646                                   bp->port.link_config,
8647                                   bp->link_params.speed_cap_mask);
8648                         return;
8649                 }
8650                 break;
8651
8652         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8653                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8654                         bp->link_params.req_line_speed = SPEED_10;
8655                         bp->link_params.req_duplex = DUPLEX_HALF;
8656                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8657                                                 ADVERTISED_TP);
8658                 } else {
8659                         BNX2X_ERR("NVRAM config error. "
8660                                   "Invalid link_config 0x%x"
8661                                   "  speed_cap_mask 0x%x\n",
8662                                   bp->port.link_config,
8663                                   bp->link_params.speed_cap_mask);
8664                         return;
8665                 }
8666                 break;
8667
8668         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8669                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8670                         bp->link_params.req_line_speed = SPEED_100;
8671                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8672                                                 ADVERTISED_TP);
8673                 } else {
8674                         BNX2X_ERR("NVRAM config error. "
8675                                   "Invalid link_config 0x%x"
8676                                   "  speed_cap_mask 0x%x\n",
8677                                   bp->port.link_config,
8678                                   bp->link_params.speed_cap_mask);
8679                         return;
8680                 }
8681                 break;
8682
8683         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8684                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8685                         bp->link_params.req_line_speed = SPEED_100;
8686                         bp->link_params.req_duplex = DUPLEX_HALF;
8687                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8688                                                 ADVERTISED_TP);
8689                 } else {
8690                         BNX2X_ERR("NVRAM config error. "
8691                                   "Invalid link_config 0x%x"
8692                                   "  speed_cap_mask 0x%x\n",
8693                                   bp->port.link_config,
8694                                   bp->link_params.speed_cap_mask);
8695                         return;
8696                 }
8697                 break;
8698
8699         case PORT_FEATURE_LINK_SPEED_1G:
8700                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8701                         bp->link_params.req_line_speed = SPEED_1000;
8702                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8703                                                 ADVERTISED_TP);
8704                 } else {
8705                         BNX2X_ERR("NVRAM config error. "
8706                                   "Invalid link_config 0x%x"
8707                                   "  speed_cap_mask 0x%x\n",
8708                                   bp->port.link_config,
8709                                   bp->link_params.speed_cap_mask);
8710                         return;
8711                 }
8712                 break;
8713
8714         case PORT_FEATURE_LINK_SPEED_2_5G:
8715                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8716                         bp->link_params.req_line_speed = SPEED_2500;
8717                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8718                                                 ADVERTISED_TP);
8719                 } else {
8720                         BNX2X_ERR("NVRAM config error. "
8721                                   "Invalid link_config 0x%x"
8722                                   "  speed_cap_mask 0x%x\n",
8723                                   bp->port.link_config,
8724                                   bp->link_params.speed_cap_mask);
8725                         return;
8726                 }
8727                 break;
8728
8729         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8730         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8731         case PORT_FEATURE_LINK_SPEED_10G_KR:
8732                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8733                         bp->link_params.req_line_speed = SPEED_10000;
8734                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8735                                                 ADVERTISED_FIBRE);
8736                 } else {
8737                         BNX2X_ERR("NVRAM config error. "
8738                                   "Invalid link_config 0x%x"
8739                                   "  speed_cap_mask 0x%x\n",
8740                                   bp->port.link_config,
8741                                   bp->link_params.speed_cap_mask);
8742                         return;
8743                 }
8744                 break;
8745
8746         default:
8747                 BNX2X_ERR("NVRAM config error. "
8748                           "BAD link speed link_config 0x%x\n",
8749                           bp->port.link_config);
8750                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8751                 bp->port.advertising = bp->port.supported;
8752                 break;
8753         }
8754
8755         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8756                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8757         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8758             !(bp->port.supported & SUPPORTED_Autoneg))
8759                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8760
8761         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8762                        "  advertising 0x%x\n",
8763                        bp->link_params.req_line_speed,
8764                        bp->link_params.req_duplex,
8765                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8766 }
8767
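/* Assemble a MAC address from two shmem words: mac_hi holds the two most
 * significant bytes, mac_lo the remaining four.  E.g. mac_hi = 0x0011 and
 * mac_lo = 0x22334455 yield 00:11:22:33:44:55.
 */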
8768 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8769 {
8770         mac_hi = cpu_to_be16(mac_hi);
8771         mac_lo = cpu_to_be32(mac_lo);
8772         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8773         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8774 }
8775
8776 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8777 {
8778         int port = BP_PORT(bp);
8779         u32 val, val2;
8780         u32 config;
8781         u16 i;
8782         u32 ext_phy_type;
8783
8784         bp->link_params.bp = bp;
8785         bp->link_params.port = port;
8786
8787         bp->link_params.lane_config =
8788                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8789         bp->link_params.ext_phy_config =
8790                 SHMEM_RD(bp,
8791                          dev_info.port_hw_config[port].external_phy_config);
8792         /* BCM8727_NOC => BCM8727 with no over-current */
8793         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8794             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8795                 bp->link_params.ext_phy_config &=
8796                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8797                 bp->link_params.ext_phy_config |=
8798                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8799                 bp->link_params.feature_config_flags |=
8800                         FEATURE_CONFIG_BCM8727_NOC;
8801         }
8802
8803         bp->link_params.speed_cap_mask =
8804                 SHMEM_RD(bp,
8805                          dev_info.port_hw_config[port].speed_capability_mask);
8806
8807         bp->port.link_config =
8808                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8809
8810         /* Get the Rx and Tx XGXS config for all 4 lanes */
8811         for (i = 0; i < 2; i++) {
8812                 val = SHMEM_RD(bp,
8813                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8814                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8815                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8816
8817                 val = SHMEM_RD(bp,
8818                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8819                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8820                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8821         }
8822
8823         /* If the device is capable of WoL, set the default state according
8824          * to the HW
8825          */
8826         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8827         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8828                    (config & PORT_FEATURE_WOL_ENABLED));
8829
8830         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8831                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8832                        bp->link_params.lane_config,
8833                        bp->link_params.ext_phy_config,
8834                        bp->link_params.speed_cap_mask, bp->port.link_config);
8835
8836         bp->link_params.switch_cfg |= (bp->port.link_config &
8837                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8838         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8839
8840         bnx2x_link_settings_requested(bp);
8841
8842         /*
8843          * If connected directly, work with the internal PHY; otherwise,
8844          * work with the external PHY
8845          */
8846         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8847         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8848                 bp->mdio.prtad = bp->link_params.phy_addr;
8849
8850         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8851                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8852                 bp->mdio.prtad =
8853                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8854
8855         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8856         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8857         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8858         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8859         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8860
8861 #ifdef BCM_CNIC
8862         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8863         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8864         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8865 #endif
8866 }
8867
8868 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8869 {
8870         int func = BP_FUNC(bp);
8871         u32 val, val2;
8872         int rc = 0;
8873
8874         bnx2x_get_common_hwinfo(bp);
8875
8876         bp->e1hov = 0;
8877         bp->e1hmf = 0;
8878         if (CHIP_IS_E1H(bp)) {
8879                 bp->mf_config =
8880                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8881
8882                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8883                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8884                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8885                         bp->e1hmf = 1;
8886                 BNX2X_DEV_INFO("%s function mode\n",
8887                                IS_E1HMF(bp) ? "multi" : "single");
8888
8889                 if (IS_E1HMF(bp)) {
8890                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8891                                                                 e1hov_tag) &
8892                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8893                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8894                                 bp->e1hov = val;
8895                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8896                                                "(0x%04x)\n",
8897                                                func, bp->e1hov, bp->e1hov);
8898                         } else {
8899                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8900                                           "  aborting\n", func);
8901                                 rc = -EPERM;
8902                         }
8903                 } else {
8904                         if (BP_E1HVN(bp)) {
8905                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8906                                           "  aborting\n", BP_E1HVN(bp));
8907                                 rc = -EPERM;
8908                         }
8909                 }
8910         }
8911
8912         if (!BP_NOMCP(bp)) {
8913                 bnx2x_get_port_hwinfo(bp);
8914
8915                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8916                               DRV_MSG_SEQ_NUMBER_MASK);
8917                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8918         }
8919
8920         if (IS_E1HMF(bp)) {
8921                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8922                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8923                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8924                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
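                        /* in MF mode the MAC comes from the function
                         * configuration: mac_upper holds bytes 0-1 in its
                         * low 16 bits, mac_lower holds bytes 2-5
                         */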
8925                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8926                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8927                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8928                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8929                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8930                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8931                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8932                                ETH_ALEN);
8933                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8934                                ETH_ALEN);
8935                 }
8936
8937                 return rc;
8938         }
8939
8940         if (BP_NOMCP(bp)) {
8941                 /* only supposed to happen on emulation/FPGA */
8942                 BNX2X_ERR("warning: random MAC workaround active\n");
8943                 random_ether_addr(bp->dev->dev_addr);
8944                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8945         }
8946
8947         return rc;
8948 }
8949
8950 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8951 {
8952         int func = BP_FUNC(bp);
8953         int timer_interval;
8954         int rc;
8955
8956         /* Disable interrupt handling until HW is initialized */
8957         atomic_set(&bp->intr_sem, 1);
8958         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8959
8960         mutex_init(&bp->port.phy_mutex);
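        /* fw_mb_mutex serializes accesses to the FW mailbox so that
         * concurrent bnx2x_fw_command() callers cannot interleave their
         * mailbox writes and sequence numbers
         */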
8961         mutex_init(&bp->fw_mb_mutex);
8962 #ifdef BCM_CNIC
8963         mutex_init(&bp->cnic_mutex);
8964 #endif
8965
8966         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8967         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8968
8969         rc = bnx2x_get_hwinfo(bp);
8970
8971         /* need to reset the chip if UNDI was active */
8972         if (!BP_NOMCP(bp))
8973                 bnx2x_undi_unload(bp);
8974
8975         if (CHIP_REV_IS_FPGA(bp))
8976                 printk(KERN_ERR PFX "FPGA detected\n");
8977
8978         if (BP_NOMCP(bp) && (func == 0))
8979                 printk(KERN_ERR PFX
8980                        "MCP disabled, must load devices in order!\n");
8981
8982         /* Set multi queue mode */
8983         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8984             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8985                 printk(KERN_ERR PFX
8986                       "Multi disabled since int_mode requested is not MSI-X\n");
8987                 multi_mode = ETH_RSS_MODE_DISABLED;
8988         }
8989         bp->multi_mode = multi_mode;
8990
8991
8992         /* Set TPA flags */
8993         if (disable_tpa) {
8994                 bp->flags &= ~TPA_ENABLE_FLAG;
8995                 bp->dev->features &= ~NETIF_F_LRO;
8996         } else {
8997                 bp->flags |= TPA_ENABLE_FLAG;
8998                 bp->dev->features |= NETIF_F_LRO;
8999         }
9000
9001         if (CHIP_IS_E1(bp))
9002                 bp->dropless_fc = 0;
9003         else
9004                 bp->dropless_fc = dropless_fc;
9005
9006         bp->mrrs = mrrs;
9007
9008         bp->tx_ring_size = MAX_TX_AVAIL;
9009         bp->rx_ring_size = MAX_RX_AVAIL;
9010
9011         bp->rx_csum = 1;
9012
9013         bp->tx_ticks = 50;
9014         bp->rx_ticks = 25;
9015
9016         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9017         bp->current_interval = (poll ? poll : timer_interval);
9018
9019         init_timer(&bp->timer);
9020         bp->timer.expires = jiffies + bp->current_interval;
9021         bp->timer.data = (unsigned long) bp;
9022         bp->timer.function = bnx2x_timer;
9023
9024         return rc;
9025 }
9026
9027 /*
9028  * ethtool service functions
9029  */
9030
9031 /* All ethtool functions are called with rtnl_lock held */
9032
9033 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9034 {
9035         struct bnx2x *bp = netdev_priv(dev);
9036
9037         cmd->supported = bp->port.supported;
9038         cmd->advertising = bp->port.advertising;
9039
9040         if ((bp->state == BNX2X_STATE_OPEN) &&
9041             !(bp->flags & MF_FUNC_DIS) &&
9042             (bp->link_vars.link_up)) {
9043                 cmd->speed = bp->link_vars.line_speed;
9044                 cmd->duplex = bp->link_vars.duplex;
9045                 if (IS_E1HMF(bp)) {
9046                         u16 vn_max_rate;
9047
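                        /* in MF mode report at most the per-function maximum
                         * bandwidth, which is configured in units of 100Mbps
                         */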
9048                         vn_max_rate =
9049                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
9050                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
9051                         if (vn_max_rate < cmd->speed)
9052                                 cmd->speed = vn_max_rate;
9053                 }
9054         } else {
9055                 cmd->speed = -1;
9056                 cmd->duplex = -1;
9057         }
9058
9059         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
9060                 u32 ext_phy_type =
9061                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9062
9063                 switch (ext_phy_type) {
9064                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9065                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9066                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9067                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9068                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9069                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9070                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9071                         cmd->port = PORT_FIBRE;
9072                         break;
9073
9074                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9075                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9076                         cmd->port = PORT_TP;
9077                         break;
9078
9079                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9080                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9081                                   bp->link_params.ext_phy_config);
9082                         break;
9083
9084                 default:
9085                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9086                            bp->link_params.ext_phy_config);
9087                         break;
9088                 }
9089         } else
9090                 cmd->port = PORT_TP;
9091
9092         cmd->phy_address = bp->mdio.prtad;
9093         cmd->transceiver = XCVR_INTERNAL;
9094
9095         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9096                 cmd->autoneg = AUTONEG_ENABLE;
9097         else
9098                 cmd->autoneg = AUTONEG_DISABLE;
9099
9100         cmd->maxtxpkt = 0;
9101         cmd->maxrxpkt = 0;
9102
9103         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9104            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9105            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9106            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9107            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9108            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9109            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9110
9111         return 0;
9112 }
9113
9114 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9115 {
9116         struct bnx2x *bp = netdev_priv(dev);
9117         u32 advertising;
9118
9119         if (IS_E1HMF(bp))
9120                 return 0;
9121
9122         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9123            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9124            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9125            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9126            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9127            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9128            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9129
9130         if (cmd->autoneg == AUTONEG_ENABLE) {
9131                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9132                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9133                         return -EINVAL;
9134                 }
9135
9136                 /* advertise the requested speed and duplex if supported */
9137                 cmd->advertising &= bp->port.supported;
9138
9139                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9140                 bp->link_params.req_duplex = DUPLEX_FULL;
9141                 bp->port.advertising |= (ADVERTISED_Autoneg |
9142                                          cmd->advertising);
9143
9144         } else { /* forced speed */
9145                 /* advertise the requested speed and duplex if supported */
9146                 switch (cmd->speed) {
9147                 case SPEED_10:
9148                         if (cmd->duplex == DUPLEX_FULL) {
9149                                 if (!(bp->port.supported &
9150                                       SUPPORTED_10baseT_Full)) {
9151                                         DP(NETIF_MSG_LINK,
9152                                            "10M full not supported\n");
9153                                         return -EINVAL;
9154                                 }
9155
9156                                 advertising = (ADVERTISED_10baseT_Full |
9157                                                ADVERTISED_TP);
9158                         } else {
9159                                 if (!(bp->port.supported &
9160                                       SUPPORTED_10baseT_Half)) {
9161                                         DP(NETIF_MSG_LINK,
9162                                            "10M half not supported\n");
9163                                         return -EINVAL;
9164                                 }
9165
9166                                 advertising = (ADVERTISED_10baseT_Half |
9167                                                ADVERTISED_TP);
9168                         }
9169                         break;
9170
9171                 case SPEED_100:
9172                         if (cmd->duplex == DUPLEX_FULL) {
9173                                 if (!(bp->port.supported &
9174                                                 SUPPORTED_100baseT_Full)) {
9175                                         DP(NETIF_MSG_LINK,
9176                                            "100M full not supported\n");
9177                                         return -EINVAL;
9178                                 }
9179
9180                                 advertising = (ADVERTISED_100baseT_Full |
9181                                                ADVERTISED_TP);
9182                         } else {
9183                                 if (!(bp->port.supported &
9184                                                 SUPPORTED_100baseT_Half)) {
9185                                         DP(NETIF_MSG_LINK,
9186                                            "100M half not supported\n");
9187                                         return -EINVAL;
9188                                 }
9189
9190                                 advertising = (ADVERTISED_100baseT_Half |
9191                                                ADVERTISED_TP);
9192                         }
9193                         break;
9194
9195                 case SPEED_1000:
9196                         if (cmd->duplex != DUPLEX_FULL) {
9197                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
9198                                 return -EINVAL;
9199                         }
9200
9201                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9202                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
9203                                 return -EINVAL;
9204                         }
9205
9206                         advertising = (ADVERTISED_1000baseT_Full |
9207                                        ADVERTISED_TP);
9208                         break;
9209
9210                 case SPEED_2500:
9211                         if (cmd->duplex != DUPLEX_FULL) {
9212                                 DP(NETIF_MSG_LINK,
9213                                    "2.5G half not supported\n");
9214                                 return -EINVAL;
9215                         }
9216
9217                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9218                                 DP(NETIF_MSG_LINK,
9219                                    "2.5G full not supported\n");
9220                                 return -EINVAL;
9221                         }
9222
9223                         advertising = (ADVERTISED_2500baseX_Full |
9224                                        ADVERTISED_TP);
9225                         break;
9226
9227                 case SPEED_10000:
9228                         if (cmd->duplex != DUPLEX_FULL) {
9229                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
9230                                 return -EINVAL;
9231                         }
9232
9233                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9234                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
9235                                 return -EINVAL;
9236                         }
9237
9238                         advertising = (ADVERTISED_10000baseT_Full |
9239                                        ADVERTISED_FIBRE);
9240                         break;
9241
9242                 default:
9243                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
9244                         return -EINVAL;
9245                 }
9246
9247                 bp->link_params.req_line_speed = cmd->speed;
9248                 bp->link_params.req_duplex = cmd->duplex;
9249                 bp->port.advertising = advertising;
9250         }
9251
9252         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9253            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
9254            bp->link_params.req_line_speed, bp->link_params.req_duplex,
9255            bp->port.advertising);
9256
9257         if (netif_running(dev)) {
9258                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9259                 bnx2x_link_set(bp);
9260         }
9261
9262         return 0;
9263 }
9264
9265 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9266 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9267
9268 static int bnx2x_get_regs_len(struct net_device *dev)
9269 {
9270         struct bnx2x *bp = netdev_priv(dev);
9271         int regdump_len = 0;
9272         int i;
9273
9274         if (CHIP_IS_E1(bp)) {
9275                 for (i = 0; i < REGS_COUNT; i++)
9276                         if (IS_E1_ONLINE(reg_addrs[i].info))
9277                                 regdump_len += reg_addrs[i].size;
9278
9279                 for (i = 0; i < WREGS_COUNT_E1; i++)
9280                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9281                                 regdump_len += wreg_addrs_e1[i].size *
9282                                         (1 + wreg_addrs_e1[i].read_regs_count);
9283
9284         } else { /* E1H */
9285                 for (i = 0; i < REGS_COUNT; i++)
9286                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9287                                 regdump_len += reg_addrs[i].size;
9288
9289                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9290                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9291                                 regdump_len += wreg_addrs_e1h[i].size *
9292                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9293         }
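        /* registers are dumped as 32-bit words */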
9294         regdump_len *= 4;
9295         regdump_len += sizeof(struct dump_hdr);
9296
9297         return regdump_len;
9298 }
9299
9300 static void bnx2x_get_regs(struct net_device *dev,
9301                            struct ethtool_regs *regs, void *_p)
9302 {
9303         u32 *p = _p, i, j;
9304         struct bnx2x *bp = netdev_priv(dev);
9305         struct dump_hdr dump_hdr = {0};
9306
9307         regs->version = 0;
9308         memset(p, 0, regs->len);
9309
9310         if (!netif_running(bp->dev))
9311                 return;
9312
9313         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9314         dump_hdr.dump_sign = dump_sign_all;
9315         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9316         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9317         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9318         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9319         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9320
9321         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9322         p += dump_hdr.hdr_size + 1;
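        /* hdr_size is in 32-bit words and excludes the first word, so this
         * advances p just past the header
         */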
9323
9324         if (CHIP_IS_E1(bp)) {
9325                 for (i = 0; i < REGS_COUNT; i++)
9326                         if (IS_E1_ONLINE(reg_addrs[i].info))
9327                                 for (j = 0; j < reg_addrs[i].size; j++)
9328                                         *p++ = REG_RD(bp,
9329                                                       reg_addrs[i].addr + j*4);
9330
9331         } else { /* E1H */
9332                 for (i = 0; i < REGS_COUNT; i++)
9333                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9334                                 for (j = 0; j < reg_addrs[i].size; j++)
9335                                         *p++ = REG_RD(bp,
9336                                                       reg_addrs[i].addr + j*4);
9337         }
9338 }
9339
9340 #define PHY_FW_VER_LEN                  10
9341
9342 static void bnx2x_get_drvinfo(struct net_device *dev,
9343                               struct ethtool_drvinfo *info)
9344 {
9345         struct bnx2x *bp = netdev_priv(dev);
9346         u8 phy_fw_ver[PHY_FW_VER_LEN];
9347
9348         strcpy(info->driver, DRV_MODULE_NAME);
9349         strcpy(info->version, DRV_MODULE_VERSION);
9350
9351         phy_fw_ver[0] = '\0';
9352         if (bp->port.pmf) {
9353                 bnx2x_acquire_phy_lock(bp);
9354                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9355                                              (bp->state != BNX2X_STATE_CLOSED),
9356                                              phy_fw_ver, PHY_FW_VER_LEN);
9357                 bnx2x_release_phy_lock(bp);
9358         }
9359
9360         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9361                  (bp->common.bc_ver & 0xff0000) >> 16,
9362                  (bp->common.bc_ver & 0xff00) >> 8,
9363                  (bp->common.bc_ver & 0xff),
9364                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9365         strcpy(info->bus_info, pci_name(bp->pdev));
9366         info->n_stats = BNX2X_NUM_STATS;
9367         info->testinfo_len = BNX2X_NUM_TESTS;
9368         info->eedump_len = bp->common.flash_size;
9369         info->regdump_len = bnx2x_get_regs_len(dev);
9370 }
9371
9372 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9373 {
9374         struct bnx2x *bp = netdev_priv(dev);
9375
9376         if (bp->flags & NO_WOL_FLAG) {
9377                 wol->supported = 0;
9378                 wol->wolopts = 0;
9379         } else {
9380                 wol->supported = WAKE_MAGIC;
9381                 if (bp->wol)
9382                         wol->wolopts = WAKE_MAGIC;
9383                 else
9384                         wol->wolopts = 0;
9385         }
9386         memset(&wol->sopass, 0, sizeof(wol->sopass));
9387 }
9388
9389 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9390 {
9391         struct bnx2x *bp = netdev_priv(dev);
9392
9393         if (wol->wolopts & ~WAKE_MAGIC)
9394                 return -EINVAL;
9395
9396         if (wol->wolopts & WAKE_MAGIC) {
9397                 if (bp->flags & NO_WOL_FLAG)
9398                         return -EINVAL;
9399
9400                 bp->wol = 1;
9401         } else
9402                 bp->wol = 0;
9403
9404         return 0;
9405 }
9406
9407 static u32 bnx2x_get_msglevel(struct net_device *dev)
9408 {
9409         struct bnx2x *bp = netdev_priv(dev);
9410
9411         return bp->msglevel;
9412 }
9413
9414 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9415 {
9416         struct bnx2x *bp = netdev_priv(dev);
9417
9418         if (capable(CAP_NET_ADMIN))
9419                 bp->msglevel = level;
9420 }
9421
9422 static int bnx2x_nway_reset(struct net_device *dev)
9423 {
9424         struct bnx2x *bp = netdev_priv(dev);
9425
9426         if (!bp->port.pmf)
9427                 return 0;
9428
9429         if (netif_running(dev)) {
9430                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9431                 bnx2x_link_set(bp);
9432         }
9433
9434         return 0;
9435 }
9436
9437 static u32 bnx2x_get_link(struct net_device *dev)
9438 {
9439         struct bnx2x *bp = netdev_priv(dev);
9440
9441         if (bp->flags & MF_FUNC_DIS)
9442                 return 0;
9443
9444         return bp->link_vars.link_up;
9445 }
9446
9447 static int bnx2x_get_eeprom_len(struct net_device *dev)
9448 {
9449         struct bnx2x *bp = netdev_priv(dev);
9450
9451         return bp->common.flash_size;
9452 }
9453
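/* NVRAM is shared between the two ports and serialized by a hardware
 * arbiter: set the per-port request bit, then poll for the grant,
 * scaling the timeout up on slow emulation/FPGA platforms.
 */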
9454 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9455 {
9456         int port = BP_PORT(bp);
9457         int count, i;
9458         u32 val = 0;
9459
9460         /* adjust timeout for emulation/FPGA */
9461         count = NVRAM_TIMEOUT_COUNT;
9462         if (CHIP_REV_IS_SLOW(bp))
9463                 count *= 100;
9464
9465         /* request access to nvram interface */
9466         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9467                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9468
9469         for (i = 0; i < count*10; i++) {
9470                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9471                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9472                         break;
9473
9474                 udelay(5);
9475         }
9476
9477         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9478                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9479                 return -EBUSY;
9480         }
9481
9482         return 0;
9483 }
9484
9485 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9486 {
9487         int port = BP_PORT(bp);
9488         int count, i;
9489         u32 val = 0;
9490
9491         /* adjust timeout for emulation/FPGA */
9492         count = NVRAM_TIMEOUT_COUNT;
9493         if (CHIP_REV_IS_SLOW(bp))
9494                 count *= 100;
9495
9496         /* relinquish nvram interface */
9497         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9498                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9499
9500         for (i = 0; i < count*10; i++) {
9501                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9502                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9503                         break;
9504
9505                 udelay(5);
9506         }
9507
9508         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9509                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9510                 return -EBUSY;
9511         }
9512
9513         return 0;
9514 }
9515
9516 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9517 {
9518         u32 val;
9519
9520         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9521
9522         /* enable both bits, even on read */
9523         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9524                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9525                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9526 }
9527
9528 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9529 {
9530         u32 val;
9531
9532         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9533
9534         /* disable both bits, even after read */
9535         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9536                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9537                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9538 }
9539
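/* Read one dword from NVRAM: clear the stale DONE bit, program the
 * address, issue DOIT (plus any FIRST/LAST framing flags supplied by
 * the caller), poll for DONE, and byte-swap the result so ethtool sees
 * a host-independent byte stream.
 */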
9540 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9541                                   u32 cmd_flags)
9542 {
9543         int count, i, rc;
9544         u32 val;
9545
9546         /* build the command word */
9547         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9548
9549         /* need to clear DONE bit separately */
9550         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9551
9552         /* address of the NVRAM to read from */
9553         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9554                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9555
9556         /* issue a read command */
9557         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9558
9559         /* adjust timeout for emulation/FPGA */
9560         count = NVRAM_TIMEOUT_COUNT;
9561         if (CHIP_REV_IS_SLOW(bp))
9562                 count *= 100;
9563
9564         /* wait for completion */
9565         *ret_val = 0;
9566         rc = -EBUSY;
9567         for (i = 0; i < count; i++) {
9568                 udelay(5);
9569                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9570
9571                 if (val & MCPR_NVM_COMMAND_DONE) {
9572                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9573                         /* we read nvram data in cpu order, but ethtool
9574                          * expects an array of bytes; converting to
9575                          * big-endian makes the byte order host-independent */
9576                         *ret_val = cpu_to_be32(val);
9577                         rc = 0;
9578                         break;
9579                 }
9580         }
9581
9582         return rc;
9583 }
9584
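/* Read a dword-aligned buffer from NVRAM under the arbitration lock,
 * framing the burst with FIRST on the first dword and LAST on the
 * final one.
 */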
9585 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9586                             int buf_size)
9587 {
9588         int rc;
9589         u32 cmd_flags;
9590         __be32 val;
9591
9592         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9593                 DP(BNX2X_MSG_NVM,
9594                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9595                    offset, buf_size);
9596                 return -EINVAL;
9597         }
9598
9599         if (offset + buf_size > bp->common.flash_size) {
9600                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9601                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9602                    offset, buf_size, bp->common.flash_size);
9603                 return -EINVAL;
9604         }
9605
9606         /* request access to nvram interface */
9607         rc = bnx2x_acquire_nvram_lock(bp);
9608         if (rc)
9609                 return rc;
9610
9611         /* enable access to nvram interface */
9612         bnx2x_enable_nvram_access(bp);
9613
9614         /* read the first word(s) */
9615         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9616         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9617                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9618                 memcpy(ret_buf, &val, 4);
9619
9620                 /* advance to the next dword */
9621                 offset += sizeof(u32);
9622                 ret_buf += sizeof(u32);
9623                 buf_size -= sizeof(u32);
9624                 cmd_flags = 0;
9625         }
9626
9627         if (rc == 0) {
9628                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9629                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9630                 memcpy(ret_buf, &val, 4);
9631         }
9632
9633         /* disable access to nvram interface */
9634         bnx2x_disable_nvram_access(bp);
9635         bnx2x_release_nvram_lock(bp);
9636
9637         return rc;
9638 }
9639
9640 static int bnx2x_get_eeprom(struct net_device *dev,
9641                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9642 {
9643         struct bnx2x *bp = netdev_priv(dev);
9644         int rc;
9645
9646         if (!netif_running(dev))
9647                 return -EAGAIN;
9648
9649         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9650            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9651            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9652            eeprom->len, eeprom->len);
9653
9654         /* parameters already validated in ethtool_get_eeprom */
9655
9656         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9657
9658         return rc;
9659 }
9660
9661 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9662                                    u32 cmd_flags)
9663 {
9664         int count, i, rc;
9665
9666         /* build the command word */
9667         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9668
9669         /* need to clear DONE bit separately */
9670         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9671
9672         /* write the data */
9673         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9674
9675         /* address of the NVRAM to write to */
9676         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9677                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9678
9679         /* issue the write command */
9680         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9681
9682         /* adjust timeout for emulation/FPGA */
9683         count = NVRAM_TIMEOUT_COUNT;
9684         if (CHIP_REV_IS_SLOW(bp))
9685                 count *= 100;
9686
9687         /* wait for completion */
9688         rc = -EBUSY;
9689         for (i = 0; i < count; i++) {
9690                 udelay(5);
9691                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9692                 if (val & MCPR_NVM_COMMAND_DONE) {
9693                         rc = 0;
9694                         break;
9695                 }
9696         }
9697
9698         return rc;
9699 }
9700
9701 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9702
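/* Write a single byte: NVRAM is only dword-addressable, so read back
 * the containing aligned dword, splice the new byte in at BYTE_OFFSET,
 * and write the dword out again (read-modify-write).
 */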
9703 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9704                               int buf_size)
9705 {
9706         int rc;
9707         u32 cmd_flags;
9708         u32 align_offset;
9709         __be32 val;
9710
9711         if (offset + buf_size > bp->common.flash_size) {
9712                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9713                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9714                    offset, buf_size, bp->common.flash_size);
9715                 return -EINVAL;
9716         }
9717
9718         /* request access to nvram interface */
9719         rc = bnx2x_acquire_nvram_lock(bp);
9720         if (rc)
9721                 return rc;
9722
9723         /* enable access to nvram interface */
9724         bnx2x_enable_nvram_access(bp);
9725
9726         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9727         align_offset = (offset & ~0x03);
9728         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9729
9730         if (rc == 0) {
9731                 val &= ~(0xff << BYTE_OFFSET(offset));
9732                 val |= (*data_buf << BYTE_OFFSET(offset));
9733
9734                 /* nvram data was returned big-endian (as an array of
9735                  * bytes); convert it back to cpu order before writing */
9736                 val = be32_to_cpu(val);
9737
9738                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9739                                              cmd_flags);
9740         }
9741
9742         /* disable access to nvram interface */
9743         bnx2x_disable_nvram_access(bp);
9744         bnx2x_release_nvram_lock(bp);
9745
9746         return rc;
9747 }
9748
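/* Stream a dword-aligned buffer into NVRAM.  The FIRST/LAST command
 * flags frame each flash page: LAST is raised on the buffer's final
 * dword and on page boundaries, FIRST whenever a new page begins.
 */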
9749 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9750                              int buf_size)
9751 {
9752         int rc;
9753         u32 cmd_flags;
9754         u32 val;
9755         u32 written_so_far;
9756
9757         if (buf_size == 1)      /* ethtool */
9758                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9759
9760         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9761                 DP(BNX2X_MSG_NVM,
9762                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9763                    offset, buf_size);
9764                 return -EINVAL;
9765         }
9766
9767         if (offset + buf_size > bp->common.flash_size) {
9768                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9769                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9770                    offset, buf_size, bp->common.flash_size);
9771                 return -EINVAL;
9772         }
9773
9774         /* request access to nvram interface */
9775         rc = bnx2x_acquire_nvram_lock(bp);
9776         if (rc)
9777                 return rc;
9778
9779         /* enable access to nvram interface */
9780         bnx2x_enable_nvram_access(bp);
9781
9782         written_so_far = 0;
9783         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9784         while ((written_so_far < buf_size) && (rc == 0)) {
9785                 if (written_so_far == (buf_size - sizeof(u32)))
9786                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9787                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9788                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9789                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9790                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9791
9792                 memcpy(&val, data_buf, 4);
9793
9794                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9795
9796                 /* advance to the next dword */
9797                 offset += sizeof(u32);
9798                 data_buf += sizeof(u32);
9799                 written_so_far += sizeof(u32);
9800                 cmd_flags = 0;
9801         }
9802
9803         /* disable access to nvram interface */
9804         bnx2x_disable_nvram_access(bp);
9805         bnx2x_release_nvram_lock(bp);
9806
9807         return rc;
9808 }
9809
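/* ethtool eeprom writes double as the PHY firmware upgrade channel:
 * the 'PHY*' magic values sequence an SFX7101 upgrade (prepare,
 * re-init link, complete); any other magic is a plain NVRAM write.
 */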
9810 static int bnx2x_set_eeprom(struct net_device *dev,
9811                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9812 {
9813         struct bnx2x *bp = netdev_priv(dev);
9814         int port = BP_PORT(bp);
9815         int rc = 0;
9816
9817         if (!netif_running(dev))
9818                 return -EAGAIN;
9819
9820         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9821            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9822            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9823            eeprom->len, eeprom->len);
9824
9825         /* parameters already validated in ethtool_set_eeprom */
9826
9827         /* PHY eeprom can be accessed only by the PMF */
9828         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9829             !bp->port.pmf)
9830                 return -EINVAL;
9831
9832         if (eeprom->magic == 0x50485950) {
9833                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9834                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9835
9836                 bnx2x_acquire_phy_lock(bp);
9837                 rc |= bnx2x_link_reset(&bp->link_params,
9838                                        &bp->link_vars, 0);
9839                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9840                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9841                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9842                                        MISC_REGISTERS_GPIO_HIGH, port);
9843                 bnx2x_release_phy_lock(bp);
9844                 bnx2x_link_report(bp);
9845
9846         } else if (eeprom->magic == 0x50485952) {
9847                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9848                 if (bp->state == BNX2X_STATE_OPEN) {
9849                         bnx2x_acquire_phy_lock(bp);
9850                         rc |= bnx2x_link_reset(&bp->link_params,
9851                                                &bp->link_vars, 1);
9852
9853                         rc |= bnx2x_phy_init(&bp->link_params,
9854                                              &bp->link_vars);
9855                         bnx2x_release_phy_lock(bp);
9856                         bnx2x_calc_fc_adv(bp);
9857                 }
9858         } else if (eeprom->magic == 0x53985943) {
9859                 /* 'PHYC' (0x50485943): PHY FW upgrade completed */
9860                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9861                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9862                         u8 ext_phy_addr =
9863                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9864
9865                         /* take the DSP out of download mode */
9866                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9867                                        MISC_REGISTERS_GPIO_LOW, port);
9868
9869                         bnx2x_acquire_phy_lock(bp);
9870
9871                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9872
9873                         /* wait 0.5 sec to allow it to run */
9874                         msleep(500);
9875                         bnx2x_ext_phy_hw_reset(bp, port);
9876                         msleep(500);
9877                         bnx2x_release_phy_lock(bp);
9878                 }
9879         } else
9880                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9881
9882         return rc;
9883 }
9884
9885 static int bnx2x_get_coalesce(struct net_device *dev,
9886                               struct ethtool_coalesce *coal)
9887 {
9888         struct bnx2x *bp = netdev_priv(dev);
9889
9890         memset(coal, 0, sizeof(struct ethtool_coalesce));
9891
9892         coal->rx_coalesce_usecs = bp->rx_ticks;
9893         coal->tx_coalesce_usecs = bp->tx_ticks;
9894
9895         return 0;
9896 }
9897
9898 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
9899 static int bnx2x_set_coalesce(struct net_device *dev,
9900                               struct ethtool_coalesce *coal)
9901 {
9902         struct bnx2x *bp = netdev_priv(dev);
9903
9904         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9905         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9906                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9907
9908         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9909         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9910                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9911
9912         if (netif_running(dev))
9913                 bnx2x_update_coalesce(bp);
9914
9915         return 0;
9916 }
9917
9918 static void bnx2x_get_ringparam(struct net_device *dev,
9919                                 struct ethtool_ringparam *ering)
9920 {
9921         struct bnx2x *bp = netdev_priv(dev);
9922
9923         ering->rx_max_pending = MAX_RX_AVAIL;
9924         ering->rx_mini_max_pending = 0;
9925         ering->rx_jumbo_max_pending = 0;
9926
9927         ering->rx_pending = bp->rx_ring_size;
9928         ering->rx_mini_pending = 0;
9929         ering->rx_jumbo_pending = 0;
9930
9931         ering->tx_max_pending = MAX_TX_AVAIL;
9932         ering->tx_pending = bp->tx_ring_size;
9933 }
9934
9935 static int bnx2x_set_ringparam(struct net_device *dev,
9936                                struct ethtool_ringparam *ering)
9937 {
9938         struct bnx2x *bp = netdev_priv(dev);
9939         int rc = 0;
9940
9941         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9942             (ering->tx_pending > MAX_TX_AVAIL) ||
9943             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9944                 return -EINVAL;
9945
9946         bp->rx_ring_size = ering->rx_pending;
9947         bp->tx_ring_size = ering->tx_pending;
9948
9949         if (netif_running(dev)) {
9950                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9951                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9952         }
9953
9954         return rc;
9955 }
9956
9957 static void bnx2x_get_pauseparam(struct net_device *dev,
9958                                  struct ethtool_pauseparam *epause)
9959 {
9960         struct bnx2x *bp = netdev_priv(dev);
9961
9962         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9963                            BNX2X_FLOW_CTRL_AUTO) &&
9964                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9965
9966         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9967                             BNX2X_FLOW_CTRL_RX);
9968         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9969                             BNX2X_FLOW_CTRL_TX);
9970
9971         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9972            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9973            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9974 }
9975
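/* Resolve the requested pause configuration: explicit rx/tx pause bits
 * yield a fixed RX/TX/NONE setting, while autoneg (if supported, and
 * the line speed is auto) defers the decision to link autonegotiation
 * via FLOW_CTRL_AUTO.
 */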
9976 static int bnx2x_set_pauseparam(struct net_device *dev,
9977                                 struct ethtool_pauseparam *epause)
9978 {
9979         struct bnx2x *bp = netdev_priv(dev);
9980
9981         if (IS_E1HMF(bp))
9982                 return 0;
9983
9984         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9985            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9986            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9987
9988         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9989
9990         if (epause->rx_pause)
9991                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9992
9993         if (epause->tx_pause)
9994                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9995
9996         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9997                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9998
9999         if (epause->autoneg) {
10000                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10001                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
10002                         return -EINVAL;
10003                 }
10004
10005                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10006                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10007         }
10008
10009         DP(NETIF_MSG_LINK,
10010            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10011
10012         if (netif_running(dev)) {
10013                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10014                 bnx2x_link_set(bp);
10015         }
10016
10017         return 0;
10018 }
10019
10020 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10021 {
10022         struct bnx2x *bp = netdev_priv(dev);
10023         int changed = 0;
10024         int rc = 0;
10025
10026         /* TPA requires Rx CSUM offloading */
10027         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10028                 if (!(dev->features & NETIF_F_LRO)) {
10029                         dev->features |= NETIF_F_LRO;
10030                         bp->flags |= TPA_ENABLE_FLAG;
10031                         changed = 1;
10032                 }
10033
10034         } else if (dev->features & NETIF_F_LRO) {
10035                 dev->features &= ~NETIF_F_LRO;
10036                 bp->flags &= ~TPA_ENABLE_FLAG;
10037                 changed = 1;
10038         }
10039
10040         if (changed && netif_running(dev)) {
10041                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10042                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10043         }
10044
10045         return rc;
10046 }
10047
10048 static u32 bnx2x_get_rx_csum(struct net_device *dev)
10049 {
10050         struct bnx2x *bp = netdev_priv(dev);
10051
10052         return bp->rx_csum;
10053 }
10054
10055 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10056 {
10057         struct bnx2x *bp = netdev_priv(dev);
10058         int rc = 0;
10059
10060         bp->rx_csum = data;
10061
10062         /* Disable TPA when Rx CSUM is disabled; otherwise all
10063            TPA'ed packets will be discarded due to a wrong TCP CSUM */
10064         if (!data) {
10065                 u32 flags = ethtool_op_get_flags(dev);
10066
10067                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10068         }
10069
10070         return rc;
10071 }
10072
10073 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10074 {
10075         if (data) {
10076                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10077                 dev->features |= NETIF_F_TSO6;
10078         } else {
10079                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10080                 dev->features &= ~NETIF_F_TSO6;
10081         }
10082
10083         return 0;
10084 }
10085
10086 static const struct {
10087         char string[ETH_GSTRING_LEN];
10088 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10089         { "register_test (offline)" },
10090         { "memory_test (offline)" },
10091         { "loopback_test (offline)" },
10092         { "nvram_test (online)" },
10093         { "interrupt_test (online)" },
10094         { "link_test (online)" },
10095         { "idle check (online)" }
10096 };
10097
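/* Register self-test: for each reg_tbl entry write a test pattern (all
 * zeros on the first pass, all ones on the second) through the per-port
 * offset, read it back, restore the original value, and compare under
 * the register's writable-bit mask.
 */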
10098 static int bnx2x_test_registers(struct bnx2x *bp)
10099 {
10100         int idx, i, rc = -ENODEV;
10101         u32 wr_val = 0;
10102         int port = BP_PORT(bp);
10103         static const struct {
10104                 u32  offset0;
10105                 u32  offset1;
10106                 u32  mask;
10107         } reg_tbl[] = {
10108 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
10109                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
10110                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
10111                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
10112                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
10113                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
10114                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
10115                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
10116                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
10117                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
10118 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
10119                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
10120                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
10121                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
10122                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
10123                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10124                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
10125                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
10126                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
10127                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
10128 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
10129                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
10130                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
10131                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
10132                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
10133                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
10134                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
10135                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
10136                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
10137                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
10138 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
10139                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
10140                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
10141                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10142                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
10143                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10144                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
10145
10146                 { 0xffffffff, 0, 0x00000000 }
10147         };
10148
10149         if (!netif_running(bp->dev))
10150                 return rc;
10151
10152         /* Run the test twice:
10153            first writing 0x00000000, then writing 0xffffffff */
10154         for (idx = 0; idx < 2; idx++) {
10155
10156                 switch (idx) {
10157                 case 0:
10158                         wr_val = 0;
10159                         break;
10160                 case 1:
10161                         wr_val = 0xffffffff;
10162                         break;
10163                 }
10164
10165                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10166                         u32 offset, mask, save_val, val;
10167
10168                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10169                         mask = reg_tbl[i].mask;
10170
10171                         save_val = REG_RD(bp, offset);
10172
10173                         REG_WR(bp, offset, wr_val);
10174                         val = REG_RD(bp, offset);
10175
10176                         /* Restore the original register's value */
10177                         REG_WR(bp, offset, save_val);
10178
10179                         /* verify the value is as expected */
10180                         if ((val & mask) != (wr_val & mask))
10181                                 goto test_reg_exit;
10182                 }
10183         }
10184
10185         rc = 0;
10186
10187 test_reg_exit:
10188         return rc;
10189 }
10190
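/* Memory self-test: read every word of each internal memory so the
 * parity logic checks its contents, then verify that the parity status
 * registers report nothing outside the per-chip ignore masks.
 */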
10191 static int bnx2x_test_memory(struct bnx2x *bp)
10192 {
10193         int i, j, rc = -ENODEV;
10194         u32 val;
10195         static const struct {
10196                 u32 offset;
10197                 int size;
10198         } mem_tbl[] = {
10199                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
10200                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10201                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
10202                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
10203                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
10204                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
10205                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
10206
10207                 { 0xffffffff, 0 }
10208         };
10209         static const struct {
10210                 char *name;
10211                 u32 offset;
10212                 u32 e1_mask;
10213                 u32 e1h_mask;
10214         } prty_tbl[] = {
10215                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
10216                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
10217                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
10218                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
10219                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
10220                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
10221
10222                 { NULL, 0xffffffff, 0, 0 }
10223         };
10224
10225         if (!netif_running(bp->dev))
10226                 return rc;
10227
10228         /* Go through all the memories */
10229         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10230                 for (j = 0; j < mem_tbl[i].size; j++)
10231                         REG_RD(bp, mem_tbl[i].offset + j*4);
10232
10233         /* Check the parity status */
10234         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10235                 val = REG_RD(bp, prty_tbl[i].offset);
10236                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10237                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10238                         DP(NETIF_MSG_HW,
10239                            "%s is 0x%x\n", prty_tbl[i].name, val);
10240                         goto test_mem_exit;
10241                 }
10242         }
10243
10244         rc = 0;
10245
10246 test_mem_exit:
10247         return rc;
10248 }
10249
10250 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10251 {
10252         int cnt = 1000;
10253
10254         if (link_up)
10255                 while (bnx2x_link_test(bp) && cnt--)
10256                         msleep(10);
10257 }
10258
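/* Single-packet loopback: build a frame addressed to our own MAC with
 * a recognizable payload, post it on the first Tx queue (start BD plus
 * an empty parsing BD), ring the doorbell, then poll the Tx/Rx consumer
 * indices and verify the CQE, length and payload on the Rx side.
 */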
10259 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10260 {
10261         unsigned int pkt_size, num_pkts, i;
10262         struct sk_buff *skb;
10263         unsigned char *packet;
10264         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10265         struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
10266         u16 tx_start_idx, tx_idx;
10267         u16 rx_start_idx, rx_idx;
10268         u16 pkt_prod, bd_prod;
10269         struct sw_tx_bd *tx_buf;
10270         struct eth_tx_start_bd *tx_start_bd;
10271         struct eth_tx_parse_bd *pbd = NULL;
10272         dma_addr_t mapping;
10273         union eth_rx_cqe *cqe;
10274         u8 cqe_fp_flags;
10275         struct sw_rx_bd *rx_buf;
10276         u16 len;
10277         int rc = -ENODEV;
10278
10279         /* check the loopback mode */
10280         switch (loopback_mode) {
10281         case BNX2X_PHY_LOOPBACK:
10282                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10283                         return -EINVAL;
10284                 break;
10285         case BNX2X_MAC_LOOPBACK:
10286                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10287                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10288                 break;
10289         default:
10290                 return -EINVAL;
10291         }
10292
10293         /* prepare the loopback packet */
10294         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10295                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10296         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10297         if (!skb) {
10298                 rc = -ENOMEM;
10299                 goto test_loopback_exit;
10300         }
10301         packet = skb_put(skb, pkt_size);
10302         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10303         memset(packet + ETH_ALEN, 0, ETH_ALEN);
10304         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10305         for (i = ETH_HLEN; i < pkt_size; i++)
10306                 packet[i] = (unsigned char) (i & 0xff);
10307
10308         /* send the loopback packet */
10309         num_pkts = 0;
10310         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10311         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10312
10313         pkt_prod = fp_tx->tx_pkt_prod++;
10314         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10315         tx_buf->first_bd = fp_tx->tx_bd_prod;
10316         tx_buf->skb = skb;
10317         tx_buf->flags = 0;
10318
10319         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10320         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10321         mapping = pci_map_single(bp->pdev, skb->data,
10322                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10323         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10324         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10325         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10326         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10327         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10328         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10329         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10330                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10331
10332         /* turn on parsing and get a BD */
10333         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10334         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10335
10336         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10337
10338         wmb();
10339
10340         fp_tx->tx_db.data.prod += 2;
10341         barrier();
10342         DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10343
10344         mmiowb();
10345
10346         num_pkts++;
10347         fp_tx->tx_bd_prod += 2; /* start + pbd */
10348         bp->dev->trans_start = jiffies;
10349
10350         udelay(100);
10351
10352         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10353         if (tx_idx != tx_start_idx + num_pkts)
10354                 goto test_loopback_exit;
10355
10356         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10357         if (rx_idx != rx_start_idx + num_pkts)
10358                 goto test_loopback_exit;
10359
10360         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10361         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10362         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10363                 goto test_loopback_rx_exit;
10364
10365         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10366         if (len != pkt_size)
10367                 goto test_loopback_rx_exit;
10368
10369         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10370         skb = rx_buf->skb;
10371         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10372         for (i = ETH_HLEN; i < pkt_size; i++)
10373                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10374                         goto test_loopback_rx_exit;
10375
10376         rc = 0;
10377
10378 test_loopback_rx_exit:
10379
10380         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10381         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10382         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10383         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10384
10385         /* Update producers */
10386         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10387                              fp_rx->rx_sge_prod);
10388
10389 test_loopback_exit:
10390         bp->link_params.loopback_mode = LOOPBACK_NONE;
10391
10392         return rc;
10393 }
10394
10395 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10396 {
10397         int rc = 0, res;
10398
10399         if (!netif_running(bp->dev))
10400                 return BNX2X_LOOPBACK_FAILED;
10401
10402         bnx2x_netif_stop(bp, 1);
10403         bnx2x_acquire_phy_lock(bp);
10404
10405         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10406         if (res) {
10407                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10408                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10409         }
10410
10411         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10412         if (res) {
10413                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10414                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10415         }
10416
10417         bnx2x_release_phy_lock(bp);
10418         bnx2x_netif_start(bp);
10419
10420         return rc;
10421 }
10422
10423 #define CRC32_RESIDUAL                  0xdebb20e3
10424
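/* NVRAM self-test: check the magic word, then CRC each directory
 * region; the result must equal the standard CRC32 residual, which
 * implies each region carries its own trailing CRC32 inside the range
 * covered by nvram_tbl.
 */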
10425 static int bnx2x_test_nvram(struct bnx2x *bp)
10426 {
10427         static const struct {
10428                 int offset;
10429                 int size;
10430         } nvram_tbl[] = {
10431                 {     0,  0x14 }, /* bootstrap */
10432                 {  0x14,  0xec }, /* dir */
10433                 { 0x100, 0x350 }, /* manuf_info */
10434                 { 0x450,  0xf0 }, /* feature_info */
10435                 { 0x640,  0x64 }, /* upgrade_key_info */
10436                 { 0x6a4,  0x64 },
10437                 { 0x708,  0x70 }, /* manuf_key_info */
10438                 { 0x778,  0x70 },
10439                 {     0,     0 }
10440         };
10441         __be32 buf[0x350 / 4];
10442         u8 *data = (u8 *)buf;
10443         int i, rc;
10444         u32 magic, crc;
10445
10446         rc = bnx2x_nvram_read(bp, 0, data, 4);
10447         if (rc) {
10448                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10449                 goto test_nvram_exit;
10450         }
10451
10452         magic = be32_to_cpu(buf[0]);
10453         if (magic != 0x669955aa) {
10454                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10455                 rc = -ENODEV;
10456                 goto test_nvram_exit;
10457         }
10458
10459         for (i = 0; nvram_tbl[i].size; i++) {
10460
10461                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10462                                       nvram_tbl[i].size);
10463                 if (rc) {
10464                         DP(NETIF_MSG_PROBE,
10465                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10466                         goto test_nvram_exit;
10467                 }
10468
10469                 crc = ether_crc_le(nvram_tbl[i].size, data);
10470                 if (crc != CRC32_RESIDUAL) {
10471                         DP(NETIF_MSG_PROBE,
10472                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10473                         rc = -ENODEV;
10474                         goto test_nvram_exit;
10475                 }
10476         }
10477
10478 test_nvram_exit:
10479         return rc;
10480 }
10481
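/* Interrupt self-test: post a harmless zero-length SET_MAC ramrod on
 * the slowpath and wait up to ~100ms for its completion, proving that
 * slowpath interrupts are being delivered.
 */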
10482 static int bnx2x_test_intr(struct bnx2x *bp)
10483 {
10484         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10485         int i, rc;
10486
10487         if (!netif_running(bp->dev))
10488                 return -ENODEV;
10489
10490         config->hdr.length = 0;
10491         if (CHIP_IS_E1(bp))
10492                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10493         else
10494                 config->hdr.offset = BP_FUNC(bp);
10495         config->hdr.client_id = bp->fp->cl_id;
10496         config->hdr.reserved1 = 0;
10497
10498         bp->set_mac_pending++;
10499         smp_wmb();
10500         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10501                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10502                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10503         if (rc == 0) {
10504                 for (i = 0; i < 10; i++) {
10505                         if (!bp->set_mac_pending)
10506                                 break;
10507                         smp_rmb();
10508                         msleep_interruptible(10);
10509                 }
10510                 if (i == 10)
10511                         rc = -ENODEV;
10512         }
10513
10514         return rc;
10515 }
10516
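/* ethtool self-test entry point.  The offline tests (registers,
 * memory, loopback) require reloading the NIC in diagnostic mode and
 * are refused in multi-function mode; the online tests (nvram,
 * interrupt, link) run against the live configuration.
 */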
10517 static void bnx2x_self_test(struct net_device *dev,
10518                             struct ethtool_test *etest, u64 *buf)
10519 {
10520         struct bnx2x *bp = netdev_priv(dev);
10521
10522         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10523
10524         if (!netif_running(dev))
10525                 return;
10526
10527         /* offline tests are not supported in MF mode */
10528         if (IS_E1HMF(bp))
10529                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10530
10531         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10532                 int port = BP_PORT(bp);
10533                 u32 val;
10534                 u8 link_up;
10535
10536                 /* save current value of input enable for TX port IF */
10537                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10538                 /* disable input for TX port IF */
10539                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10540
10541                 link_up = bp->link_vars.link_up;
10542                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10543                 bnx2x_nic_load(bp, LOAD_DIAG);
10544                 /* wait until link state is restored */
10545                 bnx2x_wait_for_link(bp, link_up);
10546
10547                 if (bnx2x_test_registers(bp) != 0) {
10548                         buf[0] = 1;
10549                         etest->flags |= ETH_TEST_FL_FAILED;
10550                 }
10551                 if (bnx2x_test_memory(bp) != 0) {
10552                         buf[1] = 1;
10553                         etest->flags |= ETH_TEST_FL_FAILED;
10554                 }
10555                 buf[2] = bnx2x_test_loopback(bp, link_up);
10556                 if (buf[2] != 0)
10557                         etest->flags |= ETH_TEST_FL_FAILED;
10558
10559                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10560
10561                 /* restore input for TX port IF */
10562                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10563
10564                 bnx2x_nic_load(bp, LOAD_NORMAL);
10565                 /* wait until link state is restored */
10566                 bnx2x_wait_for_link(bp, link_up);
10567         }
10568         if (bnx2x_test_nvram(bp) != 0) {
10569                 buf[3] = 1;
10570                 etest->flags |= ETH_TEST_FL_FAILED;
10571         }
10572         if (bnx2x_test_intr(bp) != 0) {
10573                 buf[4] = 1;
10574                 etest->flags |= ETH_TEST_FL_FAILED;
10575         }
10576         if (bp->port.pmf)
10577                 if (bnx2x_link_test(bp) != 0) {
10578                         buf[5] = 1;
10579                         etest->flags |= ETH_TEST_FL_FAILED;
10580                 }
10581
10582 #ifdef BNX2X_EXTRA_DEBUG
10583         bnx2x_panic_dump(bp);
10584 #endif
10585 }
10586
10587 static const struct {
10588         long offset;
10589         int size;
10590         u8 string[ETH_GSTRING_LEN];
10591 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10592 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10593         { Q_STATS_OFFSET32(error_bytes_received_hi),
10594                                                 8, "[%d]: rx_error_bytes" },
10595         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10596                                                 8, "[%d]: rx_ucast_packets" },
10597         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10598                                                 8, "[%d]: rx_mcast_packets" },
10599         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10600                                                 8, "[%d]: rx_bcast_packets" },
10601         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10602         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10603                                          4, "[%d]: rx_phy_ip_err_discards"},
10604         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10605                                          4, "[%d]: rx_skb_alloc_discard" },
10606         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10607
10608 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10609         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10610                                                         8, "[%d]: tx_packets" }
10611 };
10612
10613 static const struct {
10614         long offset;
10615         int size;
10616         u32 flags;
10617 #define STATS_FLAGS_PORT                1
10618 #define STATS_FLAGS_FUNC                2
10619 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10620         u8 string[ETH_GSTRING_LEN];
10621 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10622 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10623                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10624         { STATS_OFFSET32(error_bytes_received_hi),
10625                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10626         { STATS_OFFSET32(total_unicast_packets_received_hi),
10627                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10628         { STATS_OFFSET32(total_multicast_packets_received_hi),
10629                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10630         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10631                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10632         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10633                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10634         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10635                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10636         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10637                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10638         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10639                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10640 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10641                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10642         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10643                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10644         { STATS_OFFSET32(no_buff_discard_hi),
10645                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10646         { STATS_OFFSET32(mac_filter_discard),
10647                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10648         { STATS_OFFSET32(xxoverflow_discard),
10649                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10650         { STATS_OFFSET32(brb_drop_hi),
10651                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10652         { STATS_OFFSET32(brb_truncate_hi),
10653                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10654         { STATS_OFFSET32(pause_frames_received_hi),
10655                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10656         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10657                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10658         { STATS_OFFSET32(nig_timer_max),
10659                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10660 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10661                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10662         { STATS_OFFSET32(rx_skb_alloc_failed),
10663                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10664         { STATS_OFFSET32(hw_csum_err),
10665                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10666
10667         { STATS_OFFSET32(total_bytes_transmitted_hi),
10668                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10669         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10670                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10671         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10672                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10673         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10674                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10675         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10676                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10677         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10678                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10679         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10680                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10681 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10682                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10683         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10684                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10685         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10686                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10687         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10688                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10689         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10690                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10691         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10692                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10693         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10694                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10695         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10696                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10697         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10698                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10699         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10700                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10701 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10702                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10703         { STATS_OFFSET32(pause_frames_sent_hi),
10704                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10705 };
10706
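/* Port stats exist once per physical port, function stats once per PCI
 * function; in E1H multi-function mode only function stats are exposed
 * (unless the BNX2X_MSG_STATS message level is set for debugging).
 */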
10707 #define IS_PORT_STAT(i) \
10708         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10709 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10710 #define IS_E1HMF_MODE_STAT(bp) \
10711                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10712
10713 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10714 {
10715         struct bnx2x *bp = netdev_priv(dev);
10716         int i, num_stats;
10717
10718         switch (stringset) {
10719         case ETH_SS_STATS:
10720                 if (is_multi(bp)) {
10721                         num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10722                         if (!IS_E1HMF_MODE_STAT(bp))
10723                                 num_stats += BNX2X_NUM_STATS;
10724                 } else {
10725                         if (IS_E1HMF_MODE_STAT(bp)) {
10726                                 num_stats = 0;
10727                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
10728                                         if (IS_FUNC_STAT(i))
10729                                                 num_stats++;
10730                         } else
10731                                 num_stats = BNX2X_NUM_STATS;
10732                 }
10733                 return num_stats;
10734
10735         case ETH_SS_TEST:
10736                 return BNX2X_NUM_TESTS;
10737
10738         default:
10739                 return -EINVAL;
10740         }
10741 }
10742
10743 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10744 {
10745         struct bnx2x *bp = netdev_priv(dev);
10746         int i, j, k;
10747
10748         switch (stringset) {
10749         case ETH_SS_STATS:
10750                 if (is_multi(bp)) {
10751                         k = 0;
10752                         for_each_rx_queue(bp, i) {
10753                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10754                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10755                                                 bnx2x_q_stats_arr[j].string, i);
10756                                 k += BNX2X_NUM_Q_STATS;
10757                         }
10758                         if (IS_E1HMF_MODE_STAT(bp))
10759                                 break;
10760                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10761                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10762                                        bnx2x_stats_arr[j].string);
10763                 } else {
10764                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10765                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10766                                         continue;
10767                                 strcpy(buf + j*ETH_GSTRING_LEN,
10768                                        bnx2x_stats_arr[i].string);
10769                                 j++;
10770                         }
10771                 }
10772                 break;
10773
10774         case ETH_SS_TEST:
10775                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10776                 break;
10777         }
10778 }
10779
10780 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10781                                     struct ethtool_stats *stats, u64 *buf)
10782 {
10783         struct bnx2x *bp = netdev_priv(dev);
10784         u32 *hw_stats, *offset;
10785         int i, j, k;
10786
10787         if (is_multi(bp)) {
10788                 k = 0;
10789                 for_each_rx_queue(bp, i) {
10790                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10791                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10792                                 if (bnx2x_q_stats_arr[j].size == 0) {
10793                                         /* skip this counter */
10794                                         buf[k + j] = 0;
10795                                         continue;
10796                                 }
10797                                 offset = (hw_stats +
10798                                           bnx2x_q_stats_arr[j].offset);
10799                                 if (bnx2x_q_stats_arr[j].size == 4) {
10800                                         /* 4-byte counter */
10801                                         buf[k + j] = (u64) *offset;
10802                                         continue;
10803                                 }
10804                                 /* 8-byte counter */
10805                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10806                         }
10807                         k += BNX2X_NUM_Q_STATS;
10808                 }
10809                 if (IS_E1HMF_MODE_STAT(bp))
10810                         return;
10811                 hw_stats = (u32 *)&bp->eth_stats;
10812                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10813                         if (bnx2x_stats_arr[j].size == 0) {
10814                                 /* skip this counter */
10815                                 buf[k + j] = 0;
10816                                 continue;
10817                         }
10818                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10819                         if (bnx2x_stats_arr[j].size == 4) {
10820                                 /* 4-byte counter */
10821                                 buf[k + j] = (u64) *offset;
10822                                 continue;
10823                         }
10824                         /* 8-byte counter */
10825                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10826                 }
10827         } else {
10828                 hw_stats = (u32 *)&bp->eth_stats;
10829                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10830                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10831                                 continue;
10832                         if (bnx2x_stats_arr[i].size == 0) {
10833                                 /* skip this counter */
10834                                 buf[j] = 0;
10835                                 j++;
10836                                 continue;
10837                         }
10838                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10839                         if (bnx2x_stats_arr[i].size == 4) {
10840                                 /* 4-byte counter */
10841                                 buf[j] = (u64) *offset;
10842                                 j++;
10843                                 continue;
10844                         }
10845                         /* 8-byte counter */
10846                         buf[j] = HILO_U64(*offset, *(offset + 1));
10847                         j++;
10848                 }
10849         }
10850 }
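
/* Illustrative sketch (not used by the driver): each 8-byte statistic
 * above is exported by the hardware as two adjacent 32-bit words, high
 * word first, so HILO_U64(*offset, *(offset + 1)) behaves like this
 * hypothetical helper:
 */
static inline u64 example_read_stat64(const u32 *words)
{
	/* words[0] holds bits 63..32, words[1] holds bits 31..0 */
	return ((u64)words[0] << 32) | words[1];
}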
10851
10852 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10853 {
10854         struct bnx2x *bp = netdev_priv(dev);
10855         int port = BP_PORT(bp);
10856         int i;
10857
10858         if (!netif_running(dev))
10859                 return 0;
10860
10861         if (!bp->port.pmf)
10862                 return 0;
10863
10864         if (data == 0)
10865                 data = 2;
10866
10867         for (i = 0; i < (data * 2); i++) {
10868                 if ((i % 2) == 0)
10869                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10870                                       bp->link_params.hw_led_mode,
10871                                       bp->link_params.chip_id);
10872                 else
10873                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10874                                       bp->link_params.hw_led_mode,
10875                                       bp->link_params.chip_id);
10876
10877                 msleep_interruptible(500);
10878                 if (signal_pending(current))
10879                         break;
10880         }
10881
10882         if (bp->link_vars.link_up)
10883                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10884                               bp->link_vars.line_speed,
10885                               bp->link_params.hw_led_mode,
10886                               bp->link_params.chip_id);
10887
10888         return 0;
10889 }
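
/* Timing note (illustrative): the blink loop above toggles the LED on
 * even/odd iterations and sleeps ~500 ms per iteration, so data * 2
 * iterations blink the LED for roughly `data' seconds (2 seconds when
 * ethtool passes 0).
 */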
10890
10891 static const struct ethtool_ops bnx2x_ethtool_ops = {
10892         .get_settings           = bnx2x_get_settings,
10893         .set_settings           = bnx2x_set_settings,
10894         .get_drvinfo            = bnx2x_get_drvinfo,
10895         .get_regs_len           = bnx2x_get_regs_len,
10896         .get_regs               = bnx2x_get_regs,
10897         .get_wol                = bnx2x_get_wol,
10898         .set_wol                = bnx2x_set_wol,
10899         .get_msglevel           = bnx2x_get_msglevel,
10900         .set_msglevel           = bnx2x_set_msglevel,
10901         .nway_reset             = bnx2x_nway_reset,
10902         .get_link               = bnx2x_get_link,
10903         .get_eeprom_len         = bnx2x_get_eeprom_len,
10904         .get_eeprom             = bnx2x_get_eeprom,
10905         .set_eeprom             = bnx2x_set_eeprom,
10906         .get_coalesce           = bnx2x_get_coalesce,
10907         .set_coalesce           = bnx2x_set_coalesce,
10908         .get_ringparam          = bnx2x_get_ringparam,
10909         .set_ringparam          = bnx2x_set_ringparam,
10910         .get_pauseparam         = bnx2x_get_pauseparam,
10911         .set_pauseparam         = bnx2x_set_pauseparam,
10912         .get_rx_csum            = bnx2x_get_rx_csum,
10913         .set_rx_csum            = bnx2x_set_rx_csum,
10914         .get_tx_csum            = ethtool_op_get_tx_csum,
10915         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10916         .set_flags              = bnx2x_set_flags,
10917         .get_flags              = ethtool_op_get_flags,
10918         .get_sg                 = ethtool_op_get_sg,
10919         .set_sg                 = ethtool_op_set_sg,
10920         .get_tso                = ethtool_op_get_tso,
10921         .set_tso                = bnx2x_set_tso,
10922         .self_test              = bnx2x_self_test,
10923         .get_sset_count         = bnx2x_get_sset_count,
10924         .get_strings            = bnx2x_get_strings,
10925         .phys_id                = bnx2x_phys_id,
10926         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10927 };
10928
10929 /* end of ethtool_ops */
10930
10931 /****************************************************************************
10932 * General service functions
10933 ****************************************************************************/
10934
10935 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10936 {
10937         u16 pmcsr;
10938
10939         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10940
10941         switch (state) {
10942         case PCI_D0:
10943                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10944                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10945                                        PCI_PM_CTRL_PME_STATUS));
10946
10947                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10948                         /* delay required during transition out of D3hot */
10949                         msleep(20);
10950                 break;
10951
10952         case PCI_D3hot:
10953                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10954                 pmcsr |= 3;
10955
10956                 if (bp->wol)
10957                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10958
10959                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10960                                       pmcsr);
10961
10962                 /* No more memory access after this point until
10963                  * device is brought back to D0.
10964                  */
10965                 break;
10966
10967         default:
10968                 return -EINVAL;
10969         }
10970         return 0;
10971 }
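
/* Illustrative note (not driver code): PCI_PM_CTRL_STATE_MASK covers the
 * two low PMCSR bits that select the D-state, so the writes above are
 *
 *	D0:    (pmcsr & ~3) | PME_STATUS   - enter D0, clear pending PME
 *	                                     (the status bit is write-1-to-clear)
 *	D3hot: (pmcsr & ~3) | 3            - plus PME_ENABLE when WoL is armed
 */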
10972
10973 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10974 {
10975         u16 rx_cons_sb;
10976
10977         /* Tell compiler that status block fields can change */
10978         barrier();
10979         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10980         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10981                 rx_cons_sb++;
10982         return (fp->rx_comp_cons != rx_cons_sb);
10983 }
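
/* Worked example (illustrative, assuming 4K pages so MAX_RCQ_DESC_CNT is
 * 127): the last index of each RCQ page holds the next-page pointer, not
 * a real completion, so the consumer index is bumped past it:
 *
 *	rx_cons_sb = 127  ->  128	(skip the page-boundary entry)
 *	rx_cons_sb = 130  ->  130	(unchanged)
 */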
10984
10985 /*
10986  * net_device service functions
10987  */
10988
10989 static int bnx2x_poll(struct napi_struct *napi, int budget)
10990 {
10991         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10992                                                  napi);
10993         struct bnx2x *bp = fp->bp;
10994         int work_done = 0;
10995
10996 #ifdef BNX2X_STOP_ON_ERROR
10997         if (unlikely(bp->panic))
10998                 goto poll_panic;
10999 #endif
11000
11001         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
11002         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
11003
11004         bnx2x_update_fpsb_idx(fp);
11005
11006         if (bnx2x_has_rx_work(fp)) {
11007                 work_done = bnx2x_rx_int(fp, budget);
11008
11009                 /* must not complete if we consumed full budget */
11010                 if (work_done >= budget)
11011                         goto poll_again;
11012         }
11013
11014         /* bnx2x_has_rx_work() reads the status block, so we need to
11015          * ensure that the status block indices have actually been read
11016          * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work),
11017          * so that we won't write a "newer" value of the status block to IGU.
11018          * If a DMA arrived right after bnx2x_has_rx_work and there were no
11019          * rmb, the memory read (bnx2x_update_fpsb_idx) could be postponed
11020          * to just before bnx2x_ack_sb. In that case there would never be
11021          * another interrupt until the next update of the status block,
11022          * while there is still unhandled work.
11023          */
11024         rmb();
11025
11026         if (!bnx2x_has_rx_work(fp)) {
11027 #ifdef BNX2X_STOP_ON_ERROR
11028 poll_panic:
11029 #endif
11030                 napi_complete(napi);
11031
11032                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
11033                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
11034                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
11035                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
11036         }
11037
11038 poll_again:
11039         return work_done;
11040 }
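
/* Illustrative sketch of the completion pattern used in bnx2x_poll()
 * above (pseudocode, not driver code).  The re-check after the rmb() is
 * what closes the race between "no more work" and a status block update
 * that was DMAed in the meantime:
 *
 *	update local SB copy;		// bnx2x_update_fpsb_idx()
 *	if (has_work())
 *		work_done = process(budget);
 *	rmb();				// order SB read vs. the re-check
 *	if (!has_work()) {
 *		napi_complete();
 *		ack SB, re-enable IGU interrupt;
 *	}
 */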
11041
11042
11043 /* We split the first BD into a headers BD and a data BD
11044  * to ease the pain of our fellow microcode engineers;
11045  * we use one mapping for both BDs.
11046  * So far this has only been observed to happen
11047  * in Other Operating Systems(TM).
11048  */
11049 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
11050                                    struct bnx2x_fastpath *fp,
11051                                    struct sw_tx_bd *tx_buf,
11052                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
11053                                    u16 bd_prod, int nbd)
11054 {
11055         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
11056         struct eth_tx_bd *d_tx_bd;
11057         dma_addr_t mapping;
11058         int old_len = le16_to_cpu(h_tx_bd->nbytes);
11059
11060         /* first fix first BD */
11061         h_tx_bd->nbd = cpu_to_le16(nbd);
11062         h_tx_bd->nbytes = cpu_to_le16(hlen);
11063
11064         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11065            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11066            h_tx_bd->addr_lo, h_tx_bd->nbd);
11067
11068         /* now get a new data BD
11069          * (after the pbd) and fill it */
11070         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11071         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11072
11073         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11074                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11075
11076         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11077         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11078         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11079
11080         /* this marks the BD as one that has no individual mapping */
11081         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11082
11083         DP(NETIF_MSG_TX_QUEUED,
11084            "TSO split data size is %d (%x:%x)\n",
11085            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11086
11087         /* update tx_bd */
11088         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11089
11090         return bd_prod;
11091 }
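
/* Worked example (illustrative numbers): splitting a first BD of 1500
 * bytes at hlen = 66 with a single DMA mapping M gives
 *
 *	headers BD: addr = M,        nbytes = 66
 *	data BD:    addr = M + 66,   nbytes = 1500 - 66 = 1434
 *
 * Only the headers BD owns the mapping; BNX2X_TSO_SPLIT_BD tells the
 * completion path not to unmap the data BD separately.
 */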
11092
11093 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11094 {
11095         if (fix > 0)
11096                 csum = (u16) ~csum_fold(csum_sub(csum,
11097                                 csum_partial(t_header - fix, fix, 0)));
11098
11099         else if (fix < 0)
11100                 csum = (u16) ~csum_fold(csum_add(csum,
11101                                 csum_partial(t_header, -fix, 0)));
11102
11103         return swab16(csum);
11104 }
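
/* Worked sketch (one's-complement arithmetic): when the stack started the
 * partial checksum `fix' bytes before the transport header, those bytes
 * are folded back out:
 *
 *	csum' = ~csum_fold(csum_sub(csum, csum_partial(t_header - fix, fix, 0)))
 *
 * and the result is swab16()ed because the parsing BD stores the pseudo
 * checksum byte-swapped (see the other swab16() uses below).
 */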
11105
11106 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11107 {
11108         u32 rc;
11109
11110         if (skb->ip_summed != CHECKSUM_PARTIAL)
11111                 rc = XMIT_PLAIN;
11112
11113         else {
11114                 if (skb->protocol == htons(ETH_P_IPV6)) {
11115                         rc = XMIT_CSUM_V6;
11116                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11117                                 rc |= XMIT_CSUM_TCP;
11118
11119                 } else {
11120                         rc = XMIT_CSUM_V4;
11121                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11122                                 rc |= XMIT_CSUM_TCP;
11123                 }
11124         }
11125
11126         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11127                 rc |= XMIT_GSO_V4;
11128
11129         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11130                 rc |= XMIT_GSO_V6;
11131
11132         return rc;
11133 }
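
/* Example classification (illustrative): a CHECKSUM_PARTIAL IPv4 TCP skb
 * with SKB_GSO_TCPV4 set yields
 *
 *	xmit_type = XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4
 *
 * while a packet with no checksum offload is simply XMIT_PLAIN.
 */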
11134
11135 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11136 /* check whether a packet requires linearization (i.e. is too fragmented)
11137    no need to check fragmentation if page size > 8K (there will be no
11138    violation of FW restrictions) */
11139 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11140                              u32 xmit_type)
11141 {
11142         int to_copy = 0;
11143         int hlen = 0;
11144         int first_bd_sz = 0;
11145
11146         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11147         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11148
11149                 if (xmit_type & XMIT_GSO) {
11150                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11151                         /* Check if LSO packet needs to be copied:
11152                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11153                         int wnd_size = MAX_FETCH_BD - 3;
11154                         /* Number of windows to check */
11155                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11156                         int wnd_idx = 0;
11157                         int frag_idx = 0;
11158                         u32 wnd_sum = 0;
11159
11160                         /* Headers length */
11161                         hlen = (int)(skb_transport_header(skb) - skb->data) +
11162                                 tcp_hdrlen(skb);
11163
11164                         /* Amount of data (w/o headers) on the linear part of the SKB */
11165                         first_bd_sz = skb_headlen(skb) - hlen;
11166
11167                         wnd_sum  = first_bd_sz;
11168
11169                         /* Calculate the first sum - it's special */
11170                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11171                                 wnd_sum +=
11172                                         skb_shinfo(skb)->frags[frag_idx].size;
11173
11174                         /* If there was data on linear skb data - check it */
11175                         if (first_bd_sz > 0) {
11176                                 if (unlikely(wnd_sum < lso_mss)) {
11177                                         to_copy = 1;
11178                                         goto exit_lbl;
11179                                 }
11180
11181                                 wnd_sum -= first_bd_sz;
11182                         }
11183
11184                         /* Others are easier: run through the frag list and
11185                            check all windows */
11186                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11187                                 wnd_sum +=
11188                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11189
11190                                 if (unlikely(wnd_sum < lso_mss)) {
11191                                         to_copy = 1;
11192                                         break;
11193                                 }
11194                                 wnd_sum -=
11195                                         skb_shinfo(skb)->frags[wnd_idx].size;
11196                         }
11197                 } else {
11198                         /* a non-LSO packet that is too fragmented
11199                            should always be linearized */
11200                         to_copy = 1;
11201                 }
11202         }
11203
11204 exit_lbl:
11205         if (unlikely(to_copy))
11206                 DP(NETIF_MSG_TX_QUEUED,
11207                    "Linearization IS REQUIRED for %s packet. "
11208                    "num_frags %d  hlen %d  first_bd_sz %d\n",
11209                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11210                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11211
11212         return to_copy;
11213 }
11214 #endif
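
/* Worked example (illustrative numbers, assuming MAX_FETCH_BD is 13 so
 * wnd_size is 10): for an LSO skb with lso_mss = 1460 the loop above
 * slides a 10-frag window over the frag list; if any window sums to
 * fewer than 1460 bytes, a single MSS could span more than MAX_FETCH_BD
 * descriptors, which the FW cannot fetch, so the skb is linearized.
 */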
11215
11216 /* called with netif_tx_lock
11217  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11218  * netif_wake_queue()
11219  */
11220 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11221 {
11222         struct bnx2x *bp = netdev_priv(dev);
11223         struct bnx2x_fastpath *fp, *fp_stat;
11224         struct netdev_queue *txq;
11225         struct sw_tx_bd *tx_buf;
11226         struct eth_tx_start_bd *tx_start_bd;
11227         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11228         struct eth_tx_parse_bd *pbd = NULL;
11229         u16 pkt_prod, bd_prod;
11230         int nbd, fp_index;
11231         dma_addr_t mapping;
11232         u32 xmit_type = bnx2x_xmit_type(bp, skb);
11233         int i;
11234         u8 hlen = 0;
11235         __le16 pkt_size = 0;
11236
11237 #ifdef BNX2X_STOP_ON_ERROR
11238         if (unlikely(bp->panic))
11239                 return NETDEV_TX_BUSY;
11240 #endif
11241
11242         fp_index = skb_get_queue_mapping(skb);
11243         txq = netdev_get_tx_queue(dev, fp_index);
11244
11245         fp = &bp->fp[fp_index + bp->num_rx_queues];
11246         fp_stat = &bp->fp[fp_index];
11247
11248         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11249                 fp_stat->eth_q_stats.driver_xoff++;
11250                 netif_tx_stop_queue(txq);
11251                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11252                 return NETDEV_TX_BUSY;
11253         }
11254
11255         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
11256            "  gso type %x  xmit_type %x\n",
11257            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11258            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11259
11260 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11261         /* First, check if we need to linearize the skb (due to FW
11262            restrictions). No need to check fragmentation if page size > 8K
11263            (there will be no violation of FW restrictions) */
11264         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11265                 /* Statistics of linearization */
11266                 bp->lin_cnt++;
11267                 if (skb_linearize(skb) != 0) {
11268                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11269                            "silently dropping this SKB\n");
11270                         dev_kfree_skb_any(skb);
11271                         return NETDEV_TX_OK;
11272                 }
11273         }
11274 #endif
11275
11276         /*
11277         Please read carefully. First we use one BD which we mark as start,
11278         then we have a parsing info BD (used for TSO or xsum),
11279         and only then we have the rest of the TSO BDs.
11280         (don't forget to mark the last one as last,
11281         and to unmap only AFTER you write to the BD ...)
11282         And above all, all pbd sizes are in words - NOT DWORDS!
11283         */
11284
11285         pkt_prod = fp->tx_pkt_prod++;
11286         bd_prod = TX_BD(fp->tx_bd_prod);
11287
11288         /* get a tx_buf and first BD */
11289         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11290         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11291
11292         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11293         tx_start_bd->general_data = (UNICAST_ADDRESS <<
11294                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11295         /* header nbd */
11296         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11297
11298         /* remember the first BD of the packet */
11299         tx_buf->first_bd = fp->tx_bd_prod;
11300         tx_buf->skb = skb;
11301         tx_buf->flags = 0;
11302
11303         DP(NETIF_MSG_TX_QUEUED,
11304            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
11305            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11306
11307 #ifdef BCM_VLAN
11308         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11309             (bp->flags & HW_VLAN_TX_FLAG)) {
11310                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11311                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11312         } else
11313 #endif
11314                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11315
11316         /* turn on parsing and get a BD */
11317         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11318         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11319
11320         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11321
11322         if (xmit_type & XMIT_CSUM) {
11323                 hlen = (skb_network_header(skb) - skb->data) / 2;
11324
11325                 /* for now NS flag is not used in Linux */
11326                 pbd->global_data =
11327                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11328                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11329
11330                 pbd->ip_hlen = (skb_transport_header(skb) -
11331                                 skb_network_header(skb)) / 2;
11332
11333                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11334
11335                 pbd->total_hlen = cpu_to_le16(hlen);
11336                 hlen = hlen*2;
11337
11338                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11339
11340                 if (xmit_type & XMIT_CSUM_V4)
11341                         tx_start_bd->bd_flags.as_bitfield |=
11342                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11343                 else
11344                         tx_start_bd->bd_flags.as_bitfield |=
11345                                                 ETH_TX_BD_FLAGS_IPV6;
11346
11347                 if (xmit_type & XMIT_CSUM_TCP) {
11348                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11349
11350                 } else {
11351                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11352
11353                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11354
11355                         DP(NETIF_MSG_TX_QUEUED,
11356                            "hlen %d  fix %d  csum before fix %x\n",
11357                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11358
11359                         /* HW bug: fixup the CSUM */
11360                         pbd->tcp_pseudo_csum =
11361                                 bnx2x_csum_fix(skb_transport_header(skb),
11362                                                SKB_CS(skb), fix);
11363
11364                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11365                            pbd->tcp_pseudo_csum);
11366                 }
11367         }
11368
11369         mapping = pci_map_single(bp->pdev, skb->data,
11370                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11371
11372         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11373         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11374         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11375         tx_start_bd->nbd = cpu_to_le16(nbd);
11376         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11377         pkt_size = tx_start_bd->nbytes;
11378
11379         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11380            "  nbytes %d  flags %x  vlan %x\n",
11381            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11382            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11383            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11384
11385         if (xmit_type & XMIT_GSO) {
11386
11387                 DP(NETIF_MSG_TX_QUEUED,
11388                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11389                    skb->len, hlen, skb_headlen(skb),
11390                    skb_shinfo(skb)->gso_size);
11391
11392                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11393
11394                 if (unlikely(skb_headlen(skb) > hlen))
11395                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11396                                                  hlen, bd_prod, ++nbd);
11397
11398                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11399                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11400                 pbd->tcp_flags = pbd_tcp_flags(skb);
11401
11402                 if (xmit_type & XMIT_GSO_V4) {
11403                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11404                         pbd->tcp_pseudo_csum =
11405                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11406                                                           ip_hdr(skb)->daddr,
11407                                                           0, IPPROTO_TCP, 0));
11408
11409                 } else
11410                         pbd->tcp_pseudo_csum =
11411                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11412                                                         &ipv6_hdr(skb)->daddr,
11413                                                         0, IPPROTO_TCP, 0));
11414
11415                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11416         }
11417         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11418
11419         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11420                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11421
11422                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11423                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11424                 if (total_pkt_bd == NULL)
11425                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11426
11427                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11428                                        frag->size, PCI_DMA_TODEVICE);
11429
11430                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11431                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11432                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11433                 le16_add_cpu(&pkt_size, frag->size);
11434
11435                 DP(NETIF_MSG_TX_QUEUED,
11436                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11437                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11438                    le16_to_cpu(tx_data_bd->nbytes));
11439         }
11440
11441         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11442
11443         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11444
11445         /* now send a tx doorbell, counting the next-page BD
11446          * if the packet contains or ends on it
11447          */
11448         if (TX_BD_POFF(bd_prod) < nbd)
11449                 nbd++;
11450
11451         if (total_pkt_bd != NULL)
11452                 total_pkt_bd->total_pkt_bytes = pkt_size;
11453
11454         if (pbd)
11455                 DP(NETIF_MSG_TX_QUEUED,
11456                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11457                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11458                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11459                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11460                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11461
11462         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11463
11464         /*
11465          * Make sure that the BD data is updated before updating the producer
11466          * since FW might read the BD right after the producer is updated.
11467          * This is only applicable for weak-ordered memory model archs such
11468          * as IA-64. The following barrier is also mandatory since FW will
11469          * assume packets must have BDs.
11470          */
11471         wmb();
11472
11473         fp->tx_db.data.prod += nbd;
11474         barrier();
11475         DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11476
11477         mmiowb();
11478
11479         fp->tx_bd_prod += nbd;
11480
11481         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11482                 netif_tx_stop_queue(txq);
11483                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11484                    if we put Tx into XOFF state. */
11485                 smp_mb();
11486                 fp_stat->eth_q_stats.driver_xoff++;
11487                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11488                         netif_tx_wake_queue(txq);
11489         }
11490         fp_stat->tx_pkt++;
11491
11492         return NETDEV_TX_OK;
11493 }
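
/* Illustrative summary of the producer-update protocol in
 * bnx2x_start_xmit() above (pseudocode, not driver code):
 *
 *	write BDs;			// descriptor contents
 *	wmb();				// BDs visible before the producer
 *	tx_db.data.prod += nbd;		// update SW producer copy
 *	barrier();			// compiler: doorbell reads fresh raw
 *	DOORBELL();			// FW assumes the BDs already exist
 *	mmiowb();			// order MMIO writes on weakly-ordered
 *					// platforms
 */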
11494
11495 /* called with rtnl_lock */
11496 static int bnx2x_open(struct net_device *dev)
11497 {
11498         struct bnx2x *bp = netdev_priv(dev);
11499
11500         netif_carrier_off(dev);
11501
11502         bnx2x_set_power_state(bp, PCI_D0);
11503
11504         return bnx2x_nic_load(bp, LOAD_OPEN);
11505 }
11506
11507 /* called with rtnl_lock */
11508 static int bnx2x_close(struct net_device *dev)
11509 {
11510         struct bnx2x *bp = netdev_priv(dev);
11511
11512         /* Unload the driver, release IRQs */
11513         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11514         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11515                 if (!CHIP_REV_IS_SLOW(bp))
11516                         bnx2x_set_power_state(bp, PCI_D3hot);
11517
11518         return 0;
11519 }
11520
11521 /* called with netif_tx_lock from dev_mcast.c */
11522 static void bnx2x_set_rx_mode(struct net_device *dev)
11523 {
11524         struct bnx2x *bp = netdev_priv(dev);
11525         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11526         int port = BP_PORT(bp);
11527
11528         if (bp->state != BNX2X_STATE_OPEN) {
11529                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11530                 return;
11531         }
11532
11533         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11534
11535         if (dev->flags & IFF_PROMISC)
11536                 rx_mode = BNX2X_RX_MODE_PROMISC;
11537
11538         else if ((dev->flags & IFF_ALLMULTI) ||
11539                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11540                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11541
11542         else { /* some multicasts */
11543                 if (CHIP_IS_E1(bp)) {
11544                         int i, old, offset;
11545                         struct dev_mc_list *mclist;
11546                         struct mac_configuration_cmd *config =
11547                                                 bnx2x_sp(bp, mcast_config);
11548
11549                         for (i = 0, mclist = dev->mc_list;
11550                              mclist && (i < dev->mc_count);
11551                              i++, mclist = mclist->next) {
11552
11553                                 config->config_table[i].
11554                                         cam_entry.msb_mac_addr =
11555                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11556                                 config->config_table[i].
11557                                         cam_entry.middle_mac_addr =
11558                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11559                                 config->config_table[i].
11560                                         cam_entry.lsb_mac_addr =
11561                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11562                                 config->config_table[i].cam_entry.flags =
11563                                                         cpu_to_le16(port);
11564                                 config->config_table[i].
11565                                         target_table_entry.flags = 0;
11566                                 config->config_table[i].target_table_entry.
11567                                         clients_bit_vector =
11568                                                 cpu_to_le32(1 << BP_L_ID(bp));
11569                                 config->config_table[i].
11570                                         target_table_entry.vlan_id = 0;
11571
11572                                 DP(NETIF_MSG_IFUP,
11573                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11574                                    config->config_table[i].
11575                                                 cam_entry.msb_mac_addr,
11576                                    config->config_table[i].
11577                                                 cam_entry.middle_mac_addr,
11578                                    config->config_table[i].
11579                                                 cam_entry.lsb_mac_addr);
11580                         }
11581                         old = config->hdr.length;
11582                         if (old > i) {
11583                                 for (; i < old; i++) {
11584                                         if (CAM_IS_INVALID(config->
11585                                                            config_table[i])) {
11586                                                 /* already invalidated */
11587                                                 break;
11588                                         }
11589                                         /* invalidate */
11590                                         CAM_INVALIDATE(config->
11591                                                        config_table[i]);
11592                                 }
11593                         }
11594
11595                         if (CHIP_REV_IS_SLOW(bp))
11596                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11597                         else
11598                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11599
11600                         config->hdr.length = i;
11601                         config->hdr.offset = offset;
11602                         config->hdr.client_id = bp->fp->cl_id;
11603                         config->hdr.reserved1 = 0;
11604
11605                         bp->set_mac_pending++;
11606                         smp_wmb();
11607
11608                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11609                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11610                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11611                                       0);
11612                 } else { /* E1H */
11613                         /* Accept one or more multicasts */
11614                         struct dev_mc_list *mclist;
11615                         u32 mc_filter[MC_HASH_SIZE];
11616                         u32 crc, bit, regidx;
11617                         int i;
11618
11619                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11620
11621                         for (i = 0, mclist = dev->mc_list;
11622                              mclist && (i < dev->mc_count);
11623                              i++, mclist = mclist->next) {
11624
11625                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11626                                    mclist->dmi_addr);
11627
11628                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11629                                 bit = (crc >> 24) & 0xff;
11630                                 regidx = bit >> 5;
11631                                 bit &= 0x1f;
11632                                 mc_filter[regidx] |= (1 << bit);
11633                         }
11634
11635                         for (i = 0; i < MC_HASH_SIZE; i++)
11636                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11637                                        mc_filter[i]);
11638                 }
11639         }
11640
11641         bp->rx_mode = rx_mode;
11642         bnx2x_set_storm_rx_mode(bp);
11643 }
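
/* Illustrative sketch (not used by the driver): the E1H multicast path
 * above reduces each MAC to one bit in the 8-word MC_HASH register file.
 * Assuming the same crc32c_le() derivation, the bit position is:
 */
static inline void example_mc_hash_pos(u32 crc, u32 *regidx, u32 *bitpos)
{
	u32 bit = (crc >> 24) & 0xff;	/* 0..255: one bucket per bit */

	*regidx = bit >> 5;		/* which of the 8 32-bit registers */
	*bitpos = bit & 0x1f;		/* which bit inside that register */
}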
11644
11645 /* called with rtnl_lock */
11646 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11647 {
11648         struct sockaddr *addr = p;
11649         struct bnx2x *bp = netdev_priv(dev);
11650
11651         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11652                 return -EINVAL;
11653
11654         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11655         if (netif_running(dev)) {
11656                 if (CHIP_IS_E1(bp))
11657                         bnx2x_set_eth_mac_addr_e1(bp, 1);
11658                 else
11659                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
11660         }
11661
11662         return 0;
11663 }
11664
11665 /* called with rtnl_lock */
11666 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11667                            int devad, u16 addr)
11668 {
11669         struct bnx2x *bp = netdev_priv(netdev);
11670         u16 value;
11671         int rc;
11672         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11673
11674         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11675            prtad, devad, addr);
11676
11677         if (prtad != bp->mdio.prtad) {
11678                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11679                    prtad, bp->mdio.prtad);
11680                 return -EINVAL;
11681         }
11682
11683         /* The HW expects different devad if CL22 is used */
11684         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11685
11686         bnx2x_acquire_phy_lock(bp);
11687         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11688                              devad, addr, &value);
11689         bnx2x_release_phy_lock(bp);
11690         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11691
11692         if (!rc)
11693                 rc = value;
11694         return rc;
11695 }
11696
11697 /* called with rtnl_lock */
11698 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11699                             u16 addr, u16 value)
11700 {
11701         struct bnx2x *bp = netdev_priv(netdev);
11702         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11703         int rc;
11704
11705         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11706                            " value 0x%x\n", prtad, devad, addr, value);
11707
11708         if (prtad != bp->mdio.prtad) {
11709                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11710                    prtad, bp->mdio.prtad);
11711                 return -EINVAL;
11712         }
11713
11714         /* The HW expects different devad if CL22 is used */
11715         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11716
11717         bnx2x_acquire_phy_lock(bp);
11718         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11719                               devad, addr, value);
11720         bnx2x_release_phy_lock(bp);
11721         return rc;
11722 }
11723
11724 /* called with rtnl_lock */
11725 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11726 {
11727         struct bnx2x *bp = netdev_priv(dev);
11728         struct mii_ioctl_data *mdio = if_mii(ifr);
11729
11730         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11731            mdio->phy_id, mdio->reg_num, mdio->val_in);
11732
11733         if (!netif_running(dev))
11734                 return -EAGAIN;
11735
11736         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11737 }
11738
11739 /* called with rtnl_lock */
11740 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11741 {
11742         struct bnx2x *bp = netdev_priv(dev);
11743         int rc = 0;
11744
11745         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11746             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11747                 return -EINVAL;
11748
11749         /* This does not race with packet allocation
11750          * because the actual alloc size is
11751          * only updated as part of load
11752          */
11753         dev->mtu = new_mtu;
11754
11755         if (netif_running(dev)) {
11756                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11757                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11758         }
11759
11760         return rc;
11761 }
11762
11763 static void bnx2x_tx_timeout(struct net_device *dev)
11764 {
11765         struct bnx2x *bp = netdev_priv(dev);
11766
11767 #ifdef BNX2X_STOP_ON_ERROR
11768         if (!bp->panic)
11769                 bnx2x_panic();
11770 #endif
11771         /* This allows the netif to be shutdown gracefully before resetting */
11772         schedule_work(&bp->reset_task);
11773 }
11774
11775 #ifdef BCM_VLAN
11776 /* called with rtnl_lock */
11777 static void bnx2x_vlan_rx_register(struct net_device *dev,
11778                                    struct vlan_group *vlgrp)
11779 {
11780         struct bnx2x *bp = netdev_priv(dev);
11781
11782         bp->vlgrp = vlgrp;
11783
11784         /* Set flags according to the required capabilities */
11785         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11786
11787         if (dev->features & NETIF_F_HW_VLAN_TX)
11788                 bp->flags |= HW_VLAN_TX_FLAG;
11789
11790         if (dev->features & NETIF_F_HW_VLAN_RX)
11791                 bp->flags |= HW_VLAN_RX_FLAG;
11792
11793         if (netif_running(dev))
11794                 bnx2x_set_client_config(bp);
11795 }
11796
11797 #endif
11798
11799 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11800 static void poll_bnx2x(struct net_device *dev)
11801 {
11802         struct bnx2x *bp = netdev_priv(dev);
11803
11804         disable_irq(bp->pdev->irq);
11805         bnx2x_interrupt(bp->pdev->irq, dev);
11806         enable_irq(bp->pdev->irq);
11807 }
11808 #endif
11809
11810 static const struct net_device_ops bnx2x_netdev_ops = {
11811         .ndo_open               = bnx2x_open,
11812         .ndo_stop               = bnx2x_close,
11813         .ndo_start_xmit         = bnx2x_start_xmit,
11814         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11815         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11816         .ndo_validate_addr      = eth_validate_addr,
11817         .ndo_do_ioctl           = bnx2x_ioctl,
11818         .ndo_change_mtu         = bnx2x_change_mtu,
11819         .ndo_tx_timeout         = bnx2x_tx_timeout,
11820 #ifdef BCM_VLAN
11821         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11822 #endif
11823 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11824         .ndo_poll_controller    = poll_bnx2x,
11825 #endif
11826 };
11827
11828 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11829                                     struct net_device *dev)
11830 {
11831         struct bnx2x *bp;
11832         int rc;
11833
11834         SET_NETDEV_DEV(dev, &pdev->dev);
11835         bp = netdev_priv(dev);
11836
11837         bp->dev = dev;
11838         bp->pdev = pdev;
11839         bp->flags = 0;
11840         bp->func = PCI_FUNC(pdev->devfn);
11841
11842         rc = pci_enable_device(pdev);
11843         if (rc) {
11844                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11845                 goto err_out;
11846         }
11847
11848         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11849                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11850                        " aborting\n");
11851                 rc = -ENODEV;
11852                 goto err_out_disable;
11853         }
11854
11855         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11856                 printk(KERN_ERR PFX "Cannot find second PCI device"
11857                        " base address, aborting\n");
11858                 rc = -ENODEV;
11859                 goto err_out_disable;
11860         }
11861
11862         if (atomic_read(&pdev->enable_cnt) == 1) {
11863                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11864                 if (rc) {
11865                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11866                                " aborting\n");
11867                         goto err_out_disable;
11868                 }
11869
11870                 pci_set_master(pdev);
11871                 pci_save_state(pdev);
11872         }
11873
11874         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11875         if (bp->pm_cap == 0) {
11876                 printk(KERN_ERR PFX "Cannot find power management"
11877                        " capability, aborting\n");
11878                 rc = -EIO;
11879                 goto err_out_release;
11880         }
11881
11882         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11883         if (bp->pcie_cap == 0) {
11884                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11885                        " aborting\n");
11886                 rc = -EIO;
11887                 goto err_out_release;
11888         }
11889
11890         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11891                 bp->flags |= USING_DAC_FLAG;
11892                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11893                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11894                                " failed, aborting\n");
11895                         rc = -EIO;
11896                         goto err_out_release;
11897                 }
11898
11899         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11900                 printk(KERN_ERR PFX "System does not support DMA,"
11901                        " aborting\n");
11902                 rc = -EIO;
11903                 goto err_out_release;
11904         }
11905
11906         dev->mem_start = pci_resource_start(pdev, 0);
11907         dev->base_addr = dev->mem_start;
11908         dev->mem_end = pci_resource_end(pdev, 0);
11909
11910         dev->irq = pdev->irq;
11911
11912         bp->regview = pci_ioremap_bar(pdev, 0);
11913         if (!bp->regview) {
11914                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11915                 rc = -ENOMEM;
11916                 goto err_out_release;
11917         }
11918
11919         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11920                                         min_t(u64, BNX2X_DB_SIZE,
11921                                               pci_resource_len(pdev, 2)));
11922         if (!bp->doorbells) {
11923                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11924                 rc = -ENOMEM;
11925                 goto err_out_unmap;
11926         }
11927
11928         bnx2x_set_power_state(bp, PCI_D0);
11929
11930         /* clean indirect addresses */
11931         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11932                                PCICFG_VENDOR_ID_OFFSET);
11933         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11934         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11935         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11936         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11937
11938         dev->watchdog_timeo = TX_TIMEOUT;
11939
11940         dev->netdev_ops = &bnx2x_netdev_ops;
11941         dev->ethtool_ops = &bnx2x_ethtool_ops;
11942         dev->features |= NETIF_F_SG;
11943         dev->features |= NETIF_F_HW_CSUM;
11944         if (bp->flags & USING_DAC_FLAG)
11945                 dev->features |= NETIF_F_HIGHDMA;
11946         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11947         dev->features |= NETIF_F_TSO6;
11948 #ifdef BCM_VLAN
11949         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11950         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11951
11952         dev->vlan_features |= NETIF_F_SG;
11953         dev->vlan_features |= NETIF_F_HW_CSUM;
11954         if (bp->flags & USING_DAC_FLAG)
11955                 dev->vlan_features |= NETIF_F_HIGHDMA;
11956         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11957         dev->vlan_features |= NETIF_F_TSO6;
11958 #endif
11959
11960         /* get_port_hwinfo() will set prtad and mmds properly */
11961         bp->mdio.prtad = MDIO_PRTAD_NONE;
11962         bp->mdio.mmds = 0;
11963         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11964         bp->mdio.dev = dev;
11965         bp->mdio.mdio_read = bnx2x_mdio_read;
11966         bp->mdio.mdio_write = bnx2x_mdio_write;
11967
11968         return 0;
11969
11970 err_out_unmap:
11971         if (bp->regview) {
11972                 iounmap(bp->regview);
11973                 bp->regview = NULL;
11974         }
11975         if (bp->doorbells) {
11976                 iounmap(bp->doorbells);
11977                 bp->doorbells = NULL;
11978         }
11979
11980 err_out_release:
11981         if (atomic_read(&pdev->enable_cnt) == 1)
11982                 pci_release_regions(pdev);
11983
11984 err_out_disable:
11985         pci_disable_device(pdev);
11986         pci_set_drvdata(pdev, NULL);
11987
11988 err_out:
11989         return rc;
11990 }
11991
11992 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11993                                                  int *width, int *speed)
11994 {
11995         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11996
11997         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11998
11999         /* return value of 1=2.5GHz 2=5GHz */
12000         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
12001 }
12002
12003 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
12004 {
12005         const struct firmware *firmware = bp->firmware;
12006         struct bnx2x_fw_file_hdr *fw_hdr;
12007         struct bnx2x_fw_file_section *sections;
12008         u32 offset, len, num_ops;
12009         u16 *ops_offsets;
12010         int i;
12011         const u8 *fw_ver;
12012
12013         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
12014                 return -EINVAL;
12015
12016         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12017         sections = (struct bnx2x_fw_file_section *)fw_hdr;
12018
12019         /* Make sure none of the offsets and sizes make us read beyond
12020          * the end of the firmware data */
12021         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12022                 offset = be32_to_cpu(sections[i].offset);
12023                 len = be32_to_cpu(sections[i].len);
12024                 if (offset + len > firmware->size) {
12025                         printk(KERN_ERR PFX "Section %d length is out of "
12026                                             "bounds\n", i);
12027                         return -EINVAL;
12028                 }
12029         }
12030
12031         /* Likewise for the init_ops offsets */
12032         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12033         ops_offsets = (u16 *)(firmware->data + offset);
12034         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12035
12036         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12037                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12038                         printk(KERN_ERR PFX "Section offset %d is out of "
12039                                             "bounds\n", i);
12040                         return -EINVAL;
12041                 }
12042         }
12043
12044         /* Check FW version */
12045         offset = be32_to_cpu(fw_hdr->fw_version.offset);
12046         fw_ver = firmware->data + offset;
12047         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12048             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12049             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12050             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12051                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
12052                                     " Should be %d.%d.%d.%d\n",
12053                        fw_ver[0], fw_ver[1], fw_ver[2],
12054                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
12055                        BCM_5710_FW_MINOR_VERSION,
12056                        BCM_5710_FW_REVISION_VERSION,
12057                        BCM_5710_FW_ENGINEERING_VERSION);
12058                 return -EINVAL;
12059         }
12060
12061         return 0;
12062 }
12063
12064 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12065 {
12066         const __be32 *source = (const __be32 *)_source;
12067         u32 *target = (u32 *)_target;
12068         u32 i;
12069
12070         for (i = 0; i < n/4; i++)
12071                 target[i] = be32_to_cpu(source[i]);
12072 }
12073
12074 /*
12075  * Ops array is stored in the following format:
12076  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12077  */
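/*
 * Illustrative example (made-up values): the big-endian word pair
 * 0x02000140 0xdeadbeef decodes to
 *	op       = 0x02
 *	offset   = 0x000140
 *	raw_data = 0xdeadbeef
 */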
12078 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12079 {
12080         const __be32 *source = (const __be32 *)_source;
12081         struct raw_op *target = (struct raw_op *)_target;
12082         u32 i, j, tmp;
12083
12084         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12085                 tmp = be32_to_cpu(source[j]);
12086                 target[i].op = (tmp >> 24) & 0xff;
12087                 target[i].offset =  tmp & 0xffffff;
12088                 target[i].raw_data = be32_to_cpu(source[j+1]);
12089         }
12090 }
12091
12092 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12093 {
12094         const __be16 *source = (const __be16 *)_source;
12095         u16 *target = (u16 *)_target;
12096         u32 i;
12097
12098         for (i = 0; i < n/2; i++)
12099                 target[i] = be16_to_cpu(source[i]);
12100 }
12101
12102 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12103         do { \
12104                 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12105                 bp->arr = kmalloc(len, GFP_KERNEL); \
12106                 if (!bp->arr) { \
12107                         printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12108                                             "for "#arr"\n", len); \
12109                         goto lbl; \
12110                 } \
12111                 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12112                      (u8 *)bp->arr, len); \
12113         } while (0)
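/*
 * Note: BNX2X_ALLOC_AND_SET() relies on 'bp' and 'fw_hdr' being in
 * scope at the expansion site.  It allocates bp->arr sized after the
 * matching FW file section, converts the section out of big-endian
 * via func() and jumps to lbl on allocation failure.
 */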
12114
12115 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12116 {
12117         char fw_file_name[40] = {0};
12118         struct bnx2x_fw_file_hdr *fw_hdr;
12119         int rc, offset;
12120
12121         /* Create a FW file name */
12122         if (CHIP_IS_E1(bp))
12123                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
12124         else
12125                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
12126
12127         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
12128                 BCM_5710_FW_MAJOR_VERSION,
12129                 BCM_5710_FW_MINOR_VERSION,
12130                 BCM_5710_FW_REVISION_VERSION,
12131                 BCM_5710_FW_ENGINEERING_VERSION);
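        /* the two sprintf() calls above yield names of the form
         * "bnx2x-e1-<maj>.<min>.<rev>.<eng>.fw" (or the e1h prefix),
         * which easily fits the 40-byte buffer for the version macros
         * used here
         */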
12132
12133         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
12134
12135         rc = request_firmware(&bp->firmware, fw_file_name, dev);
12136         if (rc) {
12137                 printk(KERN_ERR PFX "Can't load firmware file %s\n",
12138                        fw_file_name);
12139                 goto request_firmware_exit;
12140         }
12141
12142         rc = bnx2x_check_firmware(bp);
12143         if (rc) {
12144                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
12145                 goto request_firmware_exit;
12146         }
12147
12148         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12149
12150         /* Initialize the pointers to the init arrays */
12151         /* Blob */
12152         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12153
12154         /* Opcodes */
12155         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12156
12157         /* Offsets */
12158         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12159                             be16_to_cpu_n);
12160
12161         /* STORMs firmware */
12162         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12163                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12164         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
12165                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12166         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12167                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12168         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
12169                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
12170         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12171                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12172         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
12173                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12174         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12175                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12176         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
12177                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
12178
12179         return 0;
12180
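/* unwind in reverse order of allocation; each label frees only what
 * was successfully set up before the corresponding step failed, and
 * the firmware reference is dropped last
 */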
12181 init_offsets_alloc_err:
12182         kfree(bp->init_ops);
12183 init_ops_alloc_err:
12184         kfree(bp->init_data);
12185 request_firmware_exit:
12186         release_firmware(bp->firmware);
12187
12188         return rc;
12189 }
12190
12191
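/*
 * PCI probe entry point: allocate the multi-queue net_device, map the
 * device BARs (bnx2x_init_dev), set up driver state (bnx2x_init_bp),
 * load and parse the FW file (bnx2x_init_firmware) and only then
 * register the net_device.
 */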
12192 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12193                                     const struct pci_device_id *ent)
12194 {
12195         struct net_device *dev = NULL;
12196         struct bnx2x *bp;
12197         int pcie_width, pcie_speed;
12198         int rc;
12199
12200         /* dev zeroed in alloc_etherdev_mq() */
12201         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12202         if (!dev) {
12203                 printk(KERN_ERR PFX "Cannot allocate net device\n");
12204                 return -ENOMEM;
12205         }
12206
12207         bp = netdev_priv(dev);
12208         bp->msglevel = debug;
12209
12210         pci_set_drvdata(pdev, dev);
12211
12212         rc = bnx2x_init_dev(pdev, dev);
12213         if (rc < 0) {
12214                 free_netdev(dev);
12215                 return rc;
12216         }
12217
12218         rc = bnx2x_init_bp(bp);
12219         if (rc)
12220                 goto init_one_exit;
12221
12222         /* Set init arrays */
12223         rc = bnx2x_init_firmware(bp, &pdev->dev);
12224         if (rc) {
12225                 printk(KERN_ERR PFX "Error loading firmware\n");
12226                 goto init_one_exit;
12227         }
12228
12229         rc = register_netdev(dev);
12230         if (rc) {
12231                 dev_err(&pdev->dev, "Cannot register net device\n");
                      /* bnx2x_init_firmware() succeeded above, so free
                       * its arrays and drop the FW reference here;
                       * init_one_exit does not release them and they
                       * would otherwise leak
                       */
                      kfree(bp->init_ops_offsets);
                      kfree(bp->init_ops);
                      kfree(bp->init_data);
                      release_firmware(bp->firmware);
12232                 goto init_one_exit;
12233         }
12234
12235         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12236         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
12237                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
12238                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12239                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12240                dev->base_addr, bp->pdev->irq);
12241         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
12242
12243         return 0;
12244
12245 init_one_exit:
12246         if (bp->regview)
12247                 iounmap(bp->regview);
12248
12249         if (bp->doorbells)
12250                 iounmap(bp->doorbells);
12251
12252         free_netdev(dev);
12253
12254         if (atomic_read(&pdev->enable_cnt) == 1)
12255                 pci_release_regions(pdev);
12256
12257         pci_disable_device(pdev);
12258         pci_set_drvdata(pdev, NULL);
12259
12260         return rc;
12261 }
12262
12263 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12264 {
12265         struct net_device *dev = pci_get_drvdata(pdev);
12266         struct bnx2x *bp;
12267
12268         if (!dev) {
12269                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12270                 return;
12271         }
12272         bp = netdev_priv(dev);
12273
12274         unregister_netdev(dev);
12275
12276         kfree(bp->init_ops_offsets);
12277         kfree(bp->init_ops);
12278         kfree(bp->init_data);
12279         release_firmware(bp->firmware);
12280
12281         if (bp->regview)
12282                 iounmap(bp->regview);
12283
12284         if (bp->doorbells)
12285                 iounmap(bp->doorbells);
12286
12287         free_netdev(dev);
12288
12289         if (atomic_read(&pdev->enable_cnt) == 1)
12290                 pci_release_regions(pdev);
12291
12292         pci_disable_device(pdev);
12293         pci_set_drvdata(pdev, NULL);
12294 }
12295
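/* PM suspend: under rtnl, detach the net_device, unload the NIC with
 * UNLOAD_CLOSE and drop the chip into the PCI power state derived
 * from @state; bnx2x_resume() below performs the mirror-image steps.
 */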
12296 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12297 {
12298         struct net_device *dev = pci_get_drvdata(pdev);
12299         struct bnx2x *bp;
12300
12301         if (!dev) {
12302                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12303                 return -ENODEV;
12304         }
12305         bp = netdev_priv(dev);
12306
12307         rtnl_lock();
12308
12309         pci_save_state(pdev);
12310
12311         if (!netif_running(dev)) {
12312                 rtnl_unlock();
12313                 return 0;
12314         }
12315
12316         netif_device_detach(dev);
12317
12318         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12319
12320         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
12321
12322         rtnl_unlock();
12323
12324         return 0;
12325 }
12326
12327 static int bnx2x_resume(struct pci_dev *pdev)
12328 {
12329         struct net_device *dev = pci_get_drvdata(pdev);
12330         struct bnx2x *bp;
12331         int rc;
12332
12333         if (!dev) {
12334                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12335                 return -ENODEV;
12336         }
12337         bp = netdev_priv(dev);
12338
12339         rtnl_lock();
12340
12341         pci_restore_state(pdev);
12342
12343         if (!netif_running(dev)) {
12344                 rtnl_unlock();
12345                 return 0;
12346         }
12347
12348         bnx2x_set_power_state(bp, PCI_D0);
12349         netif_device_attach(dev);
12350
12351         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12352
12353         rtnl_unlock();
12354
12355         return rc;
12356 }
12357
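/* Stripped-down unload used on a PCI channel failure: unlike
 * bnx2x_nic_unload() it does not run the usual FW/MCP shutdown
 * handshake and only releases IRQs and SW resources, since the
 * device may no longer be reachable.
 */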
12358 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12359 {
12360         int i;
12361
12362         bp->state = BNX2X_STATE_ERROR;
12363
12364         bp->rx_mode = BNX2X_RX_MODE_NONE;
12365
12366         bnx2x_netif_stop(bp, 0);
12367
12368         del_timer_sync(&bp->timer);
12369         bp->stats_state = STATS_STATE_DISABLED;
12370         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12371
12372         /* Release IRQs */
12373         bnx2x_free_irq(bp);
12374
12375         if (CHIP_IS_E1(bp)) {
12376                 struct mac_configuration_cmd *config =
12377                                                 bnx2x_sp(bp, mcast_config);
12378
12379                 for (i = 0; i < config->hdr.length; i++)
12380                         CAM_INVALIDATE(config->config_table[i]);
12381         }
12382
12383         /* Free SKBs, SGEs, TPA pool and driver internals */
12384         bnx2x_free_skbs(bp);
12385         for_each_rx_queue(bp, i)
12386                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12387         for_each_rx_queue(bp, i)
12388                 netif_napi_del(&bnx2x_fp(bp, i, napi));
12389         bnx2x_free_mem(bp);
12390
12391         bp->state = BNX2X_STATE_CLOSED;
12392
12393         netif_carrier_off(bp->dev);
12394
12395         return 0;
12396 }
12397
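/* After a slot reset, re-discover the MCP: re-read the shared-memory
 * base (a value outside the 0xA0000-0xC0000 window means no active
 * MCP), check its validity signature and resync fw_seq, the
 * driver<->FW mailbox sequence number.
 */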
12398 static void bnx2x_eeh_recover(struct bnx2x *bp)
12399 {
12400         u32 val;
12401
12402         mutex_init(&bp->port.phy_mutex);
12403
12404         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12405         bp->link_params.shmem_base = bp->common.shmem_base;
12406         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12407
12408         if (!bp->common.shmem_base ||
12409             (bp->common.shmem_base < 0xA0000) ||
12410             (bp->common.shmem_base >= 0xC0000)) {
12411                 BNX2X_DEV_INFO("MCP not active\n");
12412                 bp->flags |= NO_MCP_FLAG;
12413                 return;
12414         }
12415
12416         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12417         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12418                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12419                 BNX2X_ERR("BAD MCP validity signature\n");
12420
12421         if (!BP_NOMCP(bp)) {
12422                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12423                               & DRV_MSG_SEQ_NUMBER_MASK);
12424                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12425         }
12426 }
12427
12428 /**
12429  * bnx2x_io_error_detected - called when PCI error is detected
12430  * @pdev: Pointer to PCI device
12431  * @state: The current pci connection state
12432  *
12433  * This function is called after a PCI bus error affecting
12434  * this device has been detected.
12435  */
12436 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12437                                                 pci_channel_state_t state)
12438 {
12439         struct net_device *dev = pci_get_drvdata(pdev);
12440         struct bnx2x *bp = netdev_priv(dev);
12441
12442         rtnl_lock();
12443
12444         netif_device_detach(dev);
12445
12446         if (state == pci_channel_io_perm_failure) {
12447                 rtnl_unlock();
12448                 return PCI_ERS_RESULT_DISCONNECT;
12449         }
12450
12451         if (netif_running(dev))
12452                 bnx2x_eeh_nic_unload(bp);
12453
12454         pci_disable_device(pdev);
12455
12456         rtnl_unlock();
12457
12458         /* Request a slot reset */
12459         return PCI_ERS_RESULT_NEED_RESET;
12460 }
12461
12462 /**
12463  * bnx2x_io_slot_reset - called after the PCI bus has been reset
12464  * @pdev: Pointer to PCI device
12465  *
12466  * Restart the card from scratch, as if from a cold-boot.
12467  */
12468 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12469 {
12470         struct net_device *dev = pci_get_drvdata(pdev);
12471         struct bnx2x *bp = netdev_priv(dev);
12472
12473         rtnl_lock();
12474
12475         if (pci_enable_device(pdev)) {
12476                 dev_err(&pdev->dev,
12477                         "Cannot re-enable PCI device after reset\n");
12478                 rtnl_unlock();
12479                 return PCI_ERS_RESULT_DISCONNECT;
12480         }
12481
12482         pci_set_master(pdev);
12483         pci_restore_state(pdev);
12484
12485         if (netif_running(dev))
12486                 bnx2x_set_power_state(bp, PCI_D0);
12487
12488         rtnl_unlock();
12489
12490         return PCI_ERS_RESULT_RECOVERED;
12491 }
12492
12493 /**
12494  * bnx2x_io_resume - called when traffic can start flowing again
12495  * @pdev: Pointer to PCI device
12496  *
12497  * This callback is called when the error recovery driver tells us that
12498  * it's OK to resume normal operation.
12499  */
12500 static void bnx2x_io_resume(struct pci_dev *pdev)
12501 {
12502         struct net_device *dev = pci_get_drvdata(pdev);
12503         struct bnx2x *bp = netdev_priv(dev);
12504
12505         rtnl_lock();
12506
12507         bnx2x_eeh_recover(bp);
12508
12509         if (netif_running(dev))
12510                 bnx2x_nic_load(bp, LOAD_NORMAL);
12511
12512         netif_device_attach(dev);
12513
12514         rtnl_unlock();
12515 }
12516
12517 static struct pci_error_handlers bnx2x_err_handler = {
12518         .error_detected = bnx2x_io_error_detected,
12519         .slot_reset     = bnx2x_io_slot_reset,
12520         .resume         = bnx2x_io_resume,
12521 };
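/* EEH ordering: the PCI core invokes .error_detected first (detach and
 * unload), then .slot_reset once the link has been reset (re-enable
 * the device) and finally .resume, where bnx2x_eeh_recover() plus
 * bnx2x_nic_load() bring the interface back up.
 */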
12522
12523 static struct pci_driver bnx2x_pci_driver = {
12524         .name        = DRV_MODULE_NAME,
12525         .id_table    = bnx2x_pci_tbl,
12526         .probe       = bnx2x_init_one,
12527         .remove      = __devexit_p(bnx2x_remove_one),
12528         .suspend     = bnx2x_suspend,
12529         .resume      = bnx2x_resume,
12530         .err_handler = &bnx2x_err_handler,
12531 };
12532
12533 static int __init bnx2x_init(void)
12534 {
12535         int ret;
12536
12537         printk(KERN_INFO "%s", version);
12538
12539         bnx2x_wq = create_singlethread_workqueue("bnx2x");
12540         if (bnx2x_wq == NULL) {
12541                 printk(KERN_ERR PFX "Cannot create workqueue\n");
12542                 return -ENOMEM;
12543         }
12544
12545         ret = pci_register_driver(&bnx2x_pci_driver);
12546         if (ret) {
12547                 printk(KERN_ERR PFX "Cannot register driver\n");
12548                 destroy_workqueue(bnx2x_wq);
12549         }
12550         return ret;
12551 }
12552
12553 static void __exit bnx2x_cleanup(void)
12554 {
12555         pci_unregister_driver(&bnx2x_pci_driver);
12556
12557         destroy_workqueue(bnx2x_wq);
12558 }
12559
12560 module_init(bnx2x_init);
12561 module_exit(bnx2x_cleanup);
12562
12563 #ifdef BCM_CNIC
12564
12565 /* count denotes the number of new completions we have seen */
12566 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12567 {
12568         struct eth_spe *spe;
12569
12570 #ifdef BNX2X_STOP_ON_ERROR
12571         if (unlikely(bp->panic))
12572                 return;
12573 #endif
12574
12575         spin_lock_bh(&bp->spq_lock);
12576         bp->cnic_spq_pending -= count;
12577
12578         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12579              bp->cnic_spq_pending++) {
12580
12581                 if (!bp->cnic_kwq_pending)
12582                         break;
12583
12584                 spe = bnx2x_sp_get_next(bp);
12585                 *spe = *bp->cnic_kwq_cons;
12586
12587                 bp->cnic_kwq_pending--;
12588
12589                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12590                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12591
12592                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12593                         bp->cnic_kwq_cons = bp->cnic_kwq;
12594                 else
12595                         bp->cnic_kwq_cons++;
12596         }
12597         bnx2x_sp_prod_update(bp);
12598         spin_unlock_bh(&bp->spq_lock);
12599 }
12600
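/* Entry point for CNIC to submit up to @count 16-byte kwqes: they are
 * copied under spq_lock into the cnic_kwq circular buffer (prod/cons
 * wrap at cnic_kwq_last) and later drained onto the slow-path queue by
 * bnx2x_cnic_sp_post(); the return value is how many were accepted.
 */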
12601 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12602                                struct kwqe_16 *kwqes[], u32 count)
12603 {
12604         struct bnx2x *bp = netdev_priv(dev);
12605         int i;
12606
12607 #ifdef BNX2X_STOP_ON_ERROR
12608         if (unlikely(bp->panic))
12609                 return -EIO;
12610 #endif
12611
12612         spin_lock_bh(&bp->spq_lock);
12613
12614         for (i = 0; i < count; i++) {
12615                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12616
12617                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12618                         break;
12619
12620                 *bp->cnic_kwq_prod = *spe;
12621
12622                 bp->cnic_kwq_pending++;
12623
12624                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12625                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
12626                    spe->data.mac_config_addr.hi,
12627                    spe->data.mac_config_addr.lo,
12628                    bp->cnic_kwq_pending);
12629
12630                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12631                         bp->cnic_kwq_prod = bp->cnic_kwq;
12632                 else
12633                         bp->cnic_kwq_prod++;
12634         }
12635
12636         spin_unlock_bh(&bp->spq_lock);
12637
12638         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12639                 bnx2x_cnic_sp_post(bp, 0);
12640
12641         return i;
12642 }
12643
12644 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12645 {
12646         struct cnic_ops *c_ops;
12647         int rc = 0;
12648
12649         mutex_lock(&bp->cnic_mutex);
12650         c_ops = bp->cnic_ops;
12651         if (c_ops)
12652                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12653         mutex_unlock(&bp->cnic_mutex);
12654
12655         return rc;
12656 }
12657
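/* Like bnx2x_cnic_ctl_send(), but safe in BH context: cnic_ops is
 * sampled under rcu_read_lock() instead of cnic_mutex, which must not
 * be taken from atomic context.
 */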
12658 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12659 {
12660         struct cnic_ops *c_ops;
12661         int rc = 0;
12662
12663         rcu_read_lock();
12664         c_ops = rcu_dereference(bp->cnic_ops);
12665         if (c_ops)
12666                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12667         rcu_read_unlock();
12668
12669         return rc;
12670 }
12671
12672 /*
12673  * for commands that have no data
12674  */
12675 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12676 {
12677         struct cnic_ctl_info ctl = {0};
12678
12679         ctl.cmd = cmd;
12680
12681         return bnx2x_cnic_ctl_send(bp, &ctl);
12682 }
12683
12684 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12685 {
12686         struct cnic_ctl_info ctl;
12687
12688         /* first we tell CNIC and only then we count this as a completion */
12689         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12690         ctl.data.comp.cid = cid;
12691
12692         bnx2x_cnic_ctl_send_bh(bp, &ctl);
12693         bnx2x_cnic_sp_post(bp, 1);
12694 }
12695
12696 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12697 {
12698         struct bnx2x *bp = netdev_priv(dev);
12699         int rc = 0;
12700
12701         switch (ctl->cmd) {
12702         case DRV_CTL_CTXTBL_WR_CMD: {
12703                 u32 index = ctl->data.io.offset;
12704                 dma_addr_t addr = ctl->data.io.dma_addr;
12705
12706                 bnx2x_ilt_wr(bp, index, addr);
12707                 break;
12708         }
12709
12710         case DRV_CTL_COMPLETION_CMD: {
12711                 int count = ctl->data.comp.comp_count;
12712
12713                 bnx2x_cnic_sp_post(bp, count);
12714                 break;
12715         }
12716
12717         /* rtnl_lock is held.  */
12718         case DRV_CTL_START_L2_CMD: {
12719                 u32 cli = ctl->data.ring.client_id;
12720
12721                 bp->rx_mode_cl_mask |= (1 << cli);
12722                 bnx2x_set_storm_rx_mode(bp);
12723                 break;
12724         }
12725
12726         /* rtnl_lock is held.  */
12727         case DRV_CTL_STOP_L2_CMD: {
12728                 u32 cli = ctl->data.ring.client_id;
12729
12730                 bp->rx_mode_cl_mask &= ~(1 << cli);
12731                 bnx2x_set_storm_rx_mode(bp);
12732                 break;
12733         }
12734
12735         default:
12736                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12737                 rc = -EINVAL;
12738         }
12739
12740         return rc;
12741 }
12742
12743 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12744 {
12745         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12746
12747         if (bp->flags & USING_MSIX_FLAG) {
12748                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12749                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12750                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12751         } else {
12752                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12753                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12754         }
12755         cp->irq_arr[0].status_blk = bp->cnic_sb;
12756         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12757         cp->irq_arr[1].status_blk = bp->def_status_blk;
12758         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12759
12760         cp->num_irq = 2;
12761 }
12762
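/* CNIC registration: allocate a page for the kwqe ring, reset the ring
 * indices, init the CNIC status block and only then publish @ops with
 * rcu_assign_pointer(), so lock-free readers such as
 * bnx2x_cnic_ctl_send_bh() never see half-initialized state.
 */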
12763 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12764                                void *data)
12765 {
12766         struct bnx2x *bp = netdev_priv(dev);
12767         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12768
12769         if (ops == NULL)
12770                 return -EINVAL;
12771
12772         if (atomic_read(&bp->intr_sem) != 0)
12773                 return -EBUSY;
12774
12775         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12776         if (!bp->cnic_kwq)
12777                 return -ENOMEM;
12778
12779         bp->cnic_kwq_cons = bp->cnic_kwq;
12780         bp->cnic_kwq_prod = bp->cnic_kwq;
12781         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12782
12783         bp->cnic_spq_pending = 0;
12784         bp->cnic_kwq_pending = 0;
12785
12786         bp->cnic_data = data;
12787
12788         cp->num_irq = 0;
12789         cp->drv_state = CNIC_DRV_STATE_REGD;
12790
12791         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12792
12793         bnx2x_setup_cnic_irq_info(bp);
12794         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12795         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12796         rcu_assign_pointer(bp->cnic_ops, ops);
12797
12798         return 0;
12799 }
12800
12801 static int bnx2x_unregister_cnic(struct net_device *dev)
12802 {
12803         struct bnx2x *bp = netdev_priv(dev);
12804         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12805
12806         mutex_lock(&bp->cnic_mutex);
12807         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12808                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12809                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12810         }
12811         cp->drv_state = 0;
12812         rcu_assign_pointer(bp->cnic_ops, NULL);
12813         mutex_unlock(&bp->cnic_mutex);
12814         synchronize_rcu();
12815         kfree(bp->cnic_kwq);
12816         bp->cnic_kwq = NULL;
12817
12818         return 0;
12819 }
12820
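/* Exported hook through which the cnic module discovers this device:
 * it fills a cnic_eth_dev with the resources (BARs, context-table
 * geometry, starting CID) and the callbacks cnic uses to drive the
 * offload path.
 */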
12821 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12822 {
12823         struct bnx2x *bp = netdev_priv(dev);
12824         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12825
12826         cp->drv_owner = THIS_MODULE;
12827         cp->chip_id = CHIP_ID(bp);
12828         cp->pdev = bp->pdev;
12829         cp->io_base = bp->regview;
12830         cp->io_base2 = bp->doorbells;
12831         cp->max_kwqe_pending = 8;
12832         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12833         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12834         cp->ctx_tbl_len = CNIC_ILT_LINES;
12835         cp->starting_cid = BCM_CNIC_CID_START;
12836         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12837         cp->drv_ctl = bnx2x_drv_ctl;
12838         cp->drv_register_cnic = bnx2x_register_cnic;
12839         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12840
12841         return cp;
12842 }
12843 EXPORT_SYMBOL(bnx2x_cnic_probe);
12844
12845 #endif /* BCM_CNIC */
12846