bnx2x: Refactor bnx2x_sp_post().
drivers/net/bnx2x_main.c
1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h>  /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
51 #include <linux/io.h>
52
53
54 #include "bnx2x.h"
55 #include "bnx2x_init.h"
56 #include "bnx2x_init_ops.h"
57 #include "bnx2x_dump.h"
58
59 #define DRV_MODULE_VERSION      "1.52.1"
60 #define DRV_MODULE_RELDATE      "2009/08/12"
61 #define BNX2X_BC_VER            0x040200
62
63 #include <linux/firmware.h>
64 #include "bnx2x_fw_file_hdr.h"
65 /* FW files */
66 #define FW_FILE_PREFIX_E1       "bnx2x-e1-"
67 #define FW_FILE_PREFIX_E1H      "bnx2x-e1h-"
68
69 /* Time in jiffies before concluding the transmitter is hung */
70 #define TX_TIMEOUT              (5*HZ)
71
72 static char version[] __devinitdata =
73         "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
74         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
76 MODULE_AUTHOR("Eliezer Tamir");
77 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78 MODULE_LICENSE("GPL");
79 MODULE_VERSION(DRV_MODULE_VERSION);
80
81 static int multi_mode = 1;
82 module_param(multi_mode, int, 0);
83 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84                              "(0 Disable; 1 Enable (default))");
85
86 static int num_rx_queues;
87 module_param(num_rx_queues, int, 0);
88 MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89                                 " (default is half number of CPUs)");
90
91 static int num_tx_queues;
92 module_param(num_tx_queues, int, 0);
93 MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94                                 " (default is half number of CPUs)");
95
96 static int disable_tpa;
97 module_param(disable_tpa, int, 0);
98 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
99
100 static int int_mode;
101 module_param(int_mode, int, 0);
102 MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
104 static int dropless_fc;
105 module_param(dropless_fc, int, 0);
106 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
107
108 static int poll;
109 module_param(poll, int, 0);
110 MODULE_PARM_DESC(poll, " Use polling (for debug)");
111
112 static int mrrs = -1;
113 module_param(mrrs, int, 0);
114 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
115
116 static int debug;
117 module_param(debug, int, 0);
118 MODULE_PARM_DESC(debug, " Default debug msglevel");
119
120 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
121
122 static struct workqueue_struct *bnx2x_wq;
123
124 enum bnx2x_board_type {
125         BCM57710 = 0,
126         BCM57711 = 1,
127         BCM57711E = 2,
128 };
129
130 /* indexed by board_type, above */
131 static struct {
132         char *name;
133 } board_info[] __devinitdata = {
134         { "Broadcom NetXtreme II BCM57710 XGb" },
135         { "Broadcom NetXtreme II BCM57711 XGb" },
136         { "Broadcom NetXtreme II BCM57711E XGb" }
137 };
138
139
140 static const struct pci_device_id bnx2x_pci_tbl[] = {
141         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
142         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
143         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
144         { 0 }
145 };
146
147 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
148
149 /****************************************************************************
150 * General service functions
151 ****************************************************************************/
152
153 /* used only at init
154  * locking is done by mcp
155  */
156 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
157 {
158         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
159         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
160         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
161                                PCICFG_VENDOR_ID_OFFSET);
162 }
163
164 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
165 {
166         u32 val;
167
168         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
169         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
170         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
171                                PCICFG_VENDOR_ID_OFFSET);
172
173         return val;
174 }
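
The pair of helpers above tunnels GRC register accesses through PCI config space: the target address is written to the PCICFG_GRC_ADDRESS window, the data moves through PCICFG_GRC_DATA, and the window is parked back at PCICFG_VENDOR_ID_OFFSET so that ordinary config-space reads keep working. A minimal usage sketch (illustrative only; MISC_REG_DRIVER_CONTROL_1 is just an arbitrary GRC register):

        /* init-time read-modify-write through the config-space window;
         * only safe while the MCP serializes access (see comment above) */
        u32 val = bnx2x_reg_rd_ind(bp, MISC_REG_DRIVER_CONTROL_1);
        bnx2x_reg_wr_ind(bp, MISC_REG_DRIVER_CONTROL_1, val | 0x1);
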
175
176 static const u32 dmae_reg_go_c[] = {
177         DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
178         DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
179         DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
180         DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
181 };
182
183 /* copy command into DMAE command memory and set DMAE command go */
184 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
185                             int idx)
186 {
187         u32 cmd_offset;
188         int i;
189
190         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
191         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
192                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
193
194                 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
195                    idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
196         }
197         REG_WR(bp, dmae_reg_go_c[idx], 1);
198 }
199
200 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
201                       u32 len32)
202 {
203         struct dmae_command dmae;
204         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
205         int cnt = 200;
206
207         if (!bp->dmae_ready) {
208                 u32 *data = bnx2x_sp(bp, wb_data[0]);
209
210                 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
211                    "  using indirect\n", dst_addr, len32);
212                 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
213                 return;
214         }
215
216         memset(&dmae, 0, sizeof(struct dmae_command));
217
218         dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
219                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
220                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
221 #ifdef __BIG_ENDIAN
222                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
223 #else
224                        DMAE_CMD_ENDIANITY_DW_SWAP |
225 #endif
226                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
227                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
228         dmae.src_addr_lo = U64_LO(dma_addr);
229         dmae.src_addr_hi = U64_HI(dma_addr);
230         dmae.dst_addr_lo = dst_addr >> 2;
231         dmae.dst_addr_hi = 0;
232         dmae.len = len32;
233         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
234         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
235         dmae.comp_val = DMAE_COMP_VAL;
236
237         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
238            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
239                     "dst_addr [%x:%08x (%08x)]\n"
240            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
241            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
242            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
243            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
244         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
245            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
246            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
247
248         mutex_lock(&bp->dmae_mutex);
249
250         *wb_comp = 0;
251
252         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
253
254         udelay(5);
255
256         while (*wb_comp != DMAE_COMP_VAL) {
257                 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
258
259                 if (!cnt) {
260                         BNX2X_ERR("DMAE timeout!\n");
261                         break;
262                 }
263                 cnt--;
264                 /* adjust delay for emulation/FPGA */
265                 if (CHIP_REV_IS_SLOW(bp))
266                         msleep(100);
267                 else
268                         udelay(5);
269         }
270
271         mutex_unlock(&bp->dmae_mutex);
272 }
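
A sketch of a typical caller of bnx2x_write_dmae(): the source must be a DMA-coherent host buffer, dst_addr is a GRC byte address, and len32 counts 32-bit words (hence the dst_addr >> 2 in the command setup above). The names below (src, grc_dst_addr) are hypothetical:

        u32 *buf;
        dma_addr_t buf_mapping;

        /* stage the data in a coherent buffer, then fire the DMAE copy */
        buf = pci_alloc_consistent(bp->pdev, len32 * 4, &buf_mapping);
        if (buf) {
                memcpy(buf, src, len32 * 4);
                bnx2x_write_dmae(bp, buf_mapping, grc_dst_addr, len32);
                pci_free_consistent(bp->pdev, len32 * 4, buf, buf_mapping);
        }
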
273
274 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
275 {
276         struct dmae_command dmae;
277         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
278         int cnt = 200;
279
280         if (!bp->dmae_ready) {
281                 u32 *data = bnx2x_sp(bp, wb_data[0]);
282                 int i;
283
284                 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
285                    "  using indirect\n", src_addr, len32);
286                 for (i = 0; i < len32; i++)
287                         data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
288                 return;
289         }
290
291         memset(&dmae, 0, sizeof(struct dmae_command));
292
293         dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
294                        DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
295                        DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
296 #ifdef __BIG_ENDIAN
297                        DMAE_CMD_ENDIANITY_B_DW_SWAP |
298 #else
299                        DMAE_CMD_ENDIANITY_DW_SWAP |
300 #endif
301                        (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
302                        (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
303         dmae.src_addr_lo = src_addr >> 2;
304         dmae.src_addr_hi = 0;
305         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
306         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
307         dmae.len = len32;
308         dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
309         dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
310         dmae.comp_val = DMAE_COMP_VAL;
311
312         DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
313            DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
314                     "dst_addr [%x:%08x (%08x)]\n"
315            DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
316            dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
317            dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
318            dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
319
320         mutex_lock(&bp->dmae_mutex);
321
322         memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
323         *wb_comp = 0;
324
325         bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
326
327         udelay(5);
328
329         while (*wb_comp != DMAE_COMP_VAL) {
330
331                 if (!cnt) {
332                         BNX2X_ERR("DMAE timeout!\n");
333                         break;
334                 }
335                 cnt--;
336                 /* adjust delay for emulation/FPGA */
337                 if (CHIP_REV_IS_SLOW(bp))
338                         msleep(100);
339                 else
340                         udelay(5);
341         }
342         DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
343            bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
344            bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
345
346         mutex_unlock(&bp->dmae_mutex);
347 }
348
349 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
350                                u32 addr, u32 len)
351 {
352         int offset = 0;
353
354         while (len > DMAE_LEN32_WR_MAX) {
355                 bnx2x_write_dmae(bp, phys_addr + offset,
356                                  addr + offset, DMAE_LEN32_WR_MAX);
357                 offset += DMAE_LEN32_WR_MAX * 4;
358                 len -= DMAE_LEN32_WR_MAX;
359         }
360
361         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
362 }
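
Since a single DMAE command moves at most DMAE_LEN32_WR_MAX words, the wrapper above slices large writes; note that offset advances in bytes (words * 4) while len is decremented in words. As a worked example, assuming DMAE_LEN32_WR_MAX were 0x400 words (the real value lives in bnx2x.h), a 0x900-word request would unroll into:

        bnx2x_write_dmae(bp, phys_addr + 0x0000, addr + 0x0000, 0x400);
        bnx2x_write_dmae(bp, phys_addr + 0x1000, addr + 0x1000, 0x400);
        bnx2x_write_dmae(bp, phys_addr + 0x2000, addr + 0x2000, 0x100);
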
363
364 /* used only for slowpath so not inlined */
365 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
366 {
367         u32 wb_write[2];
368
369         wb_write[0] = val_hi;
370         wb_write[1] = val_lo;
371         REG_WR_DMAE(bp, reg, wb_write, 2);
372 }
373
374 #ifdef USE_WB_RD
375 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
376 {
377         u32 wb_data[2];
378
379         REG_RD_DMAE(bp, reg, wb_data, 2);
380
381         return HILO_U64(wb_data[0], wb_data[1]);
382 }
383 #endif
384
385 static int bnx2x_mc_assert(struct bnx2x *bp)
386 {
387         char last_idx;
388         int i, rc = 0;
389         u32 row0, row1, row2, row3;
390
391         /* XSTORM */
392         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
393                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
394         if (last_idx)
395                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
396
397         /* print the asserts */
398         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
399
400                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
401                               XSTORM_ASSERT_LIST_OFFSET(i));
402                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
403                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
404                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
405                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
406                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
407                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
408
409                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
410                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
411                                   " 0x%08x 0x%08x 0x%08x\n",
412                                   i, row3, row2, row1, row0);
413                         rc++;
414                 } else {
415                         break;
416                 }
417         }
418
419         /* TSTORM */
420         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
421                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
422         if (last_idx)
423                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
424
425         /* print the asserts */
426         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
427
428                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
429                               TSTORM_ASSERT_LIST_OFFSET(i));
430                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
431                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
432                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
433                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
434                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
435                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
436
437                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
438                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
439                                   " 0x%08x 0x%08x 0x%08x\n",
440                                   i, row3, row2, row1, row0);
441                         rc++;
442                 } else {
443                         break;
444                 }
445         }
446
447         /* CSTORM */
448         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
449                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
450         if (last_idx)
451                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
452
453         /* print the asserts */
454         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
455
456                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
457                               CSTORM_ASSERT_LIST_OFFSET(i));
458                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
459                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
460                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
461                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
462                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
463                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
464
465                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
466                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
467                                   " 0x%08x 0x%08x 0x%08x\n",
468                                   i, row3, row2, row1, row0);
469                         rc++;
470                 } else {
471                         break;
472                 }
473         }
474
475         /* USTORM */
476         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
477                            USTORM_ASSERT_LIST_INDEX_OFFSET);
478         if (last_idx)
479                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
480
481         /* print the asserts */
482         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
483
484                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
485                               USTORM_ASSERT_LIST_OFFSET(i));
486                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
487                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
488                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
489                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
490                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
491                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
492
493                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
494                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
495                                   " 0x%08x 0x%08x 0x%08x\n",
496                                   i, row3, row2, row1, row0);
497                         rc++;
498                 } else {
499                         break;
500                 }
501         }
502
503         return rc;
504 }
505
506 static void bnx2x_fw_dump(struct bnx2x *bp)
507 {
508         u32 mark, offset;
509         __be32 data[9];
510         int word;
511
512         mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
513         mark = ((mark + 0x3) & ~0x3);
514         printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
515
516         printk(KERN_ERR PFX);
517         for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
518                 for (word = 0; word < 8; word++)
519                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
520                                                   offset + 4*word));
521                 data[8] = 0x0;
522                 printk(KERN_CONT "%s", (char *)data);
523         }
524         for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
525                 for (word = 0; word < 8; word++)
526                         data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
527                                                   offset + 4*word));
528                 data[8] = 0x0;
529                 printk(KERN_CONT "%s", (char *)data);
530         }
531         printk(KERN_ERR PFX "end of fw dump\n");
532 }
533
534 static void bnx2x_panic_dump(struct bnx2x *bp)
535 {
536         int i;
537         u16 j, start, end;
538
539         bp->stats_state = STATS_STATE_DISABLED;
540         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
541
542         BNX2X_ERR("begin crash dump -----------------\n");
543
544         /* Indices */
545         /* Common */
546         BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
547                   "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
548                   "  spq_prod_idx(%u)\n",
549                   bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
550                   bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
551
552         /* Rx */
553         for_each_rx_queue(bp, i) {
554                 struct bnx2x_fastpath *fp = &bp->fp[i];
555
556                 BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
557                           "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
558                           "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
559                           i, fp->rx_bd_prod, fp->rx_bd_cons,
560                           le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
561                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
562                 BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
563                           "  fp_u_idx(%x) *sb_u_idx(%x)\n",
564                           fp->rx_sge_prod, fp->last_max_sge,
565                           le16_to_cpu(fp->fp_u_idx),
566                           fp->status_blk->u_status_block.status_block_index);
567         }
568
569         /* Tx */
570         for_each_tx_queue(bp, i) {
571                 struct bnx2x_fastpath *fp = &bp->fp[i];
572
573                 BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
574                           "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
575                           i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
576                           fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
577                 BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
578                           "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
579                           fp->status_blk->c_status_block.status_block_index,
580                           fp->tx_db.data.prod);
581         }
582
583         /* Rings */
584         /* Rx */
585         for_each_rx_queue(bp, i) {
586                 struct bnx2x_fastpath *fp = &bp->fp[i];
587
588                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
589                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
590                 for (j = start; j != end; j = RX_BD(j + 1)) {
591                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
592                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
593
594                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
595                                   i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
596                 }
597
598                 start = RX_SGE(fp->rx_sge_prod);
599                 end = RX_SGE(fp->last_max_sge);
600                 for (j = start; j != end; j = RX_SGE(j + 1)) {
601                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
602                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
603
604                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
605                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
606                 }
607
608                 start = RCQ_BD(fp->rx_comp_cons - 10);
609                 end = RCQ_BD(fp->rx_comp_cons + 503);
610                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
611                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
612
613                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
614                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
615                 }
616         }
617
618         /* Tx */
619         for_each_tx_queue(bp, i) {
620                 struct bnx2x_fastpath *fp = &bp->fp[i];
621
622                 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
623                 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
624                 for (j = start; j != end; j = TX_BD(j + 1)) {
625                         struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
626
627                         BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
628                                   i, j, sw_bd->skb, sw_bd->first_bd);
629                 }
630
631                 start = TX_BD(fp->tx_bd_cons - 10);
632                 end = TX_BD(fp->tx_bd_cons + 254);
633                 for (j = start; j != end; j = TX_BD(j + 1)) {
634                         u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
635
636                         BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
637                                   i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
638                 }
639         }
640
641         bnx2x_fw_dump(bp);
642         bnx2x_mc_assert(bp);
643         BNX2X_ERR("end crash dump -----------------\n");
644 }
645
646 static void bnx2x_int_enable(struct bnx2x *bp)
647 {
648         int port = BP_PORT(bp);
649         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
650         u32 val = REG_RD(bp, addr);
651         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
652         int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
653
654         if (msix) {
655                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
656                          HC_CONFIG_0_REG_INT_LINE_EN_0);
657                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
658                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
659         } else if (msi) {
660                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
661                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
662                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
663                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
664         } else {
665                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
666                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
667                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
668                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
669
670                 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
671                    val, port, addr);
672
673                 REG_WR(bp, addr, val);
674
675                 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
676         }
677
678         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
679            val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
680
681         REG_WR(bp, addr, val);
682         /*
683          * Ensure that HC_CONFIG is written before leading/trailing edge config
684          */
685         mmiowb();
686         barrier();
687
688         if (CHIP_IS_E1H(bp)) {
689                 /* init leading/trailing edge */
690                 if (IS_E1HMF(bp)) {
691                         val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
692                         if (bp->port.pmf)
693                                 /* enable nig and gpio3 attention */
694                                 val |= 0x1100;
695                 } else
696                         val = 0xffff;
697
698                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
699                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
700         }
701
702         /* Make sure that interrupts are indeed enabled from here on */
703         mmiowb();
704 }
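
For reference, the HC_CONFIG bits the function above ends up with in each mode, read directly off the code (INTx shown after its second write):

        /*
         *   mode    SINGLE_ISR  MSI_MSIX  INT_LINE  ATTN_BIT
         *   MSI-X       0           1         0         1
         *   MSI         1           1         0         1
         *   INTx        1           0         1         1
         *
         * In the INTx case MSI_MSIX is set for the first write only and
         * cleared again before the final write.
         */
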
705
706 static void bnx2x_int_disable(struct bnx2x *bp)
707 {
708         int port = BP_PORT(bp);
709         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
710         u32 val = REG_RD(bp, addr);
711
712         val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
713                  HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
714                  HC_CONFIG_0_REG_INT_LINE_EN_0 |
715                  HC_CONFIG_0_REG_ATTN_BIT_EN_0);
716
717         DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
718            val, port, addr);
719
720         /* flush all outstanding writes */
721         mmiowb();
722
723         REG_WR(bp, addr, val);
724         if (REG_RD(bp, addr) != val)
725                 BNX2X_ERR("BUG! proper val not read from IGU!\n");
726 }
727
728 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
729 {
730         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
731         int i, offset;
732
733         /* disable interrupt handling */
734         atomic_inc(&bp->intr_sem);
735         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
736
737         if (disable_hw)
738                 /* prevent the HW from sending interrupts */
739                 bnx2x_int_disable(bp);
740
741         /* make sure all ISRs are done */
742         if (msix) {
743                 synchronize_irq(bp->msix_table[0].vector);
744                 offset = 1;
745                 for_each_queue(bp, i)
746                         synchronize_irq(bp->msix_table[i + offset].vector);
747         } else
748                 synchronize_irq(bp->pdev->irq);
749
750         /* make sure sp_task is not running */
751         cancel_delayed_work(&bp->sp_task);
752         flush_workqueue(bnx2x_wq);
753 }
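
The teardown contract, sketched: bumping intr_sem makes any ISR that still fires bail out early, the optional HW mask stops new interrupts at the source, and synchronize_irq() plus the workqueue flush drain everything in flight. A caller on the unload path would therefore do something like this (the follow-up steps are named only descriptively):

        bnx2x_int_disable_sync(bp, 1);  /* mask HW and drain handlers */
        /* ...only now is it safe to free the IRQs and tear down rings */
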
754
755 /* fast path */
756
757 /*
758  * General service functions
759  */
760
761 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
762                                 u8 storm, u16 index, u8 op, u8 update)
763 {
764         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
765                        COMMAND_REG_INT_ACK);
766         struct igu_ack_register igu_ack;
767
768         igu_ack.status_block_index = index;
769         igu_ack.sb_id_and_flags =
770                         ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
771                          (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
772                          (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
773                          (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
774
775         DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
776            (*(u32 *)&igu_ack), hc_addr);
777         REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
778
779         /* Make sure that ACK is written */
780         mmiowb();
781         barrier();
782 }
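
A usage sketch: the NAPI completion path elsewhere in this driver acks both fastpath indices and re-enables the line with the second write, roughly as follows (a reconstruction, not copied verbatim):

        bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
                     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
        bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
                     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);

The update flag tells the IGU to latch the new index; op selects whether the interrupt line is re-enabled or left as it is.
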
783
784 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
785 {
786         struct host_status_block *fpsb = fp->status_blk;
787         u16 rc = 0;
788
789         barrier(); /* status block is written to by the chip */
790         if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
791                 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
792                 rc |= 1;
793         }
794         if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
795                 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
796                 rc |= 2;
797         }
798         return rc;
799 }
800
801 static u16 bnx2x_ack_int(struct bnx2x *bp)
802 {
803         u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
804                        COMMAND_REG_SIMD_MASK);
805         u32 result = REG_RD(bp, hc_addr);
806
807         DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
808            result, hc_addr);
809
810         return result;
811 }
812
813
814 /*
815  * fast path service functions
816  */
817
818 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
819 {
820         /* Tell compiler that consumer and producer can change */
821         barrier();
822         return (fp->tx_pkt_prod != fp->tx_pkt_cons);
823 }
824
825 /* free skb in the packet ring at pos idx
826  * return idx of last bd freed
827  */
828 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
829                              u16 idx)
830 {
831         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
832         struct eth_tx_start_bd *tx_start_bd;
833         struct eth_tx_bd *tx_data_bd;
834         struct sk_buff *skb = tx_buf->skb;
835         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
836         int nbd;
837
838         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
839            idx, tx_buf, skb);
840
841         /* unmap first bd */
842         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
843         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
844         pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
845                          BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
846
847         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
848 #ifdef BNX2X_STOP_ON_ERROR
849         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
850                 BNX2X_ERR("BAD nbd!\n");
851                 bnx2x_panic();
852         }
853 #endif
854         new_cons = nbd + tx_buf->first_bd;
855
856         /* Get the next bd */
857         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
858
859         /* Skip a parse bd... */
860         --nbd;
861         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
862
863         /* ...and the TSO split header bd since they have no mapping */
864         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
865                 --nbd;
866                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
867         }
868
869         /* now free frags */
870         while (nbd > 0) {
871
872                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
873                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
874                 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
875                                BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
876                 if (--nbd)
877                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
878         }
879
880         /* release skb */
881         WARN_ON(!skb);
882         dev_kfree_skb_any(skb);
883         tx_buf->first_bd = 0;
884         tx_buf->skb = NULL;
885
886         return new_cons;
887 }
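
The BD walking above is easier to follow against the on-ring layout of one worst-case (TSO) packet; only the start BD and the fragment BDs carry DMA mappings, which is why the parse BD and the optional split-header BD are skipped rather than unmapped:

        /*
         * [start bd][parse bd][tso split hdr bd][frag bd] ... [frag bd]
         *   mapped    no map       no map         mapped        mapped
         *
         * nbd counts the BDs of the chain and drives new_cons past it.
         */
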
888
889 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
890 {
891         s16 used;
892         u16 prod;
893         u16 cons;
894
895         barrier(); /* Tell compiler that prod and cons can change */
896         prod = fp->tx_bd_prod;
897         cons = fp->tx_bd_cons;
898
899         /* NUM_TX_RINGS = number of "next-page" entries
900            It will be used as a threshold */
901         used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
902
903 #ifdef BNX2X_STOP_ON_ERROR
904         WARN_ON(used < 0);
905         WARN_ON(used > fp->bp->tx_ring_size);
906         WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
907 #endif
908
909         return (s16)(fp->bp->tx_ring_size) - used;
910 }
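
The signed 16-bit subtraction keeps the math wrap-safe. Assuming SUB_S16(a, b) is (s16)(a - b), a worked example with a wrapped producer:

        /* prod = 5 (after wrap), cons = 0xfffb:
         * SUB_S16(5, 0xfffb) = 10, so used = 10 + NUM_TX_RINGS --
         * correct even though prod < cons when compared as plain u16 */
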
911
912 static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
913 {
914         struct bnx2x *bp = fp->bp;
915         struct netdev_queue *txq;
916         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
917         int done = 0;
918
919 #ifdef BNX2X_STOP_ON_ERROR
920         if (unlikely(bp->panic))
921                 return;
922 #endif
923
924         txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
925         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
926         sw_cons = fp->tx_pkt_cons;
927
928         while (sw_cons != hw_cons) {
929                 u16 pkt_cons;
930
931                 pkt_cons = TX_BD(sw_cons);
932
933                 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
934
935                 DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
936                    hw_cons, sw_cons, pkt_cons);
937
938 /*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
939                         rmb();
940                         prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
941                 }
942 */
943                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
944                 sw_cons++;
945                 done++;
946         }
947
948         fp->tx_pkt_cons = sw_cons;
949         fp->tx_bd_cons = bd_cons;
950
951         /* TBD need a thresh? */
952         if (unlikely(netif_tx_queue_stopped(txq))) {
953
954                 /* Need to make the tx_bd_cons update visible to start_xmit()
955                  * before checking for netif_tx_queue_stopped().  Without the
956                  * memory barrier, there is a small possibility that
957                  * start_xmit() will miss it and cause the queue to be stopped
958                  * forever.
959                  */
960                 smp_mb();
961
962                 if ((netif_tx_queue_stopped(txq)) &&
963                     (bp->state == BNX2X_STATE_OPEN) &&
964                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
965                         netif_tx_wake_queue(txq);
966         }
967 }
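
The smp_mb() above pairs with a barrier on the producer side. The stop/re-check sequence in the driver's start_xmit looks roughly like this (simplified sketch):

        if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
                netif_tx_stop_queue(txq);
                smp_mb(); /* let bnx2x_tx_int() see the stopped state */
                if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
                        netif_tx_wake_queue(txq);
        }
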
968
969
970 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
971                            union eth_rx_cqe *rr_cqe)
972 {
973         struct bnx2x *bp = fp->bp;
974         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
975         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
976
977         DP(BNX2X_MSG_SP,
978            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
979            fp->index, cid, command, bp->state,
980            rr_cqe->ramrod_cqe.ramrod_type);
981
982         bp->spq_left++;
983
984         if (fp->index) {
985                 switch (command | fp->state) {
986                 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
987                                                 BNX2X_FP_STATE_OPENING):
988                         DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
989                            cid);
990                         fp->state = BNX2X_FP_STATE_OPEN;
991                         break;
992
993                 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
994                         DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
995                            cid);
996                         fp->state = BNX2X_FP_STATE_HALTED;
997                         break;
998
999                 default:
1000                         BNX2X_ERR("unexpected MC reply (%d)  "
1001                                   "fp->state is %x\n", command, fp->state);
1002                         break;
1003                 }
1004                 mb(); /* force bnx2x_wait_ramrod() to see the change */
1005                 return;
1006         }
1007
1008         switch (command | bp->state) {
1009         case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1010                 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1011                 bp->state = BNX2X_STATE_OPEN;
1012                 break;
1013
1014         case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1015                 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1016                 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1017                 fp->state = BNX2X_FP_STATE_HALTED;
1018                 break;
1019
1020         case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1021                 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1022                 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1023                 break;
1024
1025
1026         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1027         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1028                 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1029                 bp->set_mac_pending = 0;
1030                 break;
1031
1032         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1033         case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1034                 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1035                 break;
1036
1037         default:
1038                 BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
1039                           command, bp->state);
1040                 break;
1041         }
1042         mb(); /* force bnx2x_wait_ramrod() to see the change */
1043 }
1044
1045 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1046                                      struct bnx2x_fastpath *fp, u16 index)
1047 {
1048         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1049         struct page *page = sw_buf->page;
1050         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1051
1052         /* Skip "next page" elements */
1053         if (!page)
1054                 return;
1055
1056         pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
1057                        SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1058         __free_pages(page, PAGES_PER_SGE_SHIFT);
1059
1060         sw_buf->page = NULL;
1061         sge->addr_hi = 0;
1062         sge->addr_lo = 0;
1063 }
1064
1065 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1066                                            struct bnx2x_fastpath *fp, int last)
1067 {
1068         int i;
1069
1070         for (i = 0; i < last; i++)
1071                 bnx2x_free_rx_sge(bp, fp, i);
1072 }
1073
1074 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1075                                      struct bnx2x_fastpath *fp, u16 index)
1076 {
1077         struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1078         struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1079         struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1080         dma_addr_t mapping;
1081
1082         if (unlikely(page == NULL))
1083                 return -ENOMEM;
1084
1085         mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
1086                                PCI_DMA_FROMDEVICE);
1087         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1088                 __free_pages(page, PAGES_PER_SGE_SHIFT);
1089                 return -ENOMEM;
1090         }
1091
1092         sw_buf->page = page;
1093         pci_unmap_addr_set(sw_buf, mapping, mapping);
1094
1095         sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1096         sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1097
1098         return 0;
1099 }
1100
1101 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1102                                      struct bnx2x_fastpath *fp, u16 index)
1103 {
1104         struct sk_buff *skb;
1105         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1106         struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1107         dma_addr_t mapping;
1108
1109         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1110         if (unlikely(skb == NULL))
1111                 return -ENOMEM;
1112
1113         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1114                                  PCI_DMA_FROMDEVICE);
1115         if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1116                 dev_kfree_skb(skb);
1117                 return -ENOMEM;
1118         }
1119
1120         rx_buf->skb = skb;
1121         pci_unmap_addr_set(rx_buf, mapping, mapping);
1122
1123         rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1124         rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1125
1126         return 0;
1127 }
1128
1129 /* note that we are not allocating a new skb:
1130  * we are just moving one from cons to prod;
1131  * we are not creating a new mapping,
1132  * so there is no need to check for dma_mapping_error().
1133  */
1134 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1135                                struct sk_buff *skb, u16 cons, u16 prod)
1136 {
1137         struct bnx2x *bp = fp->bp;
1138         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1139         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1140         struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1141         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1142
1143         pci_dma_sync_single_for_device(bp->pdev,
1144                                        pci_unmap_addr(cons_rx_buf, mapping),
1145                                        RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1146
1147         prod_rx_buf->skb = cons_rx_buf->skb;
1148         pci_unmap_addr_set(prod_rx_buf, mapping,
1149                            pci_unmap_addr(cons_rx_buf, mapping));
1150         *prod_bd = *cons_bd;
1151 }
1152
1153 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1154                                              u16 idx)
1155 {
1156         u16 last_max = fp->last_max_sge;
1157
1158         if (SUB_S16(idx, last_max) > 0)
1159                 fp->last_max_sge = idx;
1160 }
1161
1162 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1163 {
1164         int i, j;
1165
1166         for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1167                 int idx = RX_SGE_CNT * i - 1;
1168
1169                 for (j = 0; j < 2; j++) {
1170                         SGE_MASK_CLEAR_BIT(fp, idx);
1171                         idx--;
1172                 }
1173         }
1174 }
1175
1176 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1177                                   struct eth_fast_path_rx_cqe *fp_cqe)
1178 {
1179         struct bnx2x *bp = fp->bp;
1180         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1181                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
1182                       SGE_PAGE_SHIFT;
1183         u16 last_max, last_elem, first_elem;
1184         u16 delta = 0;
1185         u16 i;
1186
1187         if (!sge_len)
1188                 return;
1189
1190         /* First mark all used pages */
1191         for (i = 0; i < sge_len; i++)
1192                 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1193
1194         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1195            sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1196
1197         /* Here we assume that the last SGE index is the biggest */
1198         prefetch((void *)(fp->sge_mask));
1199         bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1200
1201         last_max = RX_SGE(fp->last_max_sge);
1202         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1203         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1204
1205         /* If ring is not full */
1206         if (last_elem + 1 != first_elem)
1207                 last_elem++;
1208
1209         /* Now update the prod */
1210         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1211                 if (likely(fp->sge_mask[i]))
1212                         break;
1213
1214                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1215                 delta += RX_SGE_MASK_ELEM_SZ;
1216         }
1217
1218         if (delta > 0) {
1219                 fp->rx_sge_prod += delta;
1220                 /* clear page-end entries */
1221                 bnx2x_clear_sge_mask_next_elems(fp);
1222         }
1223
1224         DP(NETIF_MSG_RX_STATUS,
1225            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
1226            fp->last_max_sge, fp->rx_sge_prod);
1227 }
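
Put differently, the SGE producer only advances in whole mask elements. Assuming RX_SGE_MASK_ELEM_SZ is 64 (one u64 of mask bits per element), one iteration of the producer loop behaves as:

        /* sge_mask[i] == 0: every SGE covered by element i is consumed
         *   -> refill it: fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK
         *   -> delta += 64: rx_sge_prod jumps one whole element forward */
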
1228
1229 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1230 {
1231         /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1232         memset(fp->sge_mask, 0xff,
1233                (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1234
1235         /* Clear the two last indices in the page to 1:
1236            these are the indices that correspond to the "next" element,
1237            hence will never be indicated and should be removed from
1238            the calculations. */
1239         bnx2x_clear_sge_mask_next_elems(fp);
1240 }
1241
1242 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1243                             struct sk_buff *skb, u16 cons, u16 prod)
1244 {
1245         struct bnx2x *bp = fp->bp;
1246         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1247         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1248         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1249         dma_addr_t mapping;
1250
1251         /* move empty skb from pool to prod and map it */
1252         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1253         mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1254                                  bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1255         pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1256
1257         /* move partial skb from cons to pool (don't unmap yet) */
1258         fp->tpa_pool[queue] = *cons_rx_buf;
1259
1260         /* mark bin state as start - print error if current state != stop */
1261         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1262                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1263
1264         fp->tpa_state[queue] = BNX2X_TPA_START;
1265
1266         /* point prod_bd to new skb */
1267         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1268         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1269
1270 #ifdef BNX2X_STOP_ON_ERROR
1271         fp->tpa_queue_used |= (1 << queue);
1272 #ifdef __powerpc64__
1273         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1274 #else
1275         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1276 #endif
1277            fp->tpa_queue_used);
1278 #endif
1279 }
1280
1281 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1282                                struct sk_buff *skb,
1283                                struct eth_fast_path_rx_cqe *fp_cqe,
1284                                u16 cqe_idx)
1285 {
1286         struct sw_rx_page *rx_pg, old_rx_pg;
1287         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1288         u32 i, frag_len, frag_size, pages;
1289         int err;
1290         int j;
1291
1292         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1293         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1294
1295         /* This is needed in order to enable forwarding support */
1296         if (frag_size)
1297                 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1298                                                max(frag_size, (u32)len_on_bd));
1299
1300 #ifdef BNX2X_STOP_ON_ERROR
1301         if (pages >
1302             min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1303                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1304                           pages, cqe_idx);
1305                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
1306                           fp_cqe->pkt_len, len_on_bd);
1307                 bnx2x_panic();
1308                 return -EINVAL;
1309         }
1310 #endif
1311
1312         /* Run through the SGL and compose the fragmented skb */
1313         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1314                 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1315
1316                 /* FW gives the indices of the SGE as if the ring is an array
1317                    (meaning that "next" element will consume 2 indices) */
1318                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1319                 rx_pg = &fp->rx_page_ring[sge_idx];
1320                 old_rx_pg = *rx_pg;
1321
1322                 /* If we fail to allocate a substitute page, we simply stop
1323                    where we are and drop the whole packet */
1324                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1325                 if (unlikely(err)) {
1326                         fp->eth_q_stats.rx_skb_alloc_failed++;
1327                         return err;
1328                 }
1329
1330                 /* Unmap the page as we are going to pass it to the stack */
1331                 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1332                               SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1333
1334                 /* Add one frag and update the appropriate fields in the skb */
1335                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1336
1337                 skb->data_len += frag_len;
1338                 skb->truesize += frag_len;
1339                 skb->len += frag_len;
1340
1341                 frag_size -= frag_len;
1342         }
1343
1344         return 0;
1345 }
1346
1347 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1348                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1349                            u16 cqe_idx)
1350 {
1351         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1352         struct sk_buff *skb = rx_buf->skb;
1353         /* alloc new skb */
1354         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1355
1356         /* Unmap skb in the pool anyway, as we are going to change
1357            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1358            fails. */
1359         pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1360                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1361
1362         if (likely(new_skb)) {
1363                 /* fix ip xsum and give it to the stack */
1364                 /* (no need to map the new skb) */
1365 #ifdef BCM_VLAN
1366                 int is_vlan_cqe =
1367                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1368                          PARSING_FLAGS_VLAN);
1369                 int is_not_hwaccel_vlan_cqe =
1370                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1371 #endif
1372
1373                 prefetch(skb);
1374                 prefetch(((char *)(skb)) + 128);
1375
1376 #ifdef BNX2X_STOP_ON_ERROR
1377                 if (pad + len > bp->rx_buf_size) {
1378                         BNX2X_ERR("skb_put is about to fail...  "
1379                                   "pad %d  len %d  rx_buf_size %d\n",
1380                                   pad, len, bp->rx_buf_size);
1381                         bnx2x_panic();
1382                         return;
1383                 }
1384 #endif
1385
1386                 skb_reserve(skb, pad);
1387                 skb_put(skb, len);
1388
1389                 skb->protocol = eth_type_trans(skb, bp->dev);
1390                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1391
1392                 {
1393                         struct iphdr *iph;
1394
1395                         iph = (struct iphdr *)skb->data;
1396 #ifdef BCM_VLAN
1397                         /* If there is no Rx VLAN offloading -
1398                            take VLAN tag into an account */
1399                         if (unlikely(is_not_hwaccel_vlan_cqe))
1400                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1401 #endif
1402                         iph->check = 0;
1403                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1404                 }
1405
1406                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1407                                          &cqe->fast_path_cqe, cqe_idx)) {
1408 #ifdef BCM_VLAN
1409                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1410                             (!is_not_hwaccel_vlan_cqe))
1411                                 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1412                                                 le16_to_cpu(cqe->fast_path_cqe.
1413                                                             vlan_tag));
1414                         else
1415 #endif
1416                                 netif_receive_skb(skb);
1417                 } else {
1418                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1419                            " - dropping packet!\n");
1420                         dev_kfree_skb(skb);
1421                 }
1422
1423
1424                 /* put new skb in bin */
1425                 fp->tpa_pool[queue].skb = new_skb;
1426
1427         } else {
1428                 /* else drop the packet and keep the buffer in the bin */
1429                 DP(NETIF_MSG_RX_STATUS,
1430                    "Failed to allocate new skb - dropping packet!\n");
1431                 fp->eth_q_stats.rx_skb_alloc_failed++;
1432         }
1433
1434         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1435 }
1436
1437 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1438                                         struct bnx2x_fastpath *fp,
1439                                         u16 bd_prod, u16 rx_comp_prod,
1440                                         u16 rx_sge_prod)
1441 {
1442         struct ustorm_eth_rx_producers rx_prods = {0};
1443         int i;
1444
1445         /* Update producers */
1446         rx_prods.bd_prod = bd_prod;
1447         rx_prods.cqe_prod = rx_comp_prod;
1448         rx_prods.sge_prod = rx_sge_prod;
1449
1450         /*
1451          * Make sure that the BD and SGE data is updated before updating the
1452          * producers since FW might read the BD/SGE right after the producer
1453          * is updated.
1454          * This is only applicable for weak-ordered memory model archs such
1455          * as IA-64. The following barrier is also mandatory since the FW
1456          * assumes BDs must have buffers.
1457          */
1458         wmb();
1459
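        /* mirror the producers into USTORM internal memory one 32-bit
         * word at a time (REG_WR performs 32-bit writes)
         */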
1460         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1461                 REG_WR(bp, BAR_USTRORM_INTMEM +
1462                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1463                        ((u32 *)&rx_prods)[i]);
1464
1465         mmiowb(); /* keep prod updates ordered */
1466
1467         DP(NETIF_MSG_RX_STATUS,
1468            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1469            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1470 }
1471
1472 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1473 {
1474         struct bnx2x *bp = fp->bp;
1475         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1476         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1477         int rx_pkt = 0;
1478
1479 #ifdef BNX2X_STOP_ON_ERROR
1480         if (unlikely(bp->panic))
1481                 return 0;
1482 #endif
1483
1484         /* The CQ "next element" is the same size as a regular element,
1485            so simply incrementing past it is safe here */
1486         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1487         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1488                 hw_comp_cons++;
1489
1490         bd_cons = fp->rx_bd_cons;
1491         bd_prod = fp->rx_bd_prod;
1492         bd_prod_fw = bd_prod;
1493         sw_comp_cons = fp->rx_comp_cons;
1494         sw_comp_prod = fp->rx_comp_prod;
1495
1496         /* Memory barrier necessary as speculative reads of the rx
1497          * buffer can be ahead of the index in the status block
1498          */
1499         rmb();
1500
1501         DP(NETIF_MSG_RX_STATUS,
1502            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1503            fp->index, hw_comp_cons, sw_comp_cons);
1504
1505         while (sw_comp_cons != hw_comp_cons) {
1506                 struct sw_rx_bd *rx_buf = NULL;
1507                 struct sk_buff *skb;
1508                 union eth_rx_cqe *cqe;
1509                 u8 cqe_fp_flags;
1510                 u16 len, pad;
1511
1512                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1513                 bd_prod = RX_BD(bd_prod);
1514                 bd_cons = RX_BD(bd_cons);
1515
1516                 /* Prefetch the page containing the BD descriptor
1517                    at the producer's index. It will be needed when a new
1518                    skb is allocated */
1519                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1520                                              (&fp->rx_desc_ring[bd_prod])) -
1521                                   PAGE_SIZE + 1));
1522
1523                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1524                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1525
1526                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1527                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1528                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1529                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1530                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1531                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1532
1533                 /* is this a slowpath msg? */
1534                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1535                         bnx2x_sp_event(fp, cqe);
1536                         goto next_cqe;
1537
1538                 /* this is an rx packet */
1539                 } else {
1540                         rx_buf = &fp->rx_buf_ring[bd_cons];
1541                         skb = rx_buf->skb;
1542                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1543                         pad = cqe->fast_path_cqe.placement_offset;
1544
1545                         /* If CQE is marked both TPA_START and TPA_END
1546                            it is a non-TPA CQE */
1547                         if ((!fp->disable_tpa) &&
1548                             (TPA_TYPE(cqe_fp_flags) !=
1549                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1550                                 u16 queue = cqe->fast_path_cqe.queue_index;
1551
1552                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1553                                         DP(NETIF_MSG_RX_STATUS,
1554                                            "calling tpa_start on queue %d\n",
1555                                            queue);
1556
1557                                         bnx2x_tpa_start(fp, queue, skb,
1558                                                         bd_cons, bd_prod);
1559                                         goto next_rx;
1560                                 }
1561
1562                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1563                                         DP(NETIF_MSG_RX_STATUS,
1564                                            "calling tpa_stop on queue %d\n",
1565                                            queue);
1566
1567                                         if (!BNX2X_RX_SUM_FIX(cqe))
1568                                                 BNX2X_ERR("STOP on non-TCP "
1569                                                           "data\n");
1570
1571                                         /* This is the size of the linear data
1572                                            on this skb */
1573                                         len = le16_to_cpu(cqe->fast_path_cqe.
1574                                                                 len_on_bd);
1575                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1576                                                     len, cqe, comp_ring_cons);
1577 #ifdef BNX2X_STOP_ON_ERROR
1578                                         if (bp->panic)
1579                                                 return 0;
1580 #endif
1581
1582                                         bnx2x_update_sge_prod(fp,
1583                                                         &cqe->fast_path_cqe);
1584                                         goto next_cqe;
1585                                 }
1586                         }
1587
1588                         pci_dma_sync_single_for_device(bp->pdev,
1589                                         pci_unmap_addr(rx_buf, mapping),
1590                                                        pad + RX_COPY_THRESH,
1591                                                        PCI_DMA_FROMDEVICE);
1592                         prefetch(skb);
1593                         prefetch(((char *)(skb)) + 128);
1594
1595                         /* is this an error packet? */
1596                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1597                                 DP(NETIF_MSG_RX_ERR,
1598                                    "ERROR  flags %x  rx packet %u\n",
1599                                    cqe_fp_flags, sw_comp_cons);
1600                                 fp->eth_q_stats.rx_err_discard_pkt++;
1601                                 goto reuse_rx;
1602                         }
1603
1604                         /* Since we don't have a jumbo ring,
1605                          * copy small packets if mtu > 1500
1606                          */
1607                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1608                             (len <= RX_COPY_THRESH)) {
1609                                 struct sk_buff *new_skb;
1610
1611                                 new_skb = netdev_alloc_skb(bp->dev,
1612                                                            len + pad);
1613                                 if (new_skb == NULL) {
1614                                         DP(NETIF_MSG_RX_ERR,
1615                                            "ERROR  packet dropped "
1616                                            "because of alloc failure\n");
1617                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1618                                         goto reuse_rx;
1619                                 }
1620
1621                                 /* aligned copy */
1622                                 skb_copy_from_linear_data_offset(skb, pad,
1623                                                     new_skb->data + pad, len);
1624                                 skb_reserve(new_skb, pad);
1625                                 skb_put(new_skb, len);
1626
1627                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1628
1629                                 skb = new_skb;
1630
1631                         } else
1632                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1633                                 pci_unmap_single(bp->pdev,
1634                                         pci_unmap_addr(rx_buf, mapping),
1635                                                  bp->rx_buf_size,
1636                                                  PCI_DMA_FROMDEVICE);
1637                                 skb_reserve(skb, pad);
1638                                 skb_put(skb, len);
1639
1640                         } else {
1641                                 DP(NETIF_MSG_RX_ERR,
1642                                    "ERROR  packet dropped because "
1643                                    "of alloc failure\n");
1644                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1645 reuse_rx:
1646                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1647                                 goto next_rx;
1648                         }
1649
1650                         skb->protocol = eth_type_trans(skb, bp->dev);
1651
1652                         skb->ip_summed = CHECKSUM_NONE;
1653                         if (bp->rx_csum) {
1654                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1655                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1656                                 else
1657                                         fp->eth_q_stats.hw_csum_err++;
1658                         }
1659                 }
1660
1661                 skb_record_rx_queue(skb, fp->index);
1662
1663 #ifdef BCM_VLAN
1664                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1665                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1666                      PARSING_FLAGS_VLAN))
1667                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1668                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1669                 else
1670 #endif
1671                         netif_receive_skb(skb);
1672
1673
1674 next_rx:
1675                 rx_buf->skb = NULL;
1676
1677                 bd_cons = NEXT_RX_IDX(bd_cons);
1678                 bd_prod = NEXT_RX_IDX(bd_prod);
1679                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1680                 rx_pkt++;
1681 next_cqe:
1682                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1683                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1684
1685                 if (rx_pkt == budget)
1686                         break;
1687         } /* while */
1688
1689         fp->rx_bd_cons = bd_cons;
1690         fp->rx_bd_prod = bd_prod_fw;
1691         fp->rx_comp_cons = sw_comp_cons;
1692         fp->rx_comp_prod = sw_comp_prod;
1693
1694         /* Update producers */
1695         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1696                              fp->rx_sge_prod);
1697
1698         fp->rx_pkt += rx_pkt;
1699         fp->rx_calls++;
1700
1701         return rx_pkt;
1702 }
1703
1704 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1705 {
1706         struct bnx2x_fastpath *fp = fp_cookie;
1707         struct bnx2x *bp = fp->bp;
1708
1709         /* Return here if interrupt is disabled */
1710         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1712                 return IRQ_HANDLED;
1713         }
1714
1715         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1716            fp->index, fp->sb_id);
1717         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1718
1719 #ifdef BNX2X_STOP_ON_ERROR
1720         if (unlikely(bp->panic))
1721                 return IRQ_HANDLED;
1722 #endif
1723         /* Handle Rx or Tx according to MSI-X vector */
1724         if (fp->is_rx_queue) {
1725                 prefetch(fp->rx_cons_sb);
1726                 prefetch(&fp->status_blk->u_status_block.status_block_index);
1727
1728                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1729
1730         } else {
1731                 prefetch(fp->tx_cons_sb);
1732                 prefetch(&fp->status_blk->c_status_block.status_block_index);
1733
1734                 bnx2x_update_fpsb_idx(fp);
1735                 rmb();
1736                 bnx2x_tx_int(fp);
1737
1738                 /* Re-enable interrupts */
1739                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1740                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1741                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1742                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1743         }
1744
1745         return IRQ_HANDLED;
1746 }
1747
1748 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1749 {
1750         struct bnx2x *bp = netdev_priv(dev_instance);
1751         u16 status = bnx2x_ack_int(bp);
1752         u16 mask;
1753         int i;
1754
1755         /* Return here if interrupt is shared and it's not for us */
1756         if (unlikely(status == 0)) {
1757                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1758                 return IRQ_NONE;
1759         }
1760         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1761
1762         /* Return here if interrupt is disabled */
1763         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1764                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1765                 return IRQ_HANDLED;
1766         }
1767
1768 #ifdef BNX2X_STOP_ON_ERROR
1769         if (unlikely(bp->panic))
1770                 return IRQ_HANDLED;
1771 #endif
1772
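        /* Bit 0 of the ack'ed status belongs to the default (slow path)
         * status block and is handled below; each fastpath SB owns the bit
         * at position (sb_id + 1), hence the 0x2 << sb_id mask.
         */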
1773         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1774                 struct bnx2x_fastpath *fp = &bp->fp[i];
1775
1776                 mask = 0x2 << fp->sb_id;
1777                 if (status & mask) {
1778                         /* Handle Rx or Tx according to SB id */
1779                         if (fp->is_rx_queue) {
1780                                 prefetch(fp->rx_cons_sb);
1781                                 prefetch(&fp->status_blk->u_status_block.
1782                                                         status_block_index);
1783
1784                                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1785
1786                         } else {
1787                                 prefetch(fp->tx_cons_sb);
1788                                 prefetch(&fp->status_blk->c_status_block.
1789                                                         status_block_index);
1790
1791                                 bnx2x_update_fpsb_idx(fp);
1792                                 rmb();
1793                                 bnx2x_tx_int(fp);
1794
1795                                 /* Re-enable interrupts */
1796                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1797                                              le16_to_cpu(fp->fp_u_idx),
1798                                              IGU_INT_NOP, 1);
1799                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1800                                              le16_to_cpu(fp->fp_c_idx),
1801                                              IGU_INT_ENABLE, 1);
1802                         }
1803                         status &= ~mask;
1804                 }
1805         }
1806
1807
1808         if (unlikely(status & 0x1)) {
1809                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1810
1811                 status &= ~0x1;
1812                 if (!status)
1813                         return IRQ_HANDLED;
1814         }
1815
1816         if (status)
1817                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1818                    status);
1819
1820         return IRQ_HANDLED;
1821 }
1822
1823 /* end of fast path */
1824
1825 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1826
1827 /* Link */
1828
1829 /*
1830  * General service functions
1831  */
1832
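/*
 * The HW lock registers work as set/clear pairs: writing resource_bit to
 * (hw_lock_control_reg + 4) attempts to take the lock, writing it to
 * hw_lock_control_reg itself releases it, and reading the base register
 * back shows which resource bits are currently held.
 */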
1833 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1834 {
1835         u32 lock_status;
1836         u32 resource_bit = (1 << resource);
1837         int func = BP_FUNC(bp);
1838         u32 hw_lock_control_reg;
1839         int cnt;
1840
1841         /* Validating that the resource is within range */
1842         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1843                 DP(NETIF_MSG_HW,
1844                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1845                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1846                 return -EINVAL;
1847         }
1848
1849         if (func <= 5) {
1850                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1851         } else {
1852                 hw_lock_control_reg =
1853                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1854         }
1855
1856         /* Validating that the resource is not already taken */
1857         lock_status = REG_RD(bp, hw_lock_control_reg);
1858         if (lock_status & resource_bit) {
1859                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1860                    lock_status, resource_bit);
1861                 return -EEXIST;
1862         }
1863
1864         /* Try for 5 seconds, polling every 5ms */
1865         for (cnt = 0; cnt < 1000; cnt++) {
1866                 /* Try to acquire the lock */
1867                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1868                 lock_status = REG_RD(bp, hw_lock_control_reg);
1869                 if (lock_status & resource_bit)
1870                         return 0;
1871
1872                 msleep(5);
1873         }
1874         DP(NETIF_MSG_HW, "Timeout\n");
1875         return -EAGAIN;
1876 }
1877
1878 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1879 {
1880         u32 lock_status;
1881         u32 resource_bit = (1 << resource);
1882         int func = BP_FUNC(bp);
1883         u32 hw_lock_control_reg;
1884
1885         /* Validating that the resource is within range */
1886         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1887                 DP(NETIF_MSG_HW,
1888                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1889                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1890                 return -EINVAL;
1891         }
1892
1893         if (func <= 5) {
1894                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1895         } else {
1896                 hw_lock_control_reg =
1897                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1898         }
1899
1900         /* Validating that the resource is currently taken */
1901         lock_status = REG_RD(bp, hw_lock_control_reg);
1902         if (!(lock_status & resource_bit)) {
1903                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1904                    lock_status, resource_bit);
1905                 return -EFAULT;
1906         }
1907
1908         REG_WR(bp, hw_lock_control_reg, resource_bit);
1909         return 0;
1910 }
1911
1912 /* HW Lock for shared dual port PHYs */
1913 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1914 {
1915         mutex_lock(&bp->port.phy_mutex);
1916
1917         if (bp->port.need_hw_lock)
1918                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1919 }
1920
1921 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1922 {
1923         if (bp->port.need_hw_lock)
1924                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1925
1926         mutex_unlock(&bp->port.phy_mutex);
1927 }
1928
1929 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1930 {
1931         /* The GPIO should be swapped if swap register is set and active */
1932         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1933                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1934         int gpio_shift = gpio_num +
1935                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1936         u32 gpio_mask = (1 << gpio_shift);
1937         u32 gpio_reg;
1938         int value;
1939
1940         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1941                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1942                 return -EINVAL;
1943         }
1944
1945         /* read GPIO value */
1946         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1947
1948         /* get the requested pin value */
1949         if ((gpio_reg & gpio_mask) == gpio_mask)
1950                 value = 1;
1951         else
1952                 value = 0;
1953
1954         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1955
1956         return value;
1957 }
1958
1959 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1960 {
1961         /* The GPIO should be swapped if swap register is set and active */
1962         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1963                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1964         int gpio_shift = gpio_num +
1965                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1966         u32 gpio_mask = (1 << gpio_shift);
1967         u32 gpio_reg;
1968
1969         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1970                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1971                 return -EINVAL;
1972         }
1973
1974         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1975         /* read GPIO and mask except the float bits */
1976         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1977
1978         switch (mode) {
1979         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1980                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1981                    gpio_num, gpio_shift);
1982                 /* clear FLOAT and set CLR */
1983                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1984                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1985                 break;
1986
1987         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1988                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1989                    gpio_num, gpio_shift);
1990                 /* clear FLOAT and set SET */
1991                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1992                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1993                 break;
1994
1995         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1996                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1997                    gpio_num, gpio_shift);
1998                 /* set FLOAT */
1999                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2000                 break;
2001
2002         default:
2003                 break;
2004         }
2005
2006         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2007         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2008
2009         return 0;
2010 }
2011
2012 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2013 {
2014         /* The GPIO should be swapped if swap register is set and active */
2015         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2016                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2017         int gpio_shift = gpio_num +
2018                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2019         u32 gpio_mask = (1 << gpio_shift);
2020         u32 gpio_reg;
2021
2022         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2023                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2024                 return -EINVAL;
2025         }
2026
2027         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2028         /* read GPIO int */
2029         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2030
2031         switch (mode) {
2032         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2033                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2034                                    "output low\n", gpio_num, gpio_shift);
2035                 /* clear SET and set CLR */
2036                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2037                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2038                 break;
2039
2040         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2041                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2042                                    "output high\n", gpio_num, gpio_shift);
2043                 /* clear CLR and set SET */
2044                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2045                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2046                 break;
2047
2048         default:
2049                 break;
2050         }
2051
2052         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2053         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2054
2055         return 0;
2056 }
2057
2058 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2059 {
2060         u32 spio_mask = (1 << spio_num);
2061         u32 spio_reg;
2062
2063         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2064             (spio_num > MISC_REGISTERS_SPIO_7)) {
2065                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2066                 return -EINVAL;
2067         }
2068
2069         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2070         /* read SPIO and mask except the float bits */
2071         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2072
2073         switch (mode) {
2074         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2075                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2076                 /* clear FLOAT and set CLR */
2077                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2078                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2079                 break;
2080
2081         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2082                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2083                 /* clear FLOAT and set SET */
2084                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2085                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2086                 break;
2087
2088         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2089                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2090                 /* set FLOAT */
2091                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2092                 break;
2093
2094         default:
2095                 break;
2096         }
2097
2098         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2099         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2100
2101         return 0;
2102 }
2103
2104 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2105 {
2106         switch (bp->link_vars.ieee_fc &
2107                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2108         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2109                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2110                                           ADVERTISED_Pause);
2111                 break;
2112
2113         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2114                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2115                                          ADVERTISED_Pause);
2116                 break;
2117
2118         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2119                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2120                 break;
2121
2122         default:
2123                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2124                                           ADVERTISED_Pause);
2125                 break;
2126         }
2127 }
2128
2129 static void bnx2x_link_report(struct bnx2x *bp)
2130 {
2131         if (bp->state == BNX2X_STATE_DISABLED) {
2132                 netif_carrier_off(bp->dev);
2133                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2134                 return;
2135         }
2136
2137         if (bp->link_vars.link_up) {
2138                 if (bp->state == BNX2X_STATE_OPEN)
2139                         netif_carrier_on(bp->dev);
2140                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2141
2142                 printk("%d Mbps ", bp->link_vars.line_speed);
2143
2144                 if (bp->link_vars.duplex == DUPLEX_FULL)
2145                         printk("full duplex");
2146                 else
2147                         printk("half duplex");
2148
2149                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2150                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2151                                 printk(", receive ");
2152                                 if (bp->link_vars.flow_ctrl &
2153                                     BNX2X_FLOW_CTRL_TX)
2154                                         printk("& transmit ");
2155                         } else {
2156                                 printk(", transmit ");
2157                         }
2158                         printk("flow control ON");
2159                 }
2160                 printk("\n");
2161
2162         } else { /* link_down */
2163                 netif_carrier_off(bp->dev);
2164                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2165         }
2166 }
2167
2168 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2169 {
2170         if (!BP_NOMCP(bp)) {
2171                 u8 rc;
2172
2173                 /* Initialize link parameters structure variables */
2174                 /* It is recommended to turn off RX FC for jumbo frames
2175                    for better performance */
2176                 if (bp->dev->mtu > 5000)
2177                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2178                 else
2179                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2180
2181                 bnx2x_acquire_phy_lock(bp);
2182
2183                 if (load_mode == LOAD_DIAG)
2184                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2185
2186                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2187
2188                 bnx2x_release_phy_lock(bp);
2189
2190                 bnx2x_calc_fc_adv(bp);
2191
2192                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2193                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2194                         bnx2x_link_report(bp);
2195                 }
2196
2197                 return rc;
2198         }
2199         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2200         return -EINVAL;
2201 }
2202
2203 static void bnx2x_link_set(struct bnx2x *bp)
2204 {
2205         if (!BP_NOMCP(bp)) {
2206                 bnx2x_acquire_phy_lock(bp);
2207                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2208                 bnx2x_release_phy_lock(bp);
2209
2210                 bnx2x_calc_fc_adv(bp);
2211         } else
2212                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2213 }
2214
2215 static void bnx2x__link_reset(struct bnx2x *bp)
2216 {
2217         if (!BP_NOMCP(bp)) {
2218                 bnx2x_acquire_phy_lock(bp);
2219                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2220                 bnx2x_release_phy_lock(bp);
2221         } else
2222                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2223 }
2224
2225 static u8 bnx2x_link_test(struct bnx2x *bp)
2226 {
2227         u8 rc;
2228
2229         bnx2x_acquire_phy_lock(bp);
2230         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2231         bnx2x_release_phy_lock(bp);
2232
2233         return rc;
2234 }
2235
2236 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2237 {
2238         u32 r_param = bp->link_vars.line_speed / 8;
2239         u32 fair_periodic_timeout_usec;
2240         u32 t_fair;
2241
2242         memset(&(bp->cmng.rs_vars), 0,
2243                sizeof(struct rate_shaping_vars_per_port));
2244         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2245
2246         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2247         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2248
2249         /* this is the threshold below which no timer arming will occur;
2250            the 1.25 coefficient makes the threshold a little bigger than
2251            the real time, to compensate for timer inaccuracy */
2252         bp->cmng.rs_vars.rs_threshold =
2253                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2254
2255         /* resolution of fairness timer */
2256         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2257         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2258         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2259
2260         /* this is the threshold below which we won't arm the timer anymore */
2261         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2262
2263         /* we multiply by 1e3/8 to get bytes/msec.
2264            The credits must not exceed
2265            t_fair*FAIR_MEM (the algorithm resolution) */
2266         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2267         /* since each tick is 4 usec */
2268         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2269 }
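
/*
 * Worked example (illustrative): on a 10G link, line_speed is 10000 Mbps,
 * so r_param = 10000/8 = 1250 bytes/usec.  With the 100 usec rate-shaping
 * period noted above, rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes,
 * and t_fair = T_FAIR_COEF / 10000, i.e. the 1000 usec quoted above.
 */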
2270
2271 /* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
2272    It's needed for further normalizing of the min_rates.
2273    The stored value is:
2274      the sum of vn_min_rates,
2275        or
2276      0 - if all the min_rates are 0.
2277      In the latter case the fairness algorithm should be deactivated.
2278      If not all min_rates are zero then those that are zeroes will be set to 1.
2279  */
2280 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2281 {
2282         int all_zero = 1;
2283         int port = BP_PORT(bp);
2284         int vn;
2285
2286         bp->vn_weight_sum = 0;
2287         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2288                 int func = 2*vn + port;
2289                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2290                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2291                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2292
2293                 /* Skip hidden vns */
2294                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2295                         continue;
2296
2297                 /* If min rate is zero - set it to 1 */
2298                 if (!vn_min_rate)
2299                         vn_min_rate = DEF_MIN_RATE;
2300                 else
2301                         all_zero = 0;
2302
2303                 bp->vn_weight_sum += vn_min_rate;
2304         }
2305
2306         /* ... only if all min rates are zeros - disable fairness */
2307         if (all_zero)
2308                 bp->vn_weight_sum = 0;
2309 }
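
/*
 * Example (illustrative): with configured min BW fields of 0, 25 and 50,
 * the per-vn rates become DEF_MIN_RATE, 2500 and 5000, so vn_weight_sum is
 * DEF_MIN_RATE + 7500 and fairness stays enabled; only when every min rate
 * is zero is vn_weight_sum forced back to 0.
 */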
2310
2311 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2312 {
2313         struct rate_shaping_vars_per_vn m_rs_vn;
2314         struct fairness_vars_per_vn m_fair_vn;
2315         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2316         u16 vn_min_rate, vn_max_rate;
2317         int i;
2318
2319         /* If function is hidden - set min and max to zeroes */
2320         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2321                 vn_min_rate = 0;
2322                 vn_max_rate = 0;
2323
2324         } else {
2325                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2326                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2327                 /* If fairness is enabled (not all min rates are zeroes) and
2328                    if current min rate is zero - set it to 1.
2329                    This is a requirement of the algorithm. */
2330                 if (bp->vn_weight_sum && (vn_min_rate == 0))
2331                         vn_min_rate = DEF_MIN_RATE;
2332                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2333                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2334         }
2335
2336         DP(NETIF_MSG_IFUP,
2337            "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
2338            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2339
2340         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2341         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2342
2343         /* global vn counter - maximal Mbps for this vn */
2344         m_rs_vn.vn_counter.rate = vn_max_rate;
2345
2346         /* quota - number of bytes transmitted in this period */
2347         m_rs_vn.vn_counter.quota =
2348                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2349
2350         if (bp->vn_weight_sum) {
2351                 /* credit for each period of the fairness algorithm:
2352                    number of bytes in T_FAIR (the vns share the port rate).
2353                    vn_weight_sum should not be larger than 10000, thus
2354                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2355                    than zero */
2356                 m_fair_vn.vn_credit_delta =
2357                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2358                                                  (8 * bp->vn_weight_sum))),
2359                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2360                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2361                    m_fair_vn.vn_credit_delta);
2362         }
2363
2364         /* Store it to internal memory */
2365         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2366                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2367                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2368                        ((u32 *)(&m_rs_vn))[i]);
2369
2370         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2371                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2372                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2373                        ((u32 *)(&m_fair_vn))[i]);
2374 }
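
/*
 * Quota example (illustrative): with vn_max_rate = 10000 (a 10G vn) and
 * the 100 usec rate-shaping period, quota = (10000 * 100) / 8 = 125000
 * bytes may be transmitted by this vn in each period.
 */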
2375
2376
2377 /* This function is called upon link interrupt */
2378 static void bnx2x_link_attn(struct bnx2x *bp)
2379 {
2380         /* Make sure that we are synced with the current statistics */
2381         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2382
2383         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2384
2385         if (bp->link_vars.link_up) {
2386
2387                 /* dropless flow control */
2388                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2389                         int port = BP_PORT(bp);
2390                         u32 pause_enabled = 0;
2391
2392                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2393                                 pause_enabled = 1;
2394
2395                         REG_WR(bp, BAR_USTRORM_INTMEM +
2396                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2397                                pause_enabled);
2398                 }
2399
2400                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2401                         struct host_port_stats *pstats;
2402
2403                         pstats = bnx2x_sp(bp, port_stats);
2404                         /* reset old bmac stats */
2405                         memset(&(pstats->mac_stx[0]), 0,
2406                                sizeof(struct mac_stx));
2407                 }
2408                 if ((bp->state == BNX2X_STATE_OPEN) ||
2409                     (bp->state == BNX2X_STATE_DISABLED))
2410                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2411         }
2412
2413         /* indicate link status */
2414         bnx2x_link_report(bp);
2415
2416         if (IS_E1HMF(bp)) {
2417                 int port = BP_PORT(bp);
2418                 int func;
2419                 int vn;
2420
2421                 /* Set the attention towards other drivers on the same port */
2422                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2423                         if (vn == BP_E1HVN(bp))
2424                                 continue;
2425
2426                         func = ((vn << 1) | port);
2427                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2428                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2429                 }
2430
2431                 if (bp->link_vars.link_up) {
2432                         int i;
2433
2434                         /* Init rate shaping and fairness contexts */
2435                         bnx2x_init_port_minmax(bp);
2436
2437                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2438                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2439
2440                         /* Store it to internal memory */
2441                         for (i = 0;
2442                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2443                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2444                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2445                                        ((u32 *)(&bp->cmng))[i]);
2446                 }
2447         }
2448 }
2449
2450 static void bnx2x__link_status_update(struct bnx2x *bp)
2451 {
2452         int func = BP_FUNC(bp);
2453
2454         if (bp->state != BNX2X_STATE_OPEN)
2455                 return;
2456
2457         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2458
2459         if (bp->link_vars.link_up)
2460                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2461         else
2462                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2463
2464         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2465         bnx2x_calc_vn_weight_sum(bp);
2466
2467         /* indicate link status */
2468         bnx2x_link_report(bp);
2469 }
2470
2471 static void bnx2x_pmf_update(struct bnx2x *bp)
2472 {
2473         int port = BP_PORT(bp);
2474         u32 val;
2475
2476         bp->port.pmf = 1;
2477         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2478
2479         /* enable nig attention */
2480         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2481         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2482         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2483
2484         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2485 }
2486
2487 /* end of Link */
2488
2489 /* slow path */
2490
2491 /*
2492  * General service functions
2493  */
2494
2495 /* send the MCP a request, block until there is a reply */
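/* The handshake pairs a driver sequence number with every command: the
 * driver writes (command | seq) to drv_mb_header and the MCP echoes seq
 * back in fw_mb_header, so a stale reply to an earlier command is never
 * mistaken for the current one.
 */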
2496 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2497 {
2498         int func = BP_FUNC(bp);
2499         u32 seq = ++bp->fw_seq;
2500         u32 rc = 0;
2501         u32 cnt = 1;
2502         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2503
2504         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2505         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2506
2507         do {
2508                 /* let the FW do its magic ... */
2509                 msleep(delay);
2510
2511                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2512
2513                 /* Give the FW up to 2 seconds (200*10ms) */
2514         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2515
2516         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2517            cnt*delay, rc, seq);
2518
2519         /* is this a reply to our command? */
2520         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2521                 rc &= FW_MSG_CODE_MASK;
2522         else {
2523                 /* FW BUG! */
2524                 BNX2X_ERR("FW failed to respond!\n");
2525                 bnx2x_fw_dump(bp);
2526                 rc = 0;
2527         }
2528
2529         return rc;
2530 }
2531
2532 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2533 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2534 static void bnx2x_set_rx_mode(struct net_device *dev);
2535
2536 static void bnx2x_e1h_disable(struct bnx2x *bp)
2537 {
2538         int port = BP_PORT(bp);
2539         int i;
2540
2541         bp->rx_mode = BNX2X_RX_MODE_NONE;
2542         bnx2x_set_storm_rx_mode(bp);
2543
2544         netif_tx_disable(bp->dev);
2545         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2546
2547         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2548
2549         bnx2x_set_mac_addr_e1h(bp, 0);
2550
2551         for (i = 0; i < MC_HASH_SIZE; i++)
2552                 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2553
2554         netif_carrier_off(bp->dev);
2555 }
2556
2557 static void bnx2x_e1h_enable(struct bnx2x *bp)
2558 {
2559         int port = BP_PORT(bp);
2560
2561         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2562
2563         bnx2x_set_mac_addr_e1h(bp, 1);
2564
2565         /* Tx queues should only be re-enabled */
2566         netif_tx_wake_all_queues(bp->dev);
2567
2568         /* Initialize the receive filter. */
2569         bnx2x_set_rx_mode(bp->dev);
2570 }
2571
2572 static void bnx2x_update_min_max(struct bnx2x *bp)
2573 {
2574         int port = BP_PORT(bp);
2575         int vn, i;
2576
2577         /* Init rate shaping and fairness contexts */
2578         bnx2x_init_port_minmax(bp);
2579
2580         bnx2x_calc_vn_weight_sum(bp);
2581
2582         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2583                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2584
2585         if (bp->port.pmf) {
2586                 int func;
2587
2588                 /* Set the attention towards other drivers on the same port */
2589                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2590                         if (vn == BP_E1HVN(bp))
2591                                 continue;
2592
2593                         func = ((vn << 1) | port);
2594                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2595                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2596                 }
2597
2598                 /* Store it to internal memory */
2599                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2600                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2601                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2602                                ((u32 *)(&bp->cmng))[i]);
2603         }
2604 }
2605
2606 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2607 {
2608         int func = BP_FUNC(bp);
2609
2610         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2611         bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2612
2613         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2614
2615                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2616                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2617                         bp->state = BNX2X_STATE_DISABLED;
2618
2619                         bnx2x_e1h_disable(bp);
2620                 } else {
2621                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2622                         bp->state = BNX2X_STATE_OPEN;
2623
2624                         bnx2x_e1h_enable(bp);
2625                 }
2626                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2627         }
2628         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2629
2630                 bnx2x_update_min_max(bp);
2631                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2632         }
2633
2634         /* Report results to MCP */
2635         if (dcc_event)
2636                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2637         else
2638                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2639 }
2640
2641 /* must be called under the spq lock */
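/* Returns the current producer BD and then advances the producer,
 * wrapping from spq_last_bd back to the start of the ring.
 */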
2642 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2643 {
2644         struct eth_spe *next_spe = bp->spq_prod_bd;
2645
2646         if (bp->spq_prod_bd == bp->spq_last_bd) {
2647                 bp->spq_prod_bd = bp->spq;
2648                 bp->spq_prod_idx = 0;
2649                 DP(NETIF_MSG_TIMER, "end of spq\n");
2650         } else {
2651                 bp->spq_prod_bd++;
2652                 bp->spq_prod_idx++;
2653         }
2654         return next_spe;
2655 }
2656
2657 /* must be called under the spq lock */
2658 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2659 {
2660         int func = BP_FUNC(bp);
2661
2662         /* Make sure that BD data is updated before writing the producer */
2663         wmb();
2664
2665         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2666                bp->spq_prod_idx);
2667         mmiowb();
2668 }
2669
2670 /* the slow path queue is odd since completions arrive on the fastpath ring */
2671 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2672                          u32 data_hi, u32 data_lo, int common)
2673 {
2674         struct eth_spe *spe;
2675
2676         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2677            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2678            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2679            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2680            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2681
2682 #ifdef BNX2X_STOP_ON_ERROR
2683         if (unlikely(bp->panic))
2684                 return -EIO;
2685 #endif
2686
2687         spin_lock_bh(&bp->spq_lock);
2688
2689         if (!bp->spq_left) {
2690                 BNX2X_ERR("BUG! SPQ ring full!\n");
2691                 spin_unlock_bh(&bp->spq_lock);
2692                 bnx2x_panic();
2693                 return -EBUSY;
2694         }
2695
2696         spe = bnx2x_sp_get_next(bp);
2697
2698         /* The CID needs the port number encoded in it */
2699         spe->hdr.conn_and_cmd_data =
2700                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2701                                      HW_CID(bp, cid)));
2702         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2703         if (common)
2704                 spe->hdr.type |=
2705                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2706
2707         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2708         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2709
2710         bp->spq_left--;
2711
2712         bnx2x_sp_prod_update(bp);
2713         spin_unlock_bh(&bp->spq_lock);
2714         return 0;
2715 }
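
/*
 * Typical usage (illustrative sketch, not a specific caller): posting a
 * ramrod for connection 'cid' with a DMA address split into halves:
 *
 *	rc = bnx2x_sp_post(bp, ramrod_cmd, cid,
 *			   U64_HI(mapping), U64_LO(mapping), 1);
 *
 * Returns 0 on success, -EBUSY if the SPQ ring is full, or -EIO if the
 * driver has already panicked (under BNX2X_STOP_ON_ERROR).
 */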
2716
2717 /* acquire split MCP access lock register */
2718 static int bnx2x_acquire_alr(struct bnx2x *bp)
2719 {
2720         u32 i, j, val;
2721         int rc = 0;
2722
2723         might_sleep();
2724         i = 100;
2725         for (j = 0; j < i*10; j++) {
2726                 val = (1UL << 31);
2727                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2728                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2729                 if (val & (1L << 31))
2730                         break;
2731
2732                 msleep(5);
2733         }
2734         if (!(val & (1L << 31))) {
2735                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2736                 rc = -EBUSY;
2737         }
2738
2739         return rc;
2740 }
2741
2742 /* release split MCP access lock register */
2743 static void bnx2x_release_alr(struct bnx2x *bp)
2744 {
2745         u32 val = 0;
2746
2747         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2748 }
2749
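/* Returns a bitmask of which default status block indices changed since
 * the last call: bit 0 - attention bits, bit 1 - cstorm, bit 2 - ustorm,
 * bit 3 - xstorm, bit 4 - tstorm.
 */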
2750 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2751 {
2752         struct host_def_status_block *def_sb = bp->def_status_blk;
2753         u16 rc = 0;
2754
2755         barrier(); /* status block is written to by the chip */
2756         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2757                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2758                 rc |= 1;
2759         }
2760         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2761                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2762                 rc |= 2;
2763         }
2764         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2765                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2766                 rc |= 4;
2767         }
2768         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2769                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2770                 rc |= 8;
2771         }
2772         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2773                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2774                 rc |= 16;
2775         }
2776         return rc;
2777 }
2778
2779 /*
2780  * slow path service functions
2781  */
2782
2783 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2784 {
2785         int port = BP_PORT(bp);
2786         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2787                        COMMAND_REG_ATTN_BITS_SET);
2788         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2789                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2790         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2791                                        NIG_REG_MASK_INTERRUPT_PORT0;
2792         u32 aeu_mask;
2793         u32 nig_mask = 0;
2794
2795         if (bp->attn_state & asserted)
2796                 BNX2X_ERR("IGU ERROR\n");
2797
2798         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2799         aeu_mask = REG_RD(bp, aeu_addr);
2800
2801         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2802            aeu_mask, asserted);
2803         aeu_mask &= ~(asserted & 0xff);
2804         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2805
2806         REG_WR(bp, aeu_addr, aeu_mask);
2807         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2808
2809         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2810         bp->attn_state |= asserted;
2811         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2812
2813         if (asserted & ATTN_HARD_WIRED_MASK) {
2814                 if (asserted & ATTN_NIG_FOR_FUNC) {
2815
2816                         bnx2x_acquire_phy_lock(bp);
2817
2818                         /* save nig interrupt mask */
2819                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2820                         REG_WR(bp, nig_int_mask_addr, 0);
2821
2822                         bnx2x_link_attn(bp);
2823
2824                         /* handle unicore attn? */
2825                 }
2826                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2827                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2828
2829                 if (asserted & GPIO_2_FUNC)
2830                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2831
2832                 if (asserted & GPIO_3_FUNC)
2833                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2834
2835                 if (asserted & GPIO_4_FUNC)
2836                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2837
2838                 if (port == 0) {
2839                         if (asserted & ATTN_GENERAL_ATTN_1) {
2840                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2841                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2842                         }
2843                         if (asserted & ATTN_GENERAL_ATTN_2) {
2844                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2845                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2846                         }
2847                         if (asserted & ATTN_GENERAL_ATTN_3) {
2848                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2849                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2850                         }
2851                 } else {
2852                         if (asserted & ATTN_GENERAL_ATTN_4) {
2853                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2854                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2855                         }
2856                         if (asserted & ATTN_GENERAL_ATTN_5) {
2857                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2858                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2859                         }
2860                         if (asserted & ATTN_GENERAL_ATTN_6) {
2861                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2862                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2863                         }
2864                 }
2865
2866         } /* if hardwired */
2867
2868         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2869            asserted, hc_addr);
2870         REG_WR(bp, hc_addr, asserted);
2871
2872         /* now set back the mask */
2873         if (asserted & ATTN_NIG_FOR_FUNC) {
2874                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2875                 bnx2x_release_phy_lock(bp);
2876         }
2877 }
2878
2879 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2880 {
2881         int port = BP_PORT(bp);
2882
2883         /* mark the failure */
2884         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2885         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2886         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2887                  bp->link_params.ext_phy_config);
2888
2889         /* log the failure */
2890         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2891                " the driver to shut down the card to prevent permanent"
2892                " damage.  Please contact Dell Support for assistance\n",
2893                bp->dev->name);
2894 }
2895
2896 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2897 {
2898         int port = BP_PORT(bp);
2899         int reg_offset;
2900         u32 val, swap_val, swap_override;
2901
2902         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2903                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2904
2905         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2906
2907                 val = REG_RD(bp, reg_offset);
2908                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2909                 REG_WR(bp, reg_offset, val);
2910
2911                 BNX2X_ERR("SPIO5 hw attention\n");
2912
2913                 /* Fan failure attention */
2914                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2915                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2916                         /* Low power mode is controlled by GPIO 2 */
2917                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2918                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2919                         /* The PHY reset is controlled by GPIO 1 */
2920                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2921                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2922                         break;
2923
2924                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2925                         /* The PHY reset is controlled by GPIO 1 */
2926                         /* fake the port number to cancel the swap done in
2927                            set_gpio() */
2928                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2929                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2930                         port = (swap_val && swap_override) ^ 1;
2931                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2932                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2933                         break;
2934
2935                 default:
2936                         break;
2937                 }
2938                 bnx2x_fan_failure(bp);
2939         }
2940
2941         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2942                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2943                 bnx2x_acquire_phy_lock(bp);
2944                 bnx2x_handle_module_detect_int(&bp->link_params);
2945                 bnx2x_release_phy_lock(bp);
2946         }
2947
2948         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2949
2950                 val = REG_RD(bp, reg_offset);
2951                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2952                 REG_WR(bp, reg_offset, val);
2953
2954                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2955                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2956                 bnx2x_panic();
2957         }
2958 }
2959
2960 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2961 {
2962         u32 val;
2963
2964         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2965
2966                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2967                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2968                 /* DORQ discard attention */
2969                 if (val & 0x2)
2970                         BNX2X_ERR("FATAL error from DORQ\n");
2971         }
2972
2973         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2974
2975                 int port = BP_PORT(bp);
2976                 int reg_offset;
2977
2978                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2979                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2980
2981                 val = REG_RD(bp, reg_offset);
2982                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2983                 REG_WR(bp, reg_offset, val);
2984
2985                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2986                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2987                 bnx2x_panic();
2988         }
2989 }
2990
2991 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2992 {
2993         u32 val;
2994
2995         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2996
2997                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2998                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2999                 /* CFC error attention */
3000                 if (val & 0x2)
3001                         BNX2X_ERR("FATAL error from CFC\n");
3002         }
3003
3004         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3005
3006                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3007                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3008                 /* RQ_USDMDP_FIFO_OVERFLOW */
3009                 if (val & 0x18000)
3010                         BNX2X_ERR("FATAL error from PXP\n");
3011         }
3012
3013         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3014
3015                 int port = BP_PORT(bp);
3016                 int reg_offset;
3017
3018                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3019                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3020
3021                 val = REG_RD(bp, reg_offset);
3022                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3023                 REG_WR(bp, reg_offset, val);
3024
3025                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3026                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3027                 bnx2x_panic();
3028         }
3029 }
3030
3031 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3032 {
3033         u32 val;
3034
3035         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3036
3037                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3038                         int func = BP_FUNC(bp);
3039
3040                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3041                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3042                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3043                                 bnx2x_dcc_event(bp,
3044                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3045                         bnx2x__link_status_update(bp);
3046                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3047                                 bnx2x_pmf_update(bp);
3048
3049                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3050
3051                         BNX2X_ERR("MC assert!\n");
3052                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3053                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3054                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3055                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3056                         bnx2x_panic();
3057
3058                 } else if (attn & BNX2X_MCP_ASSERT) {
3059
3060                         BNX2X_ERR("MCP assert!\n");
3061                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3062                         bnx2x_fw_dump(bp);
3063
3064                 } else
3065                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3066         }
3067
3068         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3069                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3070                 if (attn & BNX2X_GRC_TIMEOUT) {
3071                         val = CHIP_IS_E1H(bp) ?
3072                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3073                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3074                 }
3075                 if (attn & BNX2X_GRC_RSV) {
3076                         val = CHIP_IS_E1H(bp) ?
3077                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3078                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3079                 }
3080                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3081         }
3082 }
3083
3084 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3085 {
3086         struct attn_route attn;
3087         struct attn_route group_mask;
3088         int port = BP_PORT(bp);
3089         int index;
3090         u32 reg_addr;
3091         u32 val;
3092         u32 aeu_mask;
3093
3094         /* need to take HW lock because MCP or other port might also
3095            try to handle this event */
3096         bnx2x_acquire_alr(bp);
3097
3098         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3099         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3100         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3101         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3102         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3103            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3104
3105         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3106                 if (deasserted & (1 << index)) {
3107                         group_mask = bp->attn_group[index];
3108
3109                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3110                            index, group_mask.sig[0], group_mask.sig[1],
3111                            group_mask.sig[2], group_mask.sig[3]);
3112
3113                         bnx2x_attn_int_deasserted3(bp,
3114                                         attn.sig[3] & group_mask.sig[3]);
3115                         bnx2x_attn_int_deasserted1(bp,
3116                                         attn.sig[1] & group_mask.sig[1]);
3117                         bnx2x_attn_int_deasserted2(bp,
3118                                         attn.sig[2] & group_mask.sig[2]);
3119                         bnx2x_attn_int_deasserted0(bp,
3120                                         attn.sig[0] & group_mask.sig[0]);
3121
3122                         if ((attn.sig[0] & group_mask.sig[0] &
3123                                                 HW_PRTY_ASSERT_SET_0) ||
3124                             (attn.sig[1] & group_mask.sig[1] &
3125                                                 HW_PRTY_ASSERT_SET_1) ||
3126                             (attn.sig[2] & group_mask.sig[2] &
3127                                                 HW_PRTY_ASSERT_SET_2))
3128                                 BNX2X_ERR("FATAL HW block parity attention\n");
3129                 }
3130         }
3131
3132         bnx2x_release_alr(bp);
3133
3134         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3135
3136         val = ~deasserted;
3137         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3138            val, reg_addr);
3139         REG_WR(bp, reg_addr, val);
3140
3141         if (~bp->attn_state & deasserted)
3142                 BNX2X_ERR("IGU ERROR\n");
3143
3144         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3145                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3146
3147         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3148         aeu_mask = REG_RD(bp, reg_addr);
3149
3150         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3151            aeu_mask, deasserted);
3152         aeu_mask |= (deasserted & 0xff);
3153         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3154
3155         REG_WR(bp, reg_addr, aeu_mask);
3156         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3157
3158         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3159         bp->attn_state &= ~deasserted;
3160         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3161 }
3162
3163 static void bnx2x_attn_int(struct bnx2x *bp)
3164 {
3165         /* read local copy of bits */
3166         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3167                                                                 attn_bits);
3168         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3169                                                                 attn_bits_ack);
3170         u32 attn_state = bp->attn_state;
3171
3172         /* look for changed bits */
3173         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3174         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3175
3176         DP(NETIF_MSG_HW,
3177            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3178            attn_bits, attn_ack, asserted, deasserted);
3179
3180         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3181                 BNX2X_ERR("BAD attention state\n");
3182
3183         /* handle bits that were raised */
3184         if (asserted)
3185                 bnx2x_attn_int_asserted(bp, asserted);
3186
3187         if (deasserted)
3188                 bnx2x_attn_int_deasserted(bp, deasserted);
3189 }
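/*
 * Editor's worked example (assumed values, illustration only): with
 * attn_bits = 0x5, attn_ack = 0x3 and attn_state = 0x3,
 *
 *   asserted   =  0x5 & ~0x3 & ~0x3 = 0x4
 *   deasserted = ~0x5 &  0x3 &  0x3 = 0x2
 *
 * i.e. bit 2 was newly raised by the chip and bit 1 was dropped.  The
 * sanity check ~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)
 * evaluates to 0 here: every bit that differs from the driver's state
 * also differs from the ack, so those transitions are still in flight
 * and the state is consistent.
 */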
3190
3191 static void bnx2x_sp_task(struct work_struct *work)
3192 {
3193         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3194         u16 status;
3195
3196
3197         /* Return here if interrupt is disabled */
3198         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3199                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3200                 return;
3201         }
3202
3203         status = bnx2x_update_dsb_idx(bp);
3204 /*      if (status == 0)                                     */
3205 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3206
3207         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3208
3209         /* HW attentions */
3210         if (status & 0x1)
3211                 bnx2x_attn_int(bp);
3212
3213         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3214                      IGU_INT_NOP, 1);
3215         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3216                      IGU_INT_NOP, 1);
3217         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3218                      IGU_INT_NOP, 1);
3219         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3220                      IGU_INT_NOP, 1);
3221         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3222                      IGU_INT_ENABLE, 1);
3223
3224 }
3225
3226 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3227 {
3228         struct net_device *dev = dev_instance;
3229         struct bnx2x *bp = netdev_priv(dev);
3230
3231         /* Return here if interrupt is disabled */
3232         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3233                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3234                 return IRQ_HANDLED;
3235         }
3236
3237         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3238
3239 #ifdef BNX2X_STOP_ON_ERROR
3240         if (unlikely(bp->panic))
3241                 return IRQ_HANDLED;
3242 #endif
3243
3244         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3245
3246         return IRQ_HANDLED;
3247 }
3248
3249 /* end of slow path */
3250
3251 /* Statistics */
3252
3253 /****************************************************************************
3254 * Macros
3255 ****************************************************************************/
3256
3257 /* sum[hi:lo] += add[hi:lo] */
3258 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3259         do { \
3260                 s_lo += a_lo; \
3261                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3262         } while (0)
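/*
 * Editor's worked example (illustration only): adding 1 to the 64-bit
 * value 0x00000001_ffffffff held as (s_hi = 1, s_lo = 0xffffffff):
 * s_lo wraps to 0, so (s_lo < a_lo) is true and the carry term
 * increments s_hi, giving 0x00000002_00000000 as expected.
 */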
3263
3264 /* difference = minuend - subtrahend */
3265 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3266         do { \
3267                 if (m_lo < s_lo) { \
3268                         /* underflow */ \
3269                         d_hi = m_hi - s_hi; \
3270                         if (d_hi > 0) { \
3271                                 /* we can 'loan' 1 */ \
3272                                 d_hi--; \
3273                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3274                         } else { \
3275                                 /* m_hi <= s_hi */ \
3276                                 d_hi = 0; \
3277                                 d_lo = 0; \
3278                         } \
3279                 } else { \
3280                         /* m_lo >= s_lo */ \
3281                         if (m_hi < s_hi) { \
3282                                 d_hi = 0; \
3283                                 d_lo = 0; \
3284                         } else { \
3285                                 /* m_hi >= s_hi */ \
3286                                 d_hi = m_hi - s_hi; \
3287                                 d_lo = m_lo - s_lo; \
3288                         } \
3289                 } \
3290         } while (0)
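/*
 * Editor's worked example (illustration only): 0x2_00000000 minus
 * 0x1_ffffffff with (m_hi = 2, m_lo = 0, s_hi = 1, s_lo = 0xffffffff).
 * Since m_lo < s_lo a borrow is taken: d_hi = 2 - 1 - 1 = 0 and
 * d_lo = 0 + (0xffffffff - 0xffffffff) + 1 = 1, the correct
 * difference.  If the subtrahend exceeds the minuend the macro clamps
 * the result to 0 instead of wrapping, presumably the safe choice for
 * counters that should never go backwards.
 */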
3291
3292 #define UPDATE_STAT64(s, t) \
3293         do { \
3294                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3295                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3296                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3297                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3298                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3299                        pstats->mac_stx[1].t##_lo, diff.lo); \
3300         } while (0)
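/*
 * Editor's note on the mac_stx[] convention as used here: mac_stx[0]
 * keeps the last raw snapshot of the MAC counter (so the next DIFF_64
 * sees only the new delta), while mac_stx[1] accumulates the running
 * 64-bit total that ADD_64 extends.
 */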
3301
3302 #define UPDATE_STAT64_NIG(s, t) \
3303         do { \
3304                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3305                         diff.lo, new->s##_lo, old->s##_lo); \
3306                 ADD_64(estats->t##_hi, diff.hi, \
3307                        estats->t##_lo, diff.lo); \
3308         } while (0)
3309
3310 /* sum[hi:lo] += add */
3311 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3312         do { \
3313                 s_lo += a; \
3314                 s_hi += (s_lo < a) ? 1 : 0; \
3315         } while (0)
3316
3317 #define UPDATE_EXTEND_STAT(s) \
3318         do { \
3319                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3320                               pstats->mac_stx[1].s##_lo, \
3321                               new->s); \
3322         } while (0)
3323
3324 #define UPDATE_EXTEND_TSTAT(s, t) \
3325         do { \
3326                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3327                 old_tclient->s = tclient->s; \
3328                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3329         } while (0)
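/*
 * Editor's note: the u32 subtraction above is wrap-safe because it is
 * done in modular unsigned arithmetic.  Worked example (assumed
 * values): old = 0xfffffff0, new = 0x00000010 gives
 * diff = 0x00000010 - 0xfffffff0 = 0x20, the true number of events
 * even though the firmware counter rolled over in between.
 */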
3330
3331 #define UPDATE_EXTEND_USTAT(s, t) \
3332         do { \
3333                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3334                 old_uclient->s = uclient->s; \
3335                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3336         } while (0)
3337
3338 #define UPDATE_EXTEND_XSTAT(s, t) \
3339         do { \
3340                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3341                 old_xclient->s = xclient->s; \
3342                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3343         } while (0)
3344
3345 /* minuend -= subtrahend */
3346 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3347         do { \
3348                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3349         } while (0)
3350
3351 /* minuend[hi:lo] -= subtrahend */
3352 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3353         do { \
3354                 SUB_64(m_hi, 0, m_lo, s); \
3355         } while (0)
3356
3357 #define SUB_EXTEND_USTAT(s, t) \
3358         do { \
3359                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3360                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3361         } while (0)
3362
3363 /*
3364  * General service functions
3365  */
3366
3367 static inline long bnx2x_hilo(u32 *hiref)
3368 {
3369         u32 lo = *(hiref + 1);
3370 #if (BITS_PER_LONG == 64)
3371         u32 hi = *hiref;
3372
3373         return HILO_U64(hi, lo);
3374 #else
3375         return lo;
3376 #endif
3377 }
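/*
 * Editor's sketch (illustration only): hiref points at a {hi, lo} pair
 * laid out hi-first, so on a 64-bit kernel bnx2x_hilo() returns
 * HILO_U64(hi, lo) -- assuming the usual definition, ((u64)hi << 32)
 * plus lo -- while on 32-bit it silently truncates to the low word:
 */
#if 0
static long hilo_example(void)
{
        u32 pair[2] = { 0x00000001, 0x00000002 };       /* hi, lo */

        return bnx2x_hilo(pair);  /* 0x100000002 on 64-bit, 2 on 32-bit */
}
#endif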
3378
3379 /*
3380  * Init service functions
3381  */
3382
3383 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3384 {
3385         if (!bp->stats_pending) {
3386                 struct eth_query_ramrod_data ramrod_data = {0};
3387                 int i, rc;
3388
3389                 ramrod_data.drv_counter = bp->stats_counter++;
3390                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3391                 for_each_queue(bp, i)
3392                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3393
3394                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3395                                    ((u32 *)&ramrod_data)[1],
3396                                    ((u32 *)&ramrod_data)[0], 0);
3397                 if (rc == 0) {
3398                         /* stats ramrod has its own slot on the spq */
3399                         bp->spq_left++;
3400                         bp->stats_pending = 1;
3401                 }
3402         }
3403 }
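/*
 * Editor's note: ramrod_data is handed to bnx2x_sp_post() as two
 * 32-bit halves -- ((u32 *)&ramrod_data)[1] as the high data word and
 * [0] as the low one -- since the slow-path element apparently carries
 * its payload as a hi/lo pair.  bnx2x_sp_post() consumes a slow-path
 * queue slot internally, so the spq_left++ on success gives that slot
 * back: per the comment above, the statistics ramrod has a dedicated
 * slot and must not eat into the common slow-path budget.
 */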
3404
3405 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3406 {
3407         struct dmae_command *dmae = &bp->stats_dmae;
3408         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3409
3410         *stats_comp = DMAE_COMP_VAL;
3411         if (CHIP_REV_IS_SLOW(bp))
3412                 return;
3413
3414         /* loader */
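        /*
         * Editor's note: rather than posting each queued command from
         * the host, this builds one "loader" DMAE command whose source
         * is the first prepared command (dmae[0]) in host memory and
         * whose destination is the engine's own command memory
         * (DMAE_REG_CMD_MEM, slot loader_idx + 1); its completion
         * value is written to dmae_reg_go_c[loader_idx + 1], which
         * appears to kick off the freshly copied command.
         */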
3415         if (bp->executer_idx) {
3416                 int loader_idx = PMF_DMAE_C(bp);
3417
3418                 memset(dmae, 0, sizeof(struct dmae_command));
3419
3420                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3421                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3422                                 DMAE_CMD_DST_RESET |
3423 #ifdef __BIG_ENDIAN
3424                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3425 #else
3426                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3427 #endif
3428                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3429                                                DMAE_CMD_PORT_0) |
3430                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3431                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3432                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3433                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3434                                      sizeof(struct dmae_command) *
3435                                      (loader_idx + 1)) >> 2;
3436                 dmae->dst_addr_hi = 0;
3437                 dmae->len = sizeof(struct dmae_command) >> 2;
3438                 if (CHIP_IS_E1(bp))
3439                         dmae->len--;
3440                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3441                 dmae->comp_addr_hi = 0;
3442                 dmae->comp_val = 1;
3443
3444                 *stats_comp = 0;
3445                 bnx2x_post_dmae(bp, dmae, loader_idx);
3446
3447         } else if (bp->func_stx) {
3448                 *stats_comp = 0;
3449                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3450         }
3451 }
3452
3453 static int bnx2x_stats_comp(struct bnx2x *bp)
3454 {
3455         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3456         int cnt = 10;
3457
3458         might_sleep();
3459         while (*stats_comp != DMAE_COMP_VAL) {
3460                 if (!cnt) {
3461                         BNX2X_ERR("timeout waiting for stats to finish\n");
3462                         break;
3463                 }
3464                 cnt--;
3465                 msleep(1);
3466         }
3467         return 1;
3468 }
3469
3470 /*
3471  * Statistics service functions
3472  */
3473
3474 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3475 {
3476         struct dmae_command *dmae;
3477         u32 opcode;
3478         int loader_idx = PMF_DMAE_C(bp);
3479         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3480
3481         /* sanity */
3482         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3483                 BNX2X_ERR("BUG!\n");
3484                 return;
3485         }
3486
3487         bp->executer_idx = 0;
3488
3489         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3490                   DMAE_CMD_C_ENABLE |
3491                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3492 #ifdef __BIG_ENDIAN
3493                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3494 #else
3495                   DMAE_CMD_ENDIANITY_DW_SWAP |
3496 #endif
3497                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3498                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3499
3500         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3501         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3502         dmae->src_addr_lo = bp->port.port_stx >> 2;
3503         dmae->src_addr_hi = 0;
3504         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3505         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3506         dmae->len = DMAE_LEN32_RD_MAX;
3507         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3508         dmae->comp_addr_hi = 0;
3509         dmae->comp_val = 1;
3510
3511         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3512         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3513         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3514         dmae->src_addr_hi = 0;
3515         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3516                                    DMAE_LEN32_RD_MAX * 4);
3517         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3518                                    DMAE_LEN32_RD_MAX * 4);
3519         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3520         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3521         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3522         dmae->comp_val = DMAE_COMP_VAL;
3523
3524         *stats_comp = 0;
3525         bnx2x_hw_stats_post(bp);
3526         bnx2x_stats_comp(bp);
3527 }
3528
3529 static void bnx2x_port_stats_init(struct bnx2x *bp)
3530 {
3531         struct dmae_command *dmae;
3532         int port = BP_PORT(bp);
3533         int vn = BP_E1HVN(bp);
3534         u32 opcode;
3535         int loader_idx = PMF_DMAE_C(bp);
3536         u32 mac_addr;
3537         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3538
3539         /* sanity */
3540         if (!bp->link_vars.link_up || !bp->port.pmf) {
3541                 BNX2X_ERR("BUG!\n");
3542                 return;
3543         }
3544
3545         bp->executer_idx = 0;
3546
3547         /* MCP */
3548         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3549                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3550                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3551 #ifdef __BIG_ENDIAN
3552                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3553 #else
3554                   DMAE_CMD_ENDIANITY_DW_SWAP |
3555 #endif
3556                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3557                   (vn << DMAE_CMD_E1HVN_SHIFT));
3558
3559         if (bp->port.port_stx) {
3560
3561                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3562                 dmae->opcode = opcode;
3563                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3564                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3565                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3566                 dmae->dst_addr_hi = 0;
3567                 dmae->len = sizeof(struct host_port_stats) >> 2;
3568                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3569                 dmae->comp_addr_hi = 0;
3570                 dmae->comp_val = 1;
3571         }
3572
3573         if (bp->func_stx) {
3574
3575                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3576                 dmae->opcode = opcode;
3577                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3578                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3579                 dmae->dst_addr_lo = bp->func_stx >> 2;
3580                 dmae->dst_addr_hi = 0;
3581                 dmae->len = sizeof(struct host_func_stats) >> 2;
3582                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3583                 dmae->comp_addr_hi = 0;
3584                 dmae->comp_val = 1;
3585         }
3586
3587         /* MAC */
3588         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3589                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3590                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3591 #ifdef __BIG_ENDIAN
3592                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3593 #else
3594                   DMAE_CMD_ENDIANITY_DW_SWAP |
3595 #endif
3596                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3597                   (vn << DMAE_CMD_E1HVN_SHIFT));
3598
3599         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3600
3601                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3602                                    NIG_REG_INGRESS_BMAC0_MEM);
3603
3604                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3605                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3606                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3607                 dmae->opcode = opcode;
3608                 dmae->src_addr_lo = (mac_addr +
3609                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3610                 dmae->src_addr_hi = 0;
3611                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3612                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3613                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3614                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3615                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3616                 dmae->comp_addr_hi = 0;
3617                 dmae->comp_val = 1;
3618
3619                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3620                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3621                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3622                 dmae->opcode = opcode;
3623                 dmae->src_addr_lo = (mac_addr +
3624                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3625                 dmae->src_addr_hi = 0;
3626                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3627                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3628                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3629                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3630                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3631                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3632                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3633                 dmae->comp_addr_hi = 0;
3634                 dmae->comp_val = 1;
3635
3636         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3637
3638                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3639
3640                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3641                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3642                 dmae->opcode = opcode;
3643                 dmae->src_addr_lo = (mac_addr +
3644                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3645                 dmae->src_addr_hi = 0;
3646                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3647                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3648                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3649                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3650                 dmae->comp_addr_hi = 0;
3651                 dmae->comp_val = 1;
3652
3653                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3654                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3655                 dmae->opcode = opcode;
3656                 dmae->src_addr_lo = (mac_addr +
3657                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3658                 dmae->src_addr_hi = 0;
3659                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3660                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3661                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3662                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3663                 dmae->len = 1;
3664                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3665                 dmae->comp_addr_hi = 0;
3666                 dmae->comp_val = 1;
3667
3668                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3669                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3670                 dmae->opcode = opcode;
3671                 dmae->src_addr_lo = (mac_addr +
3672                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3673                 dmae->src_addr_hi = 0;
3674                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3675                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3676                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3677                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3678                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3679                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3680                 dmae->comp_addr_hi = 0;
3681                 dmae->comp_val = 1;
3682         }
3683
3684         /* NIG */
3685         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3686         dmae->opcode = opcode;
3687         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3688                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3689         dmae->src_addr_hi = 0;
3690         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3691         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3692         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3693         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3694         dmae->comp_addr_hi = 0;
3695         dmae->comp_val = 1;
3696
3697         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3698         dmae->opcode = opcode;
3699         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3700                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3701         dmae->src_addr_hi = 0;
3702         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3703                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3704         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3705                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3706         dmae->len = (2*sizeof(u32)) >> 2;
3707         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3708         dmae->comp_addr_hi = 0;
3709         dmae->comp_val = 1;
3710
3711         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3712         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3713                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3714                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3715 #ifdef __BIG_ENDIAN
3716                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3717 #else
3718                         DMAE_CMD_ENDIANITY_DW_SWAP |
3719 #endif
3720                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3721                         (vn << DMAE_CMD_E1HVN_SHIFT));
3722         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3723                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3724         dmae->src_addr_hi = 0;
3725         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3726                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3727         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3728                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3729         dmae->len = (2*sizeof(u32)) >> 2;
3730         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3731         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3732         dmae->comp_val = DMAE_COMP_VAL;
3733
3734         *stats_comp = 0;
3735 }
3736
3737 static void bnx2x_func_stats_init(struct bnx2x *bp)
3738 {
3739         struct dmae_command *dmae = &bp->stats_dmae;
3740         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3741
3742         /* sanity */
3743         if (!bp->func_stx) {
3744                 BNX2X_ERR("BUG!\n");
3745                 return;
3746         }
3747
3748         bp->executer_idx = 0;
3749         memset(dmae, 0, sizeof(struct dmae_command));
3750
3751         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3752                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3753                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3754 #ifdef __BIG_ENDIAN
3755                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3756 #else
3757                         DMAE_CMD_ENDIANITY_DW_SWAP |
3758 #endif
3759                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3760                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3761         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3762         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3763         dmae->dst_addr_lo = bp->func_stx >> 2;
3764         dmae->dst_addr_hi = 0;
3765         dmae->len = sizeof(struct host_func_stats) >> 2;
3766         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3767         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3768         dmae->comp_val = DMAE_COMP_VAL;
3769
3770         *stats_comp = 0;
3771 }
3772
3773 static void bnx2x_stats_start(struct bnx2x *bp)
3774 {
3775         if (bp->port.pmf)
3776                 bnx2x_port_stats_init(bp);
3777
3778         else if (bp->func_stx)
3779                 bnx2x_func_stats_init(bp);
3780
3781         bnx2x_hw_stats_post(bp);
3782         bnx2x_storm_stats_post(bp);
3783 }
3784
3785 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3786 {
3787         bnx2x_stats_comp(bp);
3788         bnx2x_stats_pmf_update(bp);
3789         bnx2x_stats_start(bp);
3790 }
3791
3792 static void bnx2x_stats_restart(struct bnx2x *bp)
3793 {
3794         bnx2x_stats_comp(bp);
3795         bnx2x_stats_start(bp);
3796 }
3797
3798 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3799 {
3800         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3801         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3802         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3803         struct {
3804                 u32 lo;
3805                 u32 hi;
3806         } diff;
3807
3808         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3809         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3810         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3811         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3812         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3813         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3814         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3815         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3816         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3817         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3818         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3819         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3820         UPDATE_STAT64(tx_stat_gt127,
3821                                 tx_stat_etherstatspkts65octetsto127octets);
3822         UPDATE_STAT64(tx_stat_gt255,
3823                                 tx_stat_etherstatspkts128octetsto255octets);
3824         UPDATE_STAT64(tx_stat_gt511,
3825                                 tx_stat_etherstatspkts256octetsto511octets);
3826         UPDATE_STAT64(tx_stat_gt1023,
3827                                 tx_stat_etherstatspkts512octetsto1023octets);
3828         UPDATE_STAT64(tx_stat_gt1518,
3829                                 tx_stat_etherstatspkts1024octetsto1522octets);
3830         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3831         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3832         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3833         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3834         UPDATE_STAT64(tx_stat_gterr,
3835                                 tx_stat_dot3statsinternalmactransmiterrors);
3836         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3837
3838         estats->pause_frames_received_hi =
3839                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3840         estats->pause_frames_received_lo =
3841                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3842
3843         estats->pause_frames_sent_hi =
3844                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3845         estats->pause_frames_sent_lo =
3846                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3847 }
3848
3849 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3850 {
3851         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3852         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3853         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3854
3855         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3856         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3857         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3858         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3859         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3860         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3861         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3862         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3863         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3864         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3865         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3866         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3867         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3868         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3869         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3870         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3871         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3872         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3873         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3874         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3875         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3876         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3877         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3878         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3879         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3880         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3881         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3882         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3883         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3884         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3885         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3886
3887         estats->pause_frames_received_hi =
3888                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3889         estats->pause_frames_received_lo =
3890                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3891         ADD_64(estats->pause_frames_received_hi,
3892                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3893                estats->pause_frames_received_lo,
3894                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3895
3896         estats->pause_frames_sent_hi =
3897                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3898         estats->pause_frames_sent_lo =
3899                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3900         ADD_64(estats->pause_frames_sent_hi,
3901                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3902                estats->pause_frames_sent_lo,
3903                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3904 }
3905
3906 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3907 {
3908         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3909         struct nig_stats *old = &(bp->port.old_nig_stats);
3910         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3911         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3912         struct {
3913                 u32 lo;
3914                 u32 hi;
3915         } diff;
3916         u32 nig_timer_max;
3917
3918         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3919                 bnx2x_bmac_stats_update(bp);
3920
3921         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3922                 bnx2x_emac_stats_update(bp);
3923
3924         else { /* unreached */
3925                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3926                 return -1;
3927         }
3928
3929         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3930                       new->brb_discard - old->brb_discard);
3931         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3932                       new->brb_truncate - old->brb_truncate);
3933
3934         UPDATE_STAT64_NIG(egress_mac_pkt0,
3935                                         etherstatspkts1024octetsto1522octets);
3936         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3937
3938         memcpy(old, new, sizeof(struct nig_stats));
3939
3940         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3941                sizeof(struct mac_stx));
3942         estats->brb_drop_hi = pstats->brb_drop_hi;
3943         estats->brb_drop_lo = pstats->brb_drop_lo;
3944
3945         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3946
3947         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3948         if (nig_timer_max != estats->nig_timer_max) {
3949                 estats->nig_timer_max = nig_timer_max;
3950                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3951         }
3952
3953         return 0;
3954 }
3955
3956 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3957 {
3958         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3959         struct tstorm_per_port_stats *tport =
3960                                         &stats->tstorm_common.port_statistics;
3961         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3962         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3963         int i;
3964
3965         memcpy(&(fstats->total_bytes_received_hi),
3966                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3967                sizeof(struct host_func_stats) - 2*sizeof(u32));
3968         estats->error_bytes_received_hi = 0;
3969         estats->error_bytes_received_lo = 0;
3970         estats->etherstatsoverrsizepkts_hi = 0;
3971         estats->etherstatsoverrsizepkts_lo = 0;
3972         estats->no_buff_discard_hi = 0;
3973         estats->no_buff_discard_lo = 0;
3974
3975         for_each_rx_queue(bp, i) {
3976                 struct bnx2x_fastpath *fp = &bp->fp[i];
3977                 int cl_id = fp->cl_id;
3978                 struct tstorm_per_client_stats *tclient =
3979                                 &stats->tstorm_common.client_statistics[cl_id];
3980                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3981                 struct ustorm_per_client_stats *uclient =
3982                                 &stats->ustorm_common.client_statistics[cl_id];
3983                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3984                 struct xstorm_per_client_stats *xclient =
3985                                 &stats->xstorm_common.client_statistics[cl_id];
3986                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3987                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3988                 u32 diff;
3989
3990                 /* are storm stats valid? */
3991                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3992                                                         bp->stats_counter) {
3993                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3994                            "  xstorm counter (%d) != stats_counter (%d)\n",
3995                            i, xclient->stats_counter, bp->stats_counter);
3996                         return -1;
3997                 }
3998                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3999                                                         bp->stats_counter) {
4000                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4001                            "  tstorm counter (%d) != stats_counter (%d)\n",
4002                            i, tclient->stats_counter, bp->stats_counter);
4003                         return -2;
4004                 }
4005                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4006                                                         bp->stats_counter) {
4007                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4008                            "  ustorm counter (%d) != stats_counter (%d)\n",
4009                            i, uclient->stats_counter, bp->stats_counter);
4010                         return -4;
4011                 }
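                /*
                 * Editor's note: the three checks above compare
                 * (u16)(counter + 1) so they stay correct across the
                 * natural 16-bit wrap of the counters.  Worked example
                 * (assumed values): if the storm last stamped 0xffff
                 * and the driver's stats_counter has advanced to 0,
                 * (u16)(0xffff + 1) == 0 still matches and the
                 * snapshot is accepted as current.
                 */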
4012
4013                 qstats->total_bytes_received_hi =
4014                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4015                 qstats->total_bytes_received_lo =
4016                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4017
4018                 ADD_64(qstats->total_bytes_received_hi,
4019                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4020                        qstats->total_bytes_received_lo,
4021                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4022
4023                 ADD_64(qstats->total_bytes_received_hi,
4024                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4025                        qstats->total_bytes_received_lo,
4026                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4027
4028                 qstats->valid_bytes_received_hi =
4029                                         qstats->total_bytes_received_hi;
4030                 qstats->valid_bytes_received_lo =
4031                                         qstats->total_bytes_received_lo;
4032
4033                 qstats->error_bytes_received_hi =
4034                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4035                 qstats->error_bytes_received_lo =
4036                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4037
4038                 ADD_64(qstats->total_bytes_received_hi,
4039                        qstats->error_bytes_received_hi,
4040                        qstats->total_bytes_received_lo,
4041                        qstats->error_bytes_received_lo);
4042
4043                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4044                                         total_unicast_packets_received);
4045                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4046                                         total_multicast_packets_received);
4047                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4048                                         total_broadcast_packets_received);
4049                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4050                                         etherstatsoverrsizepkts);
4051                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4052
4053                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4054                                         total_unicast_packets_received);
4055                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4056                                         total_multicast_packets_received);
4057                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4058                                         total_broadcast_packets_received);
4059                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4060                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4061                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4062
4063                 qstats->total_bytes_transmitted_hi =
4064                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4065                 qstats->total_bytes_transmitted_lo =
4066                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4067
4068                 ADD_64(qstats->total_bytes_transmitted_hi,
4069                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4070                        qstats->total_bytes_transmitted_lo,
4071                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4072
4073                 ADD_64(qstats->total_bytes_transmitted_hi,
4074                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4075                        qstats->total_bytes_transmitted_lo,
4076                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4077
4078                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4079                                         total_unicast_packets_transmitted);
4080                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4081                                         total_multicast_packets_transmitted);
4082                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4083                                         total_broadcast_packets_transmitted);
4084
4085                 old_tclient->checksum_discard = tclient->checksum_discard;
4086                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4087
4088                 ADD_64(fstats->total_bytes_received_hi,
4089                        qstats->total_bytes_received_hi,
4090                        fstats->total_bytes_received_lo,
4091                        qstats->total_bytes_received_lo);
4092                 ADD_64(fstats->total_bytes_transmitted_hi,
4093                        qstats->total_bytes_transmitted_hi,
4094                        fstats->total_bytes_transmitted_lo,
4095                        qstats->total_bytes_transmitted_lo);
4096                 ADD_64(fstats->total_unicast_packets_received_hi,
4097                        qstats->total_unicast_packets_received_hi,
4098                        fstats->total_unicast_packets_received_lo,
4099                        qstats->total_unicast_packets_received_lo);
4100                 ADD_64(fstats->total_multicast_packets_received_hi,
4101                        qstats->total_multicast_packets_received_hi,
4102                        fstats->total_multicast_packets_received_lo,
4103                        qstats->total_multicast_packets_received_lo);
4104                 ADD_64(fstats->total_broadcast_packets_received_hi,
4105                        qstats->total_broadcast_packets_received_hi,
4106                        fstats->total_broadcast_packets_received_lo,
4107                        qstats->total_broadcast_packets_received_lo);
4108                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4109                        qstats->total_unicast_packets_transmitted_hi,
4110                        fstats->total_unicast_packets_transmitted_lo,
4111                        qstats->total_unicast_packets_transmitted_lo);
4112                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4113                        qstats->total_multicast_packets_transmitted_hi,
4114                        fstats->total_multicast_packets_transmitted_lo,
4115                        qstats->total_multicast_packets_transmitted_lo);
4116                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4117                        qstats->total_broadcast_packets_transmitted_hi,
4118                        fstats->total_broadcast_packets_transmitted_lo,
4119                        qstats->total_broadcast_packets_transmitted_lo);
4120                 ADD_64(fstats->valid_bytes_received_hi,
4121                        qstats->valid_bytes_received_hi,
4122                        fstats->valid_bytes_received_lo,
4123                        qstats->valid_bytes_received_lo);
4124
4125                 ADD_64(estats->error_bytes_received_hi,
4126                        qstats->error_bytes_received_hi,
4127                        estats->error_bytes_received_lo,
4128                        qstats->error_bytes_received_lo);
4129                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4130                        qstats->etherstatsoverrsizepkts_hi,
4131                        estats->etherstatsoverrsizepkts_lo,
4132                        qstats->etherstatsoverrsizepkts_lo);
4133                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4134                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4135         }
4136
4137         ADD_64(fstats->total_bytes_received_hi,
4138                estats->rx_stat_ifhcinbadoctets_hi,
4139                fstats->total_bytes_received_lo,
4140                estats->rx_stat_ifhcinbadoctets_lo);
4141
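             /* Copy the function counters into eth_stats; the source starts
              * past host_func_stats_start and the 2*sizeof(u32) slack leaves
              * out the start/end sequence markers.
              */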
4142         memcpy(estats, &(fstats->total_bytes_received_hi),
4143                sizeof(struct host_func_stats) - 2*sizeof(u32));
4144
4145         ADD_64(estats->etherstatsoverrsizepkts_hi,
4146                estats->rx_stat_dot3statsframestoolong_hi,
4147                estats->etherstatsoverrsizepkts_lo,
4148                estats->rx_stat_dot3statsframestoolong_lo);
4149         ADD_64(estats->error_bytes_received_hi,
4150                estats->rx_stat_ifhcinbadoctets_hi,
4151                estats->error_bytes_received_lo,
4152                estats->rx_stat_ifhcinbadoctets_lo);
4153
4154         if (bp->port.pmf) {
4155                 estats->mac_filter_discard =
4156                                 le32_to_cpu(tport->mac_filter_discard);
4157                 estats->xxoverflow_discard =
4158                                 le32_to_cpu(tport->xxoverflow_discard);
4159                 estats->brb_truncate_discard =
4160                                 le32_to_cpu(tport->brb_truncate_discard);
4161                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4162         }
4163
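             /* advance both sequence markers together; a reader that sees
              * start != end presumably caught the block mid-update
              */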
4164         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4165
4166         bp->stats_pending = 0;
4167
4168         return 0;
4169 }
4170
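     /* bnx2x_hilo() (see bnx2x.h) folds a {hi, lo} u32 counter pair into a
      * single long; on 32-bit builds only the low half fits.
      */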
4171 static void bnx2x_net_stats_update(struct bnx2x *bp)
4172 {
4173         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4174         struct net_device_stats *nstats = &bp->dev->stats;
4175         int i;
4176
4177         nstats->rx_packets =
4178                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4179                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4180                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4181
4182         nstats->tx_packets =
4183                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4184                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4185                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4186
4187         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4188
4189         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4190
4191         nstats->rx_dropped = estats->mac_discard;
4192         for_each_rx_queue(bp, i)
4193                 nstats->rx_dropped +=
4194                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4195
4196         nstats->tx_dropped = 0;
4197
4198         nstats->multicast =
4199                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4200
4201         nstats->collisions =
4202                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4203
4204         nstats->rx_length_errors =
4205                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4206                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4207         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4208                                  bnx2x_hilo(&estats->brb_truncate_hi);
4209         nstats->rx_crc_errors =
4210                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4211         nstats->rx_frame_errors =
4212                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4213         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4214         nstats->rx_missed_errors = estats->xxoverflow_discard;
4215
4216         nstats->rx_errors = nstats->rx_length_errors +
4217                             nstats->rx_over_errors +
4218                             nstats->rx_crc_errors +
4219                             nstats->rx_frame_errors +
4220                             nstats->rx_fifo_errors +
4221                             nstats->rx_missed_errors;
4222
4223         nstats->tx_aborted_errors =
4224                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4225                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4226         nstats->tx_carrier_errors =
4227                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4228         nstats->tx_fifo_errors = 0;
4229         nstats->tx_heartbeat_errors = 0;
4230         nstats->tx_window_errors = 0;
4231
4232         nstats->tx_errors = nstats->tx_aborted_errors +
4233                             nstats->tx_carrier_errors +
4234             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4235 }
4236
4237 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4238 {
4239         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4240         int i;
4241
4242         estats->driver_xoff = 0;
4243         estats->rx_err_discard_pkt = 0;
4244         estats->rx_skb_alloc_failed = 0;
4245         estats->hw_csum_err = 0;
4246         for_each_rx_queue(bp, i) {
4247                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4248
4249                 estats->driver_xoff += qstats->driver_xoff;
4250                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4251                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4252                 estats->hw_csum_err += qstats->hw_csum_err;
4253         }
4254 }
4255
4256 static void bnx2x_stats_update(struct bnx2x *bp)
4257 {
4258         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4259
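             /* nothing to do until the previously posted stats DMAE completes */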
4260         if (*stats_comp != DMAE_COMP_VAL)
4261                 return;
4262
4263         if (bp->port.pmf)
4264                 bnx2x_hw_stats_update(bp);
4265
4266         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4267                 BNX2X_ERR("storm stats were not updated for 3 ticks\n");
4268                 bnx2x_panic();
4269                 return;
4270         }
4271
4272         bnx2x_net_stats_update(bp);
4273         bnx2x_drv_stats_update(bp);
4274
4275         if (bp->msglevel & NETIF_MSG_TIMER) {
4276                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4277                 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4278                 struct tstorm_per_client_stats *old_tclient =
4279                                                         &bp->fp->old_tclient;
4280                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4281                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4282                 struct net_device_stats *nstats = &bp->dev->stats;
4283                 int i;
4284
4285                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4286                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4287                                   "  tx pkt (%lx)\n",
4288                        bnx2x_tx_avail(fp0_tx),
4289                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4290                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4291                                   "  rx pkt (%lx)\n",
4292                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4293                              fp0_rx->rx_comp_cons),
4294                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4295                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4296                                   "brb truncate %u\n",
4297                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4298                        qstats->driver_xoff,
4299                        estats->brb_drop_lo, estats->brb_truncate_lo);
4300                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4301                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4302                         "mac_discard %u  mac_filter_discard %u  "
4303                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4304                         "ttl0_discard %u\n",
4305                        le32_to_cpu(old_tclient->checksum_discard),
4306                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4307                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4308                        estats->mac_discard, estats->mac_filter_discard,
4309                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4310                        le32_to_cpu(old_tclient->ttl0_discard));
4311
4312                 for_each_queue(bp, i) {
4313                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4314                                bnx2x_fp(bp, i, tx_pkt),
4315                                bnx2x_fp(bp, i, rx_pkt),
4316                                bnx2x_fp(bp, i, rx_calls));
4317                 }
4318         }
4319
4320         bnx2x_hw_stats_post(bp);
4321         bnx2x_storm_stats_post(bp);
4322 }
4323
4324 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4325 {
4326         struct dmae_command *dmae;
4327         u32 opcode;
4328         int loader_idx = PMF_DMAE_C(bp);
4329         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4330
4331         bp->executer_idx = 0;
4332
4333         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4334                   DMAE_CMD_C_ENABLE |
4335                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4336 #ifdef __BIG_ENDIAN
4337                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4338 #else
4339                   DMAE_CMD_ENDIANITY_DW_SWAP |
4340 #endif
4341                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4342                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4343
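             /* DMAE GRC addresses and lengths are in dwords, hence the >> 2
              * shifts on port_stx/func_stx and on the struct sizes below
              */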
4344         if (bp->port.port_stx) {
4345
4346                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4347                 if (bp->func_stx)
4348                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4349                 else
4350                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4351                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4352                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4353                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4354                 dmae->dst_addr_hi = 0;
4355                 dmae->len = sizeof(struct host_port_stats) >> 2;
4356                 if (bp->func_stx) {
4357                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4358                         dmae->comp_addr_hi = 0;
4359                         dmae->comp_val = 1;
4360                 } else {
4361                         dmae->comp_addr_lo =
4362                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4363                         dmae->comp_addr_hi =
4364                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4365                         dmae->comp_val = DMAE_COMP_VAL;
4366
4367                         *stats_comp = 0;
4368                 }
4369         }
4370
4371         if (bp->func_stx) {
4372
4373                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4374                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4375                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4376                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4377                 dmae->dst_addr_lo = bp->func_stx >> 2;
4378                 dmae->dst_addr_hi = 0;
4379                 dmae->len = sizeof(struct host_func_stats) >> 2;
4380                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4381                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4382                 dmae->comp_val = DMAE_COMP_VAL;
4383
4384                 *stats_comp = 0;
4385         }
4386 }
4387
4388 static void bnx2x_stats_stop(struct bnx2x *bp)
4389 {
4390         int update = 0;
4391
4392         bnx2x_stats_comp(bp);
4393
4394         if (bp->port.pmf)
4395                 update = (bnx2x_hw_stats_update(bp) == 0);
4396
4397         update |= (bnx2x_storm_stats_update(bp) == 0);
4398
4399         if (update) {
4400                 bnx2x_net_stats_update(bp);
4401
4402                 if (bp->port.pmf)
4403                         bnx2x_port_stats_stop(bp);
4404
4405                 bnx2x_hw_stats_post(bp);
4406                 bnx2x_stats_comp(bp);
4407         }
4408 }
4409
4410 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4411 {
4412 }
4413
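     /* Statistics state machine: indexed by [current state][event], each
      * entry gives the handler to run and the state to move to; the
      * transition itself is performed by bnx2x_stats_handle() below.
      */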
4414 static const struct {
4415         void (*action)(struct bnx2x *bp);
4416         enum bnx2x_stats_state next_state;
4417 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4418 /* state        event   */
4419 {
4420 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4421 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4422 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4423 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4424 },
4425 {
4426 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4427 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4428 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4429 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4430 }
4431 };
4432
4433 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4434 {
4435         enum bnx2x_stats_state state = bp->stats_state;
4436
4437         bnx2x_stats_stm[state][event].action(bp);
4438         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4439
4440         /* Make sure the state change is visible before it is acted upon */
4441         smp_wmb();
4442
4443         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4444                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4445                    state, event, bp->stats_state);
4446 }
4447
4448 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4449 {
4450         struct dmae_command *dmae;
4451         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4452
4453         /* sanity */
4454         if (!bp->port.pmf || !bp->port.port_stx) {
4455                 BNX2X_ERR("BUG!\n");
4456                 return;
4457         }
4458
4459         bp->executer_idx = 0;
4460
4461         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4462         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4463                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4464                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4465 #ifdef __BIG_ENDIAN
4466                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4467 #else
4468                         DMAE_CMD_ENDIANITY_DW_SWAP |
4469 #endif
4470                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4471                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4472         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4473         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4474         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4475         dmae->dst_addr_hi = 0;
4476         dmae->len = sizeof(struct host_port_stats) >> 2;
4477         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4478         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4479         dmae->comp_val = DMAE_COMP_VAL;
4480
4481         *stats_comp = 0;
4482         bnx2x_hw_stats_post(bp);
4483         bnx2x_stats_comp(bp);
4484 }
4485
4486 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4487 {
4488         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4489         int port = BP_PORT(bp);
4490         int func;
4491         u32 func_stx;
4492
4493         /* sanity */
4494         if (!bp->port.pmf || !bp->func_stx) {
4495                 BNX2X_ERR("BUG!\n");
4496                 return;
4497         }
4498
4499         /* save our func_stx */
4500         func_stx = bp->func_stx;
4501
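             /* as PMF, post a stats base for every vnic on this port, pointing
              * bp->func_stx at each function's mailbox parameter in turn
              */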
4502         for (vn = VN_0; vn < vn_max; vn++) {
4503                 func = 2*vn + port;
4504
4505                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4506                 bnx2x_func_stats_init(bp);
4507                 bnx2x_hw_stats_post(bp);
4508                 bnx2x_stats_comp(bp);
4509         }
4510
4511         /* restore our func_stx */
4512         bp->func_stx = func_stx;
4513 }
4514
4515 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4516 {
4517         struct dmae_command *dmae = &bp->stats_dmae;
4518         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4519
4520         /* sanity */
4521         if (!bp->func_stx) {
4522                 BNX2X_ERR("BUG!\n");
4523                 return;
4524         }
4525
4526         bp->executer_idx = 0;
4527         memset(dmae, 0, sizeof(struct dmae_command));
4528
4529         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4530                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4531                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4532 #ifdef __BIG_ENDIAN
4533                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4534 #else
4535                         DMAE_CMD_ENDIANITY_DW_SWAP |
4536 #endif
4537                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4538                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4539         dmae->src_addr_lo = bp->func_stx >> 2;
4540         dmae->src_addr_hi = 0;
4541         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4542         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4543         dmae->len = sizeof(struct host_func_stats) >> 2;
4544         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4545         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4546         dmae->comp_val = DMAE_COMP_VAL;
4547
4548         *stats_comp = 0;
4549         bnx2x_hw_stats_post(bp);
4550         bnx2x_stats_comp(bp);
4551 }
4552
4553 static void bnx2x_stats_init(struct bnx2x *bp)
4554 {
4555         int port = BP_PORT(bp);
4556         int func = BP_FUNC(bp);
4557         int i;
4558
4559         bp->stats_pending = 0;
4560         bp->executer_idx = 0;
4561         bp->stats_counter = 0;
4562
4563         /* port and func stats for management */
4564         if (!BP_NOMCP(bp)) {
4565                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4566                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4567
4568         } else {
4569                 bp->port.port_stx = 0;
4570                 bp->func_stx = 0;
4571         }
4572         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4573            bp->port.port_stx, bp->func_stx);
4574
4575         /* port stats */
4576         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4577         bp->port.old_nig_stats.brb_discard =
4578                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4579         bp->port.old_nig_stats.brb_truncate =
4580                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4581         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4582                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4583         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4584                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4585
4586         /* function stats */
4587         for_each_queue(bp, i) {
4588                 struct bnx2x_fastpath *fp = &bp->fp[i];
4589
4590                 memset(&fp->old_tclient, 0,
4591                        sizeof(struct tstorm_per_client_stats));
4592                 memset(&fp->old_uclient, 0,
4593                        sizeof(struct ustorm_per_client_stats));
4594                 memset(&fp->old_xclient, 0,
4595                        sizeof(struct xstorm_per_client_stats));
4596                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4597         }
4598
4599         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4600         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4601
4602         bp->stats_state = STATS_STATE_DISABLED;
4603
4604         if (bp->port.pmf) {
4605                 if (bp->port.port_stx)
4606                         bnx2x_port_stats_base_init(bp);
4607
4608                 if (bp->func_stx)
4609                         bnx2x_func_stats_base_init(bp);
4610
4611         } else if (bp->func_stx)
4612                 bnx2x_func_stats_base_update(bp);
4613 }
4614
4615 static void bnx2x_timer(unsigned long data)
4616 {
4617         struct bnx2x *bp = (struct bnx2x *) data;
4618
4619         if (!netif_running(bp->dev))
4620                 return;
4621
4622         if (atomic_read(&bp->intr_sem) != 0)
4623                 goto timer_restart;
4624
4625         if (poll) {
4626                 struct bnx2x_fastpath *fp = &bp->fp[0];
4627
4628                 bnx2x_tx_int(fp);
4629                 /* the Rx work count is not needed in poll mode */
4630                 bnx2x_rx_int(fp, 1000);
4631         }
4632
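             /* heartbeat towards the management FW: advance the driver pulse
              * sequence and cross-check it against the MCP's pulse
              */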
4633         if (!BP_NOMCP(bp)) {
4634                 int func = BP_FUNC(bp);
4635                 u32 drv_pulse;
4636                 u32 mcp_pulse;
4637
4638                 ++bp->fw_drv_pulse_wr_seq;
4639                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4640                 /* TBD - add SYSTEM_TIME */
4641                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4642                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4643
4644                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4645                              MCP_PULSE_SEQ_MASK);
4646                 /* The delta between driver pulse and mcp response
4647                  * should be 1 (before mcp response) or 0 (after mcp response)
4648                  */
4649                 if ((drv_pulse != mcp_pulse) &&
4650                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4651                         /* someone lost a heartbeat... */
4652                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4653                                   drv_pulse, mcp_pulse);
4654                 }
4655         }
4656
4657         if ((bp->state == BNX2X_STATE_OPEN) ||
4658             (bp->state == BNX2X_STATE_DISABLED))
4659                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4660
4661 timer_restart:
4662         mod_timer(&bp->timer, jiffies + bp->current_interval);
4663 }
4664
4665 /* end of Statistics */
4666
4667 /* nic init */
4668
4669 /*
4670  * nic init service functions
4671  */
4672
4673 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4674 {
4675         int port = BP_PORT(bp);
4676
4677         /* "CSTORM" */
4678         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4679                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4680                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4681         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4682                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4683                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4684 }
4685
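     /* Publish a per-queue host status block: write its DMA address for
      * both the USTORM and CSTORM sections, start with host coalescing
      * disabled on every index, then enable the IGU interrupt for it.
      */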
4686 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4687                           dma_addr_t mapping, int sb_id)
4688 {
4689         int port = BP_PORT(bp);
4690         int func = BP_FUNC(bp);
4691         int index;
4692         u64 section;
4693
4694         /* USTORM */
4695         section = ((u64)mapping) + offsetof(struct host_status_block,
4696                                             u_status_block);
4697         sb->u_status_block.status_block_id = sb_id;
4698
4699         REG_WR(bp, BAR_CSTRORM_INTMEM +
4700                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4701         REG_WR(bp, BAR_CSTRORM_INTMEM +
4702                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4703                U64_HI(section));
4704         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4705                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4706
4707         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4708                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4709                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4710
4711         /* CSTORM */
4712         section = ((u64)mapping) + offsetof(struct host_status_block,
4713                                             c_status_block);
4714         sb->c_status_block.status_block_id = sb_id;
4715
4716         REG_WR(bp, BAR_CSTRORM_INTMEM +
4717                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4718         REG_WR(bp, BAR_CSTRORM_INTMEM +
4719                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4720                U64_HI(section));
4721         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4722                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4723
4724         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4725                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4726                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4727
4728         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4729 }
4730
4731 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4732 {
4733         int func = BP_FUNC(bp);
4734
4735         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4736                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4737                         sizeof(struct tstorm_def_status_block)/4);
4738         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4739                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4740                         sizeof(struct cstorm_def_status_block_u)/4);
4741         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4742                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4743                         sizeof(struct cstorm_def_status_block_c)/4);
4744         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4745                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4746                         sizeof(struct xstorm_def_status_block)/4);
4747 }
4748
4749 static void bnx2x_init_def_sb(struct bnx2x *bp,
4750                               struct host_def_status_block *def_sb,
4751                               dma_addr_t mapping, int sb_id)
4752 {
4753         int port = BP_PORT(bp);
4754         int func = BP_FUNC(bp);
4755         int index, val, reg_offset;
4756         u64 section;
4757
4758         /* ATTN */
4759         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4760                                             atten_status_block);
4761         def_sb->atten_status_block.status_block_id = sb_id;
4762
4763         bp->attn_state = 0;
4764
4765         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4766                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4767
4768         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4769                 bp->attn_group[index].sig[0] = REG_RD(bp,
4770                                                      reg_offset + 0x10*index);
4771                 bp->attn_group[index].sig[1] = REG_RD(bp,
4772                                                reg_offset + 0x4 + 0x10*index);
4773                 bp->attn_group[index].sig[2] = REG_RD(bp,
4774                                                reg_offset + 0x8 + 0x10*index);
4775                 bp->attn_group[index].sig[3] = REG_RD(bp,
4776                                                reg_offset + 0xc + 0x10*index);
4777         }
4778
4779         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4780                              HC_REG_ATTN_MSG0_ADDR_L);
4781
4782         REG_WR(bp, reg_offset, U64_LO(section));
4783         REG_WR(bp, reg_offset + 4, U64_HI(section));
4784
4785         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4786
4787         val = REG_RD(bp, reg_offset);
4788         val |= sb_id;
4789         REG_WR(bp, reg_offset, val);
4790
4791         /* USTORM */
4792         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4793                                             u_def_status_block);
4794         def_sb->u_def_status_block.status_block_id = sb_id;
4795
4796         REG_WR(bp, BAR_CSTRORM_INTMEM +
4797                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4798         REG_WR(bp, BAR_CSTRORM_INTMEM +
4799                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4800                U64_HI(section));
4801         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4802                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4803
4804         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4805                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4806                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4807
4808         /* CSTORM */
4809         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4810                                             c_def_status_block);
4811         def_sb->c_def_status_block.status_block_id = sb_id;
4812
4813         REG_WR(bp, BAR_CSTRORM_INTMEM +
4814                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4815         REG_WR(bp, BAR_CSTRORM_INTMEM +
4816                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4817                U64_HI(section));
4818         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4819                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4820
4821         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4822                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4823                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4824
4825         /* TSTORM */
4826         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4827                                             t_def_status_block);
4828         def_sb->t_def_status_block.status_block_id = sb_id;
4829
4830         REG_WR(bp, BAR_TSTRORM_INTMEM +
4831                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4832         REG_WR(bp, BAR_TSTRORM_INTMEM +
4833                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4834                U64_HI(section));
4835         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4836                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4837
4838         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4839                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4840                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4841
4842         /* XSTORM */
4843         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4844                                             x_def_status_block);
4845         def_sb->x_def_status_block.status_block_id = sb_id;
4846
4847         REG_WR(bp, BAR_XSTRORM_INTMEM +
4848                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4849         REG_WR(bp, BAR_XSTRORM_INTMEM +
4850                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4851                U64_HI(section));
4852         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4853                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4854
4855         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4856                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4857                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4858
4859         bp->stats_pending = 0;
4860         bp->set_mac_pending = 0;
4861
4862         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4863 }
4864
4865 static void bnx2x_update_coalesce(struct bnx2x *bp)
4866 {
4867         int port = BP_PORT(bp);
4868         int i;
4869
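             /* rx_ticks/tx_ticks are in usec while the timeout fields are
              * written in 12-usec units (hence the /12); a timeout that
              * rounds to zero disables coalescing on that index via the
              * HC_DISABLE write
              */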
4870         for_each_queue(bp, i) {
4871                 int sb_id = bp->fp[i].sb_id;
4872
4873                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4874                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4875                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4876                                                       U_SB_ETH_RX_CQ_INDEX),
4877                         bp->rx_ticks/12);
4878                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4879                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4880                                                        U_SB_ETH_RX_CQ_INDEX),
4881                          (bp->rx_ticks/12) ? 0 : 1);
4882
4883                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4884                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4885                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4886                                                       C_SB_ETH_TX_CQ_INDEX),
4887                         bp->tx_ticks/12);
4888                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4889                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4890                                                        C_SB_ETH_TX_CQ_INDEX),
4891                          (bp->tx_ticks/12) ? 0 : 1);
4892         }
4893 }
4894
4895 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4896                                        struct bnx2x_fastpath *fp, int last)
4897 {
4898         int i;
4899
4900         for (i = 0; i < last; i++) {
4901                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4902                 struct sk_buff *skb = rx_buf->skb;
4903
4904                 if (skb == NULL) {
4905                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4906                         continue;
4907                 }
4908
4909                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4910                         pci_unmap_single(bp->pdev,
4911                                          pci_unmap_addr(rx_buf, mapping),
4912                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4913
4914                 dev_kfree_skb(skb);
4915                 rx_buf->skb = NULL;
4916         }
4917 }
4918
4919 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4920 {
4921         int func = BP_FUNC(bp);
4922         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4923                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4924         u16 ring_prod, cqe_ring_prod;
4925         int i, j;
4926
4927         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4928         DP(NETIF_MSG_IFUP,
4929            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4930
4931         if (bp->flags & TPA_ENABLE_FLAG) {
4932
4933                 for_each_rx_queue(bp, j) {
4934                         struct bnx2x_fastpath *fp = &bp->fp[j];
4935
4936                         for (i = 0; i < max_agg_queues; i++) {
4937                                 fp->tpa_pool[i].skb =
4938                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4939                                 if (!fp->tpa_pool[i].skb) {
4940                                         BNX2X_ERR("Failed to allocate TPA "
4941                                                   "skb pool for queue[%d] - "
4942                                                   "disabling TPA on this "
4943                                                   "queue!\n", j);
4944                                         bnx2x_free_tpa_pool(bp, fp, i);
4945                                         fp->disable_tpa = 1;
4946                                         break;
4947                                 }
4948                                 /* no DMA mapping for this bin yet */
4949                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4950                                                    mapping, 0);
4951                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4952                         }
4953                 }
4954         }
4955
4956         for_each_rx_queue(bp, j) {
4957                 struct bnx2x_fastpath *fp = &bp->fp[j];
4958
4959                 fp->rx_bd_cons = 0;
4960                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4961                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4962
4963                 /* Mark queue as Rx */
4964                 fp->is_rx_queue = 1;
4965
4966                 /* "next page" elements initialization */
4967                 /* SGE ring */
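                     /* the last two entries of each BD/SGE ring page point to
                      * the next page (the RCQ spends one whole CQE on this),
                      * hence the CNT * i - 2 and RCQ_DESC_CNT * i - 1 indexing
                      * below
                      */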
4968                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4969                         struct eth_rx_sge *sge;
4970
4971                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4972                         sge->addr_hi =
4973                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4974                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4975                         sge->addr_lo =
4976                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4977                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4978                 }
4979
4980                 bnx2x_init_sge_ring_bit_mask(fp);
4981
4982                 /* RX BD ring */
4983                 for (i = 1; i <= NUM_RX_RINGS; i++) {
4984                         struct eth_rx_bd *rx_bd;
4985
4986                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4987                         rx_bd->addr_hi =
4988                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4989                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4990                         rx_bd->addr_lo =
4991                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4992                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4993                 }
4994
4995                 /* CQ ring */
4996                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4997                         struct eth_rx_cqe_next_page *nextpg;
4998
4999                         nextpg = (struct eth_rx_cqe_next_page *)
5000                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5001                         nextpg->addr_hi =
5002                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5003                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5004                         nextpg->addr_lo =
5005                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5006                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5007                 }
5008
5009                 /* Allocate SGEs and initialize the ring elements */
5010                 for (i = 0, ring_prod = 0;
5011                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5012
5013                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5014                                 BNX2X_ERR("was only able to allocate "
5015                                           "%d rx sges\n", i);
5016                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5017                                 /* Cleanup already allocated elements */
5018                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5019                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5020                                 fp->disable_tpa = 1;
5021                                 ring_prod = 0;
5022                                 break;
5023                         }
5024                         ring_prod = NEXT_SGE_IDX(ring_prod);
5025                 }
5026                 fp->rx_sge_prod = ring_prod;
5027
5028                 /* Allocate BDs and initialize BD ring */
5029                 fp->rx_comp_cons = 0;
5030                 cqe_ring_prod = ring_prod = 0;
5031                 for (i = 0; i < bp->rx_ring_size; i++) {
5032                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5033                                 BNX2X_ERR("was only able to allocate "
5034                                           "%d rx skbs on queue[%d]\n", i, j);
5035                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5036                                 break;
5037                         }
5038                         ring_prod = NEXT_RX_IDX(ring_prod);
5039                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5040                         WARN_ON(ring_prod <= i);
5041                 }
5042
5043                 fp->rx_bd_prod = ring_prod;
5044                 /* must not have more available CQEs than BDs */
5045                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5046                                        cqe_ring_prod);
5047                 fp->rx_pkt = fp->rx_calls = 0;
5048
5049                 /* Warning!
5050                  * This will generate an interrupt (to the TSTORM);
5051                  * it must only be done after the chip has been initialized.
5052                  */
5053                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5054                                      fp->rx_sge_prod);
5055                 if (j != 0)
5056                         continue;
5057
5058                 REG_WR(bp, BAR_USTRORM_INTMEM +
5059                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5060                        U64_LO(fp->rx_comp_mapping));
5061                 REG_WR(bp, BAR_USTRORM_INTMEM +
5062                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5063                        U64_HI(fp->rx_comp_mapping));
5064         }
5065 }
5066
5067 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5068 {
5069         int i, j;
5070
5071         for_each_tx_queue(bp, j) {
5072                 struct bnx2x_fastpath *fp = &bp->fp[j];
5073
5074                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5075                         struct eth_tx_next_bd *tx_next_bd =
5076                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5077
5078                         tx_next_bd->addr_hi =
5079                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5080                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5081                         tx_next_bd->addr_lo =
5082                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5083                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5084                 }
5085
5086                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5087                 fp->tx_db.data.zero_fill1 = 0;
5088                 fp->tx_db.data.prod = 0;
5089
5090                 fp->tx_pkt_prod = 0;
5091                 fp->tx_pkt_cons = 0;
5092                 fp->tx_bd_prod = 0;
5093                 fp->tx_bd_cons = 0;
5094                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5095                 fp->tx_pkt = 0;
5096         }
5097
5098         /* clean tx statistics */
5099         for_each_rx_queue(bp, i)
5100                 bnx2x_fp(bp, i, tx_pkt) = 0;
5101 }
5102
5103 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5104 {
5105         int func = BP_FUNC(bp);
5106
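             /* slow-path (ramrod) queue: hand the ring base and the initial
              * producer index to the XSTORM, which consumes SPQ entries
              */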
5107         spin_lock_init(&bp->spq_lock);
5108
5109         bp->spq_left = MAX_SPQ_PENDING;
5110         bp->spq_prod_idx = 0;
5111         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5112         bp->spq_prod_bd = bp->spq;
5113         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5114
5115         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5116                U64_LO(bp->spq_mapping));
5117         REG_WR(bp,
5118                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5119                U64_HI(bp->spq_mapping));
5120
5121         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5122                bp->spq_prod_idx);
5123 }
5124
5125 static void bnx2x_init_context(struct bnx2x *bp)
5126 {
5127         int i;
5128
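             /* per-connection ETH context: the USTORM section describes the
              * Rx BD/SGE rings and TPA setup; the CSTORM/XSTORM sections
              * filled in the Tx loop below describe the Tx completion and
              * BD rings
              */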
5129         for_each_rx_queue(bp, i) {
5130                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5131                 struct bnx2x_fastpath *fp = &bp->fp[i];
5132                 u8 cl_id = fp->cl_id;
5133
5134                 context->ustorm_st_context.common.sb_index_numbers =
5135                                                 BNX2X_RX_SB_INDEX_NUM;
5136                 context->ustorm_st_context.common.clientId = cl_id;
5137                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5138                 context->ustorm_st_context.common.flags =
5139                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5140                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5141                 context->ustorm_st_context.common.statistics_counter_id =
5142                                                 cl_id;
5143                 context->ustorm_st_context.common.mc_alignment_log_size =
5144                                                 BNX2X_RX_ALIGN_SHIFT;
5145                 context->ustorm_st_context.common.bd_buff_size =
5146                                                 bp->rx_buf_size;
5147                 context->ustorm_st_context.common.bd_page_base_hi =
5148                                                 U64_HI(fp->rx_desc_mapping);
5149                 context->ustorm_st_context.common.bd_page_base_lo =
5150                                                 U64_LO(fp->rx_desc_mapping);
5151                 if (!fp->disable_tpa) {
5152                         context->ustorm_st_context.common.flags |=
5153                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5154                         context->ustorm_st_context.common.sge_buff_size =
5155                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5156                                          (u32)0xffff);
5157                         context->ustorm_st_context.common.sge_page_base_hi =
5158                                                 U64_HI(fp->rx_sge_mapping);
5159                         context->ustorm_st_context.common.sge_page_base_lo =
5160                                                 U64_LO(fp->rx_sge_mapping);
5161
5162                         context->ustorm_st_context.common.max_sges_for_packet =
5163                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5164                         context->ustorm_st_context.common.max_sges_for_packet =
5165                                 ((context->ustorm_st_context.common.
5166                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5167                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5168                 }
5169
5170                 context->ustorm_ag_context.cdu_usage =
5171                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5172                                                CDU_REGION_NUMBER_UCM_AG,
5173                                                ETH_CONNECTION_TYPE);
5174
5175                 context->xstorm_ag_context.cdu_reserved =
5176                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5177                                                CDU_REGION_NUMBER_XCM_AG,
5178                                                ETH_CONNECTION_TYPE);
5179         }
5180
5181         for_each_tx_queue(bp, i) {
5182                 struct bnx2x_fastpath *fp = &bp->fp[i];
5183                 struct eth_context *context =
5184                         bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5185
5186                 context->cstorm_st_context.sb_index_number =
5187                                                 C_SB_ETH_TX_CQ_INDEX;
5188                 context->cstorm_st_context.status_block_id = fp->sb_id;
5189
5190                 context->xstorm_st_context.tx_bd_page_base_hi =
5191                                                 U64_HI(fp->tx_desc_mapping);
5192                 context->xstorm_st_context.tx_bd_page_base_lo =
5193                                                 U64_LO(fp->tx_desc_mapping);
5194                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5195                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5196         }
5197 }
5198
5199 static void bnx2x_init_ind_table(struct bnx2x *bp)
5200 {
5201         int func = BP_FUNC(bp);
5202         int i;
5203
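             /* RSS indirection: map each hash bucket to a client ID, spread
              * round-robin over the Rx queues
              */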
5204         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5205                 return;
5206
5207         DP(NETIF_MSG_IFUP,
5208            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5209         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5210                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5211                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5212                         bp->fp->cl_id + (i % bp->num_rx_queues));
5213 }
5214
5215 static void bnx2x_set_client_config(struct bnx2x *bp)
5216 {
5217         struct tstorm_eth_client_config tstorm_client = {0};
5218         int port = BP_PORT(bp);
5219         int i;
5220
5221         tstorm_client.mtu = bp->dev->mtu;
5222         tstorm_client.config_flags =
5223                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5224                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5225 #ifdef BCM_VLAN
5226         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5227                 tstorm_client.config_flags |=
5228                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5229                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5230         }
5231 #endif
5232
5233         for_each_queue(bp, i) {
5234                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5235
5236                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5237                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5238                        ((u32 *)&tstorm_client)[0]);
5239                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5240                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5241                        ((u32 *)&tstorm_client)[1]);
5242         }
5243
5244         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5245            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5246 }
5247
5248 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5249 {
5250         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5251         int mode = bp->rx_mode;
5252         int mask = (1 << BP_L_ID(bp));
5253         int func = BP_FUNC(bp);
5254         int port = BP_PORT(bp);
5255         int i;
5256         /* All but management unicast packets should pass to the host as well */
5257         u32 llh_mask =
5258                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5259                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5260                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5261                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5262
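             /* each accept/drop field in tstorm_mac_filter is a per-client
              * bitmask; 'mask' selects this function's leading client
              */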
5263         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5264
5265         switch (mode) {
5266         case BNX2X_RX_MODE_NONE: /* no Rx */
5267                 tstorm_mac_filter.ucast_drop_all = mask;
5268                 tstorm_mac_filter.mcast_drop_all = mask;
5269                 tstorm_mac_filter.bcast_drop_all = mask;
5270                 break;
5271
5272         case BNX2X_RX_MODE_NORMAL:
5273                 tstorm_mac_filter.bcast_accept_all = mask;
5274                 break;
5275
5276         case BNX2X_RX_MODE_ALLMULTI:
5277                 tstorm_mac_filter.mcast_accept_all = mask;
5278                 tstorm_mac_filter.bcast_accept_all = mask;
5279                 break;
5280
5281         case BNX2X_RX_MODE_PROMISC:
5282                 tstorm_mac_filter.ucast_accept_all = mask;
5283                 tstorm_mac_filter.mcast_accept_all = mask;
5284                 tstorm_mac_filter.bcast_accept_all = mask;
5285                 /* pass management unicast packets as well */
5286                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5287                 break;
5288
5289         default:
5290                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5291                 break;
5292         }
5293
5294         REG_WR(bp,
5295                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5296                llh_mask);
5297
5298         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5299                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5300                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5301                        ((u32 *)&tstorm_mac_filter)[i]);
5302
5303 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5304                    ((u32 *)&tstorm_mac_filter)[i]); */
5305         }
5306
5307         if (mode != BNX2X_RX_MODE_NONE)
5308                 bnx2x_set_client_config(bp);
5309 }
5310
5311 static void bnx2x_init_internal_common(struct bnx2x *bp)
5312 {
5313         int i;
5314
5315         /* Zero this manually as its initialization is
5316            currently missing in the initTool */
5317         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5318                 REG_WR(bp, BAR_USTRORM_INTMEM +
5319                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5320 }
5321
5322 static void bnx2x_init_internal_port(struct bnx2x *bp)
5323 {
5324         int port = BP_PORT(bp);
5325
5326         REG_WR(bp,
5327                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5328         REG_WR(bp,
5329                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5330         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5331         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5332 }
5333
5334 static void bnx2x_init_internal_func(struct bnx2x *bp)
5335 {
5336         struct tstorm_eth_function_common_config tstorm_config = {0};
5337         struct stats_indication_flags stats_flags = {0};
5338         int port = BP_PORT(bp);
5339         int func = BP_FUNC(bp);
5340         int i, j;
5341         u32 offset;
5342         u16 max_agg_size;
5343
5344         if (is_multi(bp)) {
5345                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5346                 tstorm_config.rss_result_mask = MULTI_MASK;
5347         }
5348
5349         /* Enable TPA if needed */
5350         if (bp->flags & TPA_ENABLE_FLAG)
5351                 tstorm_config.config_flags |=
5352                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5353
5354         if (IS_E1HMF(bp))
5355                 tstorm_config.config_flags |=
5356                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5357
5358         tstorm_config.leading_client_id = BP_L_ID(bp);
5359
5360         REG_WR(bp, BAR_TSTRORM_INTMEM +
5361                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5362                (*(u32 *)&tstorm_config));
5363
5364         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5365         bnx2x_set_storm_rx_mode(bp);
5366
5367         for_each_queue(bp, i) {
5368                 u8 cl_id = bp->fp[i].cl_id;
5369
5370                 /* reset xstorm per client statistics */
5371                 offset = BAR_XSTRORM_INTMEM +
5372                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5373                 for (j = 0;
5374                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5375                         REG_WR(bp, offset + j*4, 0);
5376
5377                 /* reset tstorm per client statistics */
5378                 offset = BAR_TSTRORM_INTMEM +
5379                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5380                 for (j = 0;
5381                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5382                         REG_WR(bp, offset + j*4, 0);
5383
5384                 /* reset ustorm per client statistics */
5385                 offset = BAR_USTRORM_INTMEM +
5386                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5387                 for (j = 0;
5388                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5389                         REG_WR(bp, offset + j*4, 0);
5390         }
5391
5392         /* Init statistics related context */
5393         stats_flags.collect_eth = 1;
5394
5395         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5396                ((u32 *)&stats_flags)[0]);
5397         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5398                ((u32 *)&stats_flags)[1]);
5399
5400         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5401                ((u32 *)&stats_flags)[0]);
5402         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5403                ((u32 *)&stats_flags)[1]);
5404
5405         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5406                ((u32 *)&stats_flags)[0]);
5407         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5408                ((u32 *)&stats_flags)[1]);
5409
5410         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5411                ((u32 *)&stats_flags)[0]);
5412         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5413                ((u32 *)&stats_flags)[1]);
5414
5415         REG_WR(bp, BAR_XSTRORM_INTMEM +
5416                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5417                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5418         REG_WR(bp, BAR_XSTRORM_INTMEM +
5419                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5420                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5421
5422         REG_WR(bp, BAR_TSTRORM_INTMEM +
5423                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5424                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5425         REG_WR(bp, BAR_TSTRORM_INTMEM +
5426                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5427                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5428
5429         REG_WR(bp, BAR_USTRORM_INTMEM +
5430                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5431                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5432         REG_WR(bp, BAR_USTRORM_INTMEM +
5433                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5434                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5435
5436         if (CHIP_IS_E1H(bp)) {
5437                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5438                         IS_E1HMF(bp));
5439                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5440                         IS_E1HMF(bp));
5441                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5442                         IS_E1HMF(bp));
5443                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5444                         IS_E1HMF(bp));
5445
5446                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5447                          bp->e1hov);
5448         }
5449
5450         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5451         max_agg_size =
5452                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5453                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5454                     (u32)0xffff);
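              /* For example (an illustrative calculation, assuming 4K SGE
               * pages and PAGES_PER_SGE == 2): min(8, MAX_SKB_FRAGS) *
               * 4096 * 2 = 64KB, which the outer min() then clamps to the
               * u16 limit 0xffff */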
5455         for_each_rx_queue(bp, i) {
5456                 struct bnx2x_fastpath *fp = &bp->fp[i];
5457
5458                 REG_WR(bp, BAR_USTRORM_INTMEM +
5459                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5460                        U64_LO(fp->rx_comp_mapping));
5461                 REG_WR(bp, BAR_USTRORM_INTMEM +
5462                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5463                        U64_HI(fp->rx_comp_mapping));
5464
5465                 /* Next page */
5466                 REG_WR(bp, BAR_USTRORM_INTMEM +
5467                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5468                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5469                 REG_WR(bp, BAR_USTRORM_INTMEM +
5470                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5471                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5472
5473                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5474                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5475                          max_agg_size);
5476         }
5477
5478         /* dropless flow control */
5479         if (CHIP_IS_E1H(bp)) {
5480                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5481
5482                 rx_pause.bd_thr_low = 250;
5483                 rx_pause.cqe_thr_low = 250;
5484                 rx_pause.cos = 1;
5485                 rx_pause.sge_thr_low = 0;
5486                 rx_pause.bd_thr_high = 350;
5487                 rx_pause.cqe_thr_high = 350;
5488                 rx_pause.sge_thr_high = 0;
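                      /* Presumably the FW asserts pause when the free
                       * BD/CQE count drops below *_thr_low and releases it
                       * again above *_thr_high; the SGE thresholds only
                       * matter when TPA keeps the SGE ring populated
                       * (set below) */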
5489
5490                 for_each_rx_queue(bp, i) {
5491                         struct bnx2x_fastpath *fp = &bp->fp[i];
5492
5493                         if (!fp->disable_tpa) {
5494                                 rx_pause.sge_thr_low = 150;
5495                                 rx_pause.sge_thr_high = 250;
5496                         }
5497
5498
5499                         offset = BAR_USTRORM_INTMEM +
5500                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5501                                                                    fp->cl_id);
5502                         for (j = 0;
5503                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5504                              j++)
5505                                 REG_WR(bp, offset + j*4,
5506                                        ((u32 *)&rx_pause)[j]);
5507                 }
5508         }
5509
5510         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5511
5512         /* Init rate shaping and fairness contexts */
5513         if (IS_E1HMF(bp)) {
5514                 int vn;
5515
5516                 /* During init there is no active link.
5517                    Until link is up, set the link rate to 10Gbps */
5518                 bp->link_vars.line_speed = SPEED_10000;
5519                 bnx2x_init_port_minmax(bp);
5520
5521                 bnx2x_calc_vn_weight_sum(bp);
5522
5523                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5524                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5525
5526                 /* Enable rate shaping and fairness */
5527                 bp->cmng.flags.cmng_enables =
5528                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5529                 if (bp->vn_weight_sum)
5530                         bp->cmng.flags.cmng_enables |=
5531                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5532                 else
5533                         DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
5534                            " fairness will be disabled\n");
5535         } else {
5536                 /* rate shaping and fairness are disabled */
5537                 DP(NETIF_MSG_IFUP,
5538                    "single function mode, minmax will be disabled\n");
5539         }
5540
5541
5542         /* Store it to internal memory */
5543         if (bp->port.pmf)
5544                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5545                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5546                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5547                                ((u32 *)(&bp->cmng))[i]);
5548 }
5549
5550 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5551 {
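              /* The cases deliberately fall through: a COMMON load also
               * runs the PORT and FUNCTION init, and a PORT load also
               * runs the FUNCTION init */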
5552         switch (load_code) {
5553         case FW_MSG_CODE_DRV_LOAD_COMMON:
5554                 bnx2x_init_internal_common(bp);
5555                 /* no break */
5556
5557         case FW_MSG_CODE_DRV_LOAD_PORT:
5558                 bnx2x_init_internal_port(bp);
5559                 /* no break */
5560
5561         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5562                 bnx2x_init_internal_func(bp);
5563                 break;
5564
5565         default:
5566                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5567                 break;
5568         }
5569 }
5570
5571 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5572 {
5573         int i;
5574
5575         for_each_queue(bp, i) {
5576                 struct bnx2x_fastpath *fp = &bp->fp[i];
5577
5578                 fp->bp = bp;
5579                 fp->state = BNX2X_FP_STATE_CLOSED;
5580                 fp->index = i;
5581                 fp->cl_id = BP_L_ID(bp) + i;
5582                 fp->sb_id = fp->cl_id;
5583                 /* Rx and Tx queues with the same index share a client */
5584                 if (i >= bp->num_rx_queues)
5585                         fp->cl_id -= bp->num_rx_queues;
5586                 DP(NETIF_MSG_IFUP,
5587                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5588                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5589                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5590                               fp->sb_id);
5591                 bnx2x_update_fpsb_idx(fp);
5592         }
5593
5594         /* ensure status block indices were read */
5595         rmb();
5596
5597
5598         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5599                           DEF_SB_ID);
5600         bnx2x_update_dsb_idx(bp);
5601         bnx2x_update_coalesce(bp);
5602         bnx2x_init_rx_rings(bp);
5603         bnx2x_init_tx_ring(bp);
5604         bnx2x_init_sp_ring(bp);
5605         bnx2x_init_context(bp);
5606         bnx2x_init_internal(bp, load_code);
5607         bnx2x_init_ind_table(bp);
5608         bnx2x_stats_init(bp);
5609
5610         /* At this point, we are ready for interrupts */
5611         atomic_set(&bp->intr_sem, 0);
5612
5613         /* flush all before enabling interrupts */
5614         mb();
5615         mmiowb();
5616
5617         bnx2x_int_enable(bp);
5618
5619         /* Check for SPIO5 */
5620         bnx2x_attn_int_deasserted0(bp,
5621                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5622                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5623 }
5624
5625 /* end of nic init */
5626
5627 /*
5628  * gzip service functions
5629  */
5630
5631 static int bnx2x_gunzip_init(struct bnx2x *bp)
5632 {
5633         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5634                                               &bp->gunzip_mapping);
5635         if (bp->gunzip_buf == NULL)
5636                 goto gunzip_nomem1;
5637
5638         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5639         if (bp->strm == NULL)
5640                 goto gunzip_nomem2;
5641
5642         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5643                                       GFP_KERNEL);
5644         if (bp->strm->workspace == NULL)
5645                 goto gunzip_nomem3;
5646
5647         return 0;
5648
5649 gunzip_nomem3:
5650         kfree(bp->strm);
5651         bp->strm = NULL;
5652
5653 gunzip_nomem2:
5654         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5655                             bp->gunzip_mapping);
5656         bp->gunzip_buf = NULL;
5657
5658 gunzip_nomem1:
5659         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5660                " decompression\n", bp->dev->name);
5661         return -ENOMEM;
5662 }
5663
5664 static void bnx2x_gunzip_end(struct bnx2x *bp)
5665 {
5666         kfree(bp->strm->workspace);
5667
5668         kfree(bp->strm);
5669         bp->strm = NULL;
5670
5671         if (bp->gunzip_buf) {
5672                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5673                                     bp->gunzip_mapping);
5674                 bp->gunzip_buf = NULL;
5675         }
5676 }
5677
5678 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5679 {
5680         int n, rc;
5681
5682         /* check gzip header */
5683         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5684                 BNX2X_ERR("Bad gzip header\n");
5685                 return -EINVAL;
5686         }
5687
5688         n = 10;
5689
5690 #define FNAME                           0x8
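     /* Byte 3 of the gzip header holds the FLG bits; FNAME (bit 3) means a
      * NUL-terminated original file name follows the 10-byte fixed header
      * (magic 0x1f 0x8b, CM, FLG, MTIME, XFL, OS) and must be skipped */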
5691
5692         if (zbuf[3] & FNAME)
5693                 while ((n < len) && (zbuf[n++] != 0));
5694
5695         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5696         bp->strm->avail_in = len - n;
5697         bp->strm->next_out = bp->gunzip_buf;
5698         bp->strm->avail_out = FW_BUF_SIZE;
5699
5700         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5701         if (rc != Z_OK)
5702                 return rc;
5703
5704         rc = zlib_inflate(bp->strm, Z_FINISH);
5705         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5706                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5707                        bp->dev->name, bp->strm->msg);
5708
5709         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5710         if (bp->gunzip_outlen & 0x3)
5711                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5712                                     " gunzip_outlen (%d) not aligned\n",
5713                        bp->dev->name, bp->gunzip_outlen);
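              /* convert the byte count to 32-bit words; the init code
               * presumably consumes the decompressed image one word at
               * a time */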
5714         bp->gunzip_outlen >>= 2;
5715
5716         zlib_inflateEnd(bp->strm);
5717
5718         if (rc == Z_STREAM_END)
5719                 return 0;
5720
5721         return rc;
5722 }
5723
5724 /* nic load/unload */
5725
5726 /*
5727  * General service functions
5728  */
5729
5730 /* send a NIG loopback debug packet */
5731 static void bnx2x_lb_pckt(struct bnx2x *bp)
5732 {
5733         u32 wb_write[3];
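              /* Each DMAE write below carries 64 bits of packet data plus
               * a control word; judging by the comments, bit 5 (0x20)
               * marks SOP and bit 4 (0x10) marks EOP */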
5734
5735         /* Ethernet source and destination addresses */
5736         wb_write[0] = 0x55555555;
5737         wb_write[1] = 0x55555555;
5738         wb_write[2] = 0x20;             /* SOP */
5739         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5740
5741         /* NON-IP protocol */
5742         wb_write[0] = 0x09000000;
5743         wb_write[1] = 0x55555555;
5744         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5745         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5746 }
5747
5748 /* Some of the internal memories are not directly
5749  * readable from the driver; to test them
5750  * we send debug packets
5751  */
5752 static int bnx2x_int_mem_test(struct bnx2x *bp)
5753 {
5754         int factor;
5755         int count, i;
5756         u32 val = 0;
5757
5758         if (CHIP_REV_IS_FPGA(bp))
5759                 factor = 120;
5760         else if (CHIP_REV_IS_EMUL(bp))
5761                 factor = 200;
5762         else
5763                 factor = 1;
5764
5765         DP(NETIF_MSG_HW, "start part1\n");
5766
5767         /* Disable inputs of parser neighbor blocks */
5768         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5769         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5770         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5771         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5772
5773         /*  Write 0 to parser credits for CFC search request */
5774         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5775
5776         /* send Ethernet packet */
5777         bnx2x_lb_pckt(bp);
5778
5779         /* TODO: do we need to reset the NIG statistics? */
5780         /* Wait until NIG register shows 1 packet of size 0x10 */
5781         count = 1000 * factor;
5782         while (count) {
5783
5784                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5785                 val = *bnx2x_sp(bp, wb_data[0]);
5786                 if (val == 0x10)
5787                         break;
5788
5789                 msleep(10);
5790                 count--;
5791         }
5792         if (val != 0x10) {
5793                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5794                 return -1;
5795         }
5796
5797         /* Wait until PRS register shows 1 packet */
5798         count = 1000 * factor;
5799         while (count) {
5800                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5801                 if (val == 1)
5802                         break;
5803
5804                 msleep(10);
5805                 count--;
5806         }
5807         if (val != 0x1) {
5808                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5809                 return -2;
5810         }
5811
5812         /* Reset and init BRB, PRS */
5813         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5814         msleep(50);
5815         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5816         msleep(50);
5817         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5818         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5819
5820         DP(NETIF_MSG_HW, "part2\n");
5821
5822         /* Disable inputs of parser neighbor blocks */
5823         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5824         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5825         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5826         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5827
5828         /* Write 0 to parser credits for CFC search request */
5829         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5830
5831         /* send 10 Ethernet packets */
5832         for (i = 0; i < 10; i++)
5833                 bnx2x_lb_pckt(bp);
5834
5835         /* Wait until NIG register shows 10 + 1
5836            packets of size 11*0x10 = 0xb0 */
5837         count = 1000 * factor;
5838         while (count) {
5839
5840                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5841                 val = *bnx2x_sp(bp, wb_data[0]);
5842                 if (val == 0xb0)
5843                         break;
5844
5845                 msleep(10);
5846                 count--;
5847         }
5848         if (val != 0xb0) {
5849                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5850                 return -3;
5851         }
5852
5853         /* Wait until PRS register shows 2 packets */
5854         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5855         if (val != 2)
5856                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5857
5858         /* Write 1 to parser credits for CFC search request */
5859         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5860
5861         /* Wait for the parser to process the extra packet */
5862         msleep(10 * factor);
5863         /* The PRS packet counter should now show 3 packets */
5864         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5865         if (val != 3)
5866                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5867
5868         /* clear NIG EOP FIFO */
5869         for (i = 0; i < 11; i++)
5870                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5871         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5872         if (val != 1) {
5873                 BNX2X_ERR("clear of NIG failed\n");
5874                 return -4;
5875         }
5876
5877         /* Reset and init BRB, PRS, NIG */
5878         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5879         msleep(50);
5880         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5881         msleep(50);
5882         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5883         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5884 #ifndef BCM_ISCSI
5885         /* set NIC mode */
5886         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5887 #endif
5888
5889         /* Enable inputs of parser neighbor blocks */
5890         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5891         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5892         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5893         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5894
5895         DP(NETIF_MSG_HW, "done\n");
5896
5897         return 0; /* OK */
5898 }
5899
5900 static void enable_blocks_attention(struct bnx2x *bp)
5901 {
5902         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5903         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5904         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5905         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5906         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5907         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5908         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5909         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5910         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5911 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5912 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5913         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5914         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5915         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5916 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5917 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5918         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5919         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5920         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5921         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5922 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5923 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5924         if (CHIP_REV_IS_FPGA(bp))
5925                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5926         else
5927                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5928         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5929         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5930         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5931 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5932 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5933         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5934         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5935 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5936         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
5937 }
5938
5939
5940 static void bnx2x_reset_common(struct bnx2x *bp)
5941 {
5942         /* reset_common */
5943         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5944                0xd3ffff7f);
5945         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5946 }
5947
5948 static void bnx2x_init_pxp(struct bnx2x *bp)
5949 {
5950         u16 devctl;
5951         int r_order, w_order;
5952
5953         pci_read_config_word(bp->pdev,
5954                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5955         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
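              /* Standard PCIe DEVCTL encoding: Max_Payload_Size lives in
               * bits 7:5 and Max_Read_Request_Size in bits 14:12, each
               * coding a size of 128 << value bytes */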
5956         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5957         if (bp->mrrs == -1)
5958                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5959         else {
5960                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5961                 r_order = bp->mrrs;
5962         }
5963
5964         bnx2x_init_pxp_arb(bp, r_order, w_order);
5965 }
5966
5967 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5968 {
5969         u32 val;
5970         u8 port;
5971         u8 is_required = 0;
5972
5973         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5974               SHARED_HW_CFG_FAN_FAILURE_MASK;
5975
5976         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5977                 is_required = 1;
5978
5979         /*
5980          * The fan failure mechanism is usually related to the PHY type since
5981          * the power consumption of the board is affected by the PHY. Currently,
5982          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5983          */
5984         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5985                 for (port = PORT_0; port < PORT_MAX; port++) {
5986                         u32 phy_type =
5987                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
5988                                          external_phy_config) &
5989                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5990                         is_required |=
5991                                 ((phy_type ==
5992                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5993                                  (phy_type ==
5994                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5995                                  (phy_type ==
5996                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5997                 }
5998
5999         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6000
6001         if (is_required == 0)
6002                 return;
6003
6004         /* Fan failure is indicated by SPIO 5 */
6005         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6006                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6007
6008         /* set to active low mode */
6009         val = REG_RD(bp, MISC_REG_SPIO_INT);
6010         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6011                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6012         REG_WR(bp, MISC_REG_SPIO_INT, val);
6013
6014         /* enable interrupt to signal the IGU */
6015         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6016         val |= (1 << MISC_REGISTERS_SPIO_5);
6017         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6018 }
6019
6020 static int bnx2x_init_common(struct bnx2x *bp)
6021 {
6022         u32 val, i;
6023
6024         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6025
6026         bnx2x_reset_common(bp);
6027         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6028         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6029
6030         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6031         if (CHIP_IS_E1H(bp))
6032                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6033
6034         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6035         msleep(30);
6036         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6037
6038         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6039         if (CHIP_IS_E1(bp)) {
6040                 /* enable HW interrupt from PXP on USDM overflow
6041                    bit 16 on INT_MASK_0 */
6042                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6043         }
6044
6045         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6046         bnx2x_init_pxp(bp);
6047
6048 #ifdef __BIG_ENDIAN
6049         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6050         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6051         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6052         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6053         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6054         /* make sure this value is 0 */
6055         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6056
6057 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6058         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6059         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6060         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6061         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6062 #endif
6063
6064         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6065 #ifdef BCM_ISCSI
6066         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6067         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6068         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6069 #endif
6070
6071         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6072                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6073
6074         /* let the HW do its magic ... */
6075         msleep(100);
6076         /* finish PXP init */
6077         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6078         if (val != 1) {
6079                 BNX2X_ERR("PXP2 CFG failed\n");
6080                 return -EBUSY;
6081         }
6082         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6083         if (val != 1) {
6084                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6085                 return -EBUSY;
6086         }
6087
6088         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6089         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6090
6091         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6092
6093         /* clean the DMAE memory */
6094         bp->dmae_ready = 1;
6095         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6096
6097         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6098         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6099         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6100         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6101
6102         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6103         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6104         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6105         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6106
6107         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6108         /* soft reset pulse */
6109         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6110         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6111
6112 #ifdef BCM_ISCSI
6113         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6114 #endif
6115
6116         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6117         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6118         if (!CHIP_REV_IS_SLOW(bp)) {
6119                 /* enable hw interrupt from doorbell Q */
6120                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6121         }
6122
6123         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6124         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6125         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6126         /* set NIC mode */
6127         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6128         if (CHIP_IS_E1H(bp))
6129                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6130
6131         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6132         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6133         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6134         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6135
6136         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6137         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6138         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6139         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6140
6141         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6142         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6143         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6144         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6145
6146         /* sync semi rtc */
6147         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6148                0x80000000);
6149         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6150                0x80000000);
6151
6152         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6153         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6154         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6155
6156         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6157         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6158                 REG_WR(bp, i, 0xc0cac01a);
6159                 /* TODO: replace with something meaningful */
6160         }
6161         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6162         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6163
6164         if (sizeof(union cdu_context) != 1024)
6165                 /* we currently assume that a context is 1024 bytes */
6166                 printk(KERN_ALERT PFX "please adjust the size of"
6167                        " cdu_context (%ld)\n", (long)sizeof(union cdu_context));
6168
6169         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6170         val = (4 << 24) + (0 << 12) + 1024;
6171         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6172
6173         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6174         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6175         /* enable context validation interrupt from CFC */
6176         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6177
6178         /* set the thresholds to prevent CFC/CDU race */
6179         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6180
6181         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6182         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6183
6184         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6185         /* Reset PCIE errors for debug */
6186         REG_WR(bp, 0x2814, 0xffffffff);
6187         REG_WR(bp, 0x3820, 0xffffffff);
6188
6189         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6190         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6191         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6192         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6193
6194         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6195         if (CHIP_IS_E1H(bp)) {
6196                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6197                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6198         }
6199
6200         if (CHIP_REV_IS_SLOW(bp))
6201                 msleep(200);
6202
6203         /* finish CFC init */
6204         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6205         if (val != 1) {
6206                 BNX2X_ERR("CFC LL_INIT failed\n");
6207                 return -EBUSY;
6208         }
6209         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6210         if (val != 1) {
6211                 BNX2X_ERR("CFC AC_INIT failed\n");
6212                 return -EBUSY;
6213         }
6214         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6215         if (val != 1) {
6216                 BNX2X_ERR("CFC CAM_INIT failed\n");
6217                 return -EBUSY;
6218         }
6219         REG_WR(bp, CFC_REG_DEBUG0, 0);
6220
6221         /* read the NIG statistic to see if this is
6222            the first load since power-up */
6223         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6224         val = *bnx2x_sp(bp, wb_data[0]);
6225
6226         /* do internal memory self test */
6227         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6228                 BNX2X_ERR("internal mem self test failed\n");
6229                 return -EBUSY;
6230         }
6231
6232         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6233         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6234         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6235         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6236         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6237                 bp->port.need_hw_lock = 1;
6238                 break;
6239
6240         default:
6241                 break;
6242         }
6243
6244         bnx2x_setup_fan_failure_detection(bp);
6245
6246         /* clear PXP2 attentions */
6247         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6248
6249         enable_blocks_attention(bp);
6250
6251         if (!BP_NOMCP(bp)) {
6252                 bnx2x_acquire_phy_lock(bp);
6253                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6254                 bnx2x_release_phy_lock(bp);
6255         } else
6256                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6257
6258         return 0;
6259 }
6260
6261 static int bnx2x_init_port(struct bnx2x *bp)
6262 {
6263         int port = BP_PORT(bp);
6264         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6265         u32 low, high;
6266         u32 val;
6267
6268         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6269
6270         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6271
6272         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6273         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6274
6275         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6276         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6277         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6278 #ifdef BCM_ISCSI
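              /* NOTE: this iSCSI block looks stale: i, wb_write and func
               * are not declared in this scope, and the ONCHIP_ADDR*() /
               * PXP_ONE_ILT() macros are only defined further down in
               * this file */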
6279         /* Port0  1
6280          * Port1  385 */
6281         i++;
6282         wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6283         wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6284         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6285         REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6286
6287         /* Port0  2
6288          * Port1  386 */
6289         i++;
6290         wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6291         wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6292         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6293         REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6294
6295         /* Port0  3
6296          * Port1  387 */
6297         i++;
6298         wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6299         wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6300         REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6301         REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6302 #endif
6303         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6304
6305 #ifdef BCM_ISCSI
6306         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6307         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6308
6309         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6310 #endif
6311         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6312
6313         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6314         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6315                 /* no pause for emulation and FPGA */
6316                 low = 0;
6317                 high = 513;
6318         } else {
6319                 if (IS_E1HMF(bp))
6320                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6321                 else if (bp->dev->mtu > 4096) {
6322                         if (bp->flags & ONE_PORT_FLAG)
6323                                 low = 160;
6324                         else {
6325                                 val = bp->dev->mtu;
6326                                 /* (24*1024 + val*4)/256 */
6327                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6328                         }
6329                 } else
6330                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6331                 high = low + 56;        /* 14*1024/256 */
6332         }
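              /* Judging by the /256 factors above, the thresholds appear
               * to be expressed in 256-byte BRB blocks */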
6333         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6334         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6335
6336
6337         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6338
6339         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6340         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6341         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6342         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6343
6344         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6345         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6346         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6347         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6348
6349         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6350         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6351
6352         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6353
6354         /* configure PBF to work without PAUSE mtu 9000 */
6355         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6356
6357         /* update threshold */
6358         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6359         /* update init credit */
6360         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6361
6362         /* probe changes */
6363         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6364         msleep(5);
6365         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6366
6367 #ifdef BCM_ISCSI
6368         /* tell the searcher where the T2 table is */
6369         REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6370
6371         wb_write[0] = U64_LO(bp->t2_mapping);
6372         wb_write[1] = U64_HI(bp->t2_mapping);
6373         REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6374         wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6375         wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6376         REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6377
6378         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6379 #endif
6380         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6381         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6382
6383         if (CHIP_IS_E1(bp)) {
6384                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6385                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6386         }
6387         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6388
6389         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6390         /* init aeu_mask_attn_func_0/1:
6391          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6392          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6393          *             bits 4-7 are used for "per vn group attention" */
6394         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6395                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6396
6397         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6398         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6399         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6400         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6401         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6402
6403         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6404
6405         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6406
6407         if (CHIP_IS_E1H(bp)) {
6408                 /* 0x2 disable e1hov, 0x1 enable */
6409                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6410                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6411
6413                 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6414                 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6415                 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6417         }
6418
6419         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6420         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6421
6422         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6423         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6424                 {
6425                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6426
6427                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6428                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6429
6430                 /* The GPIO should be swapped if the swap register is
6431                    set and active */
6432                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6433                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6434
6435                 /* Select function upon port-swap configuration */
6436                 if (port == 0) {
6437                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6438                         aeu_gpio_mask = (swap_val && swap_override) ?
6439                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6440                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6441                 } else {
6442                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6443                         aeu_gpio_mask = (swap_val && swap_override) ?
6444                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6445                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6446                 }
6447                 val = REG_RD(bp, offset);
6448                 /* add GPIO3 to group */
6449                 val |= aeu_gpio_mask;
6450                 REG_WR(bp, offset, val);
6451                 }
6452                 break;
6453
6454         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6455         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6456                 /* add SPIO 5 to group 0 */
6457                 {
6458                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6459                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6460                 val = REG_RD(bp, reg_addr);
6461                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6462                 REG_WR(bp, reg_addr, val);
6463                 }
6464                 break;
6465
6466         default:
6467                 break;
6468         }
6469
6470         bnx2x__link_reset(bp);
6471
6472         return 0;
6473 }
6474
6475 #define ILT_PER_FUNC            (768/2)
6476 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6477 /* The physical address is shifted right 12 bits and a
6478    valid bit is set in bit 52 (the 53rd bit);
6479    then, since this is a wide register,
6480    we split it into two 32 bit writes
6481  */
6482 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6483 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
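     /* A worked example (illustrative only): for addr = 0x0000001234567000,
      * ONCHIP_ADDR1() yields 0x01234567 (addr >> 12) and ONCHIP_ADDR2()
      * yields 0x00100000 (the valid bit alone, since addr >> 44 == 0) */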
6484 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6485 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
6486
6487 #define CNIC_ILT_LINES          0
6488
6489 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6490 {
6491         int reg;
6492
6493         if (CHIP_IS_E1H(bp))
6494                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6495         else /* E1 */
6496                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6497
6498         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6499 }
6500
6501 static int bnx2x_init_func(struct bnx2x *bp)
6502 {
6503         int port = BP_PORT(bp);
6504         int func = BP_FUNC(bp);
6505         u32 addr, val;
6506         int i;
6507
6508         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6509
6510         /* set MSI reconfigure capability */
6511         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6512         val = REG_RD(bp, addr);
6513         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6514         REG_WR(bp, addr, val);
6515
6516         i = FUNC_ILT_BASE(func);
6517
6518         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6519         if (CHIP_IS_E1H(bp)) {
6520                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6521                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6522         } else /* E1 */
6523                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6524                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6525
6526
6527         if (CHIP_IS_E1H(bp)) {
6528                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6529                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6530                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6531                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6532                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6533                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6534                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6535                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6536                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6537
6538                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6539                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6540         }
6541
6542         /* HC init per function */
6543         if (CHIP_IS_E1H(bp)) {
6544                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6545
6546                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6547                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6548         }
6549         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6550
6551         /* Reset PCIE errors for debug */
6552         REG_WR(bp, 0x2114, 0xffffffff);
6553         REG_WR(bp, 0x2120, 0xffffffff);
6554
6555         return 0;
6556 }
6557
6558 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6559 {
6560         int i, rc = 0;
6561
6562         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6563            BP_FUNC(bp), load_code);
6564
6565         bp->dmae_ready = 0;
6566         mutex_init(&bp->dmae_mutex);
6567         rc = bnx2x_gunzip_init(bp);
6568         if (rc)
6569                 return rc;
6570
6571         switch (load_code) {
6572         case FW_MSG_CODE_DRV_LOAD_COMMON:
6573                 rc = bnx2x_init_common(bp);
6574                 if (rc)
6575                         goto init_hw_err;
6576                 /* no break */
6577
6578         case FW_MSG_CODE_DRV_LOAD_PORT:
6579                 bp->dmae_ready = 1;
6580                 rc = bnx2x_init_port(bp);
6581                 if (rc)
6582                         goto init_hw_err;
6583                 /* no break */
6584
6585         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6586                 bp->dmae_ready = 1;
6587                 rc = bnx2x_init_func(bp);
6588                 if (rc)
6589                         goto init_hw_err;
6590                 break;
6591
6592         default:
6593                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6594                 break;
6595         }
6596
6597         if (!BP_NOMCP(bp)) {
6598                 int func = BP_FUNC(bp);
6599
6600                 bp->fw_drv_pulse_wr_seq =
6601                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6602                                  DRV_PULSE_SEQ_MASK);
6603                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6604         }
6605
6606         /* this needs to be done before gunzip end */
6607         bnx2x_zero_def_sb(bp);
6608         for_each_queue(bp, i)
6609                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6610
6611 init_hw_err:
6612         bnx2x_gunzip_end(bp);
6613
6614         return rc;
6615 }
6616
6617 static void bnx2x_free_mem(struct bnx2x *bp)
6618 {
6619
6620 #define BNX2X_PCI_FREE(x, y, size) \
6621         do { \
6622                 if (x) { \
6623                         pci_free_consistent(bp->pdev, size, x, y); \
6624                         x = NULL; \
6625                         y = 0; \
6626                 } \
6627         } while (0)
6628
6629 #define BNX2X_FREE(x) \
6630         do { \
6631                 if (x) { \
6632                         vfree(x); \
6633                         x = NULL; \
6634                 } \
6635         } while (0)
6636
6637         int i;
6638
6639         /* fastpath */
6640         /* Common */
6641         for_each_queue(bp, i) {
6642
6643                 /* status blocks */
6644                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6645                                bnx2x_fp(bp, i, status_blk_mapping),
6646                                sizeof(struct host_status_block));
6647         }
6648         /* Rx */
6649         for_each_rx_queue(bp, i) {
6650
6651                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6652                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6653                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6654                                bnx2x_fp(bp, i, rx_desc_mapping),
6655                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6656
6657                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6658                                bnx2x_fp(bp, i, rx_comp_mapping),
6659                                sizeof(struct eth_fast_path_rx_cqe) *
6660                                NUM_RCQ_BD);
6661
6662                 /* SGE ring */
6663                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6664                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6665                                bnx2x_fp(bp, i, rx_sge_mapping),
6666                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6667         }
6668         /* Tx */
6669         for_each_tx_queue(bp, i) {
6670
6671                 /* fastpath tx rings: tx_buf tx_desc */
6672                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6673                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6674                                bnx2x_fp(bp, i, tx_desc_mapping),
6675                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6676         }
6677         /* end of fastpath */
6678
6679         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6680                        sizeof(struct host_def_status_block));
6681
6682         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6683                        sizeof(struct bnx2x_slowpath));
6684
6685 #ifdef BCM_ISCSI
6686         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6687         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6688         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6689         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6690 #endif
6691         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6692
6693 #undef BNX2X_PCI_FREE
6694 #undef BNX2X_FREE
6695 }
6696
6697 static int bnx2x_alloc_mem(struct bnx2x *bp)
6698 {
6699
6700 #define BNX2X_PCI_ALLOC(x, y, size) \
6701         do { \
6702                 x = pci_alloc_consistent(bp->pdev, size, y); \
6703                 if (x == NULL) \
6704                         goto alloc_mem_err; \
6705                 memset(x, 0, size); \
6706         } while (0)
6707
6708 #define BNX2X_ALLOC(x, size) \
6709         do { \
6710                 x = vmalloc(size); \
6711                 if (x == NULL) \
6712                         goto alloc_mem_err; \
6713                 memset(x, 0, size); \
6714         } while (0)
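/* On any allocation failure the macros jump to alloc_mem_err, where
 * bnx2x_free_mem() releases whatever has been allocated so far (the
 * free macros skip fields that are still NULL).
 */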
6715
6716         int i;
6717
6718         /* fastpath */
6719         /* Common */
6720         for_each_queue(bp, i) {
6721                 bnx2x_fp(bp, i, bp) = bp;
6722
6723                 /* status blocks */
6724                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6725                                 &bnx2x_fp(bp, i, status_blk_mapping),
6726                                 sizeof(struct host_status_block));
6727         }
6728         /* Rx */
6729         for_each_rx_queue(bp, i) {
6730
6731                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6732                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6733                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6734                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6735                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6736                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6737
6738                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6739                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6740                                 sizeof(struct eth_fast_path_rx_cqe) *
6741                                 NUM_RCQ_BD);
6742
6743                 /* SGE ring */
6744                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6745                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6746                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6747                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6748                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6749         }
6750         /* Tx */
6751         for_each_tx_queue(bp, i) {
6752
6753                 /* fastpath tx rings: tx_buf tx_desc */
6754                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6755                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6756                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6757                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6758                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6759         }
6760         /* end of fastpath */
6761
6762         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6763                         sizeof(struct host_def_status_block));
6764
6765         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6766                         sizeof(struct bnx2x_slowpath));
6767
6768 #ifdef BCM_ISCSI
6769         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6770
6771         /* Initialize T1 */
6772         for (i = 0; i < 64*1024; i += 64) {
6773                 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6774                 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6775         }
6776
6777         /* allocate the searcher T2 table;
6778            we allocate 1/4 of the allocation size for T2
6779            (which is not entered into the ILT) */
6780         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6781
6782         /* Initialize T2 */
6783         for (i = 0; i < 16*1024; i += 64)
6784                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6785
6786         /* now fixup the last line in the block to point to the next block */
6787         *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6788
6789         /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6790         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6791
6792         /* QM queues (128*MAX_CONN) */
6793         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6794 #endif
6795
6796         /* Slow path ring */
6797         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6798
6799         return 0;
6800
6801 alloc_mem_err:
6802         bnx2x_free_mem(bp);
6803         return -ENOMEM;
6804
6805 #undef BNX2X_PCI_ALLOC
6806 #undef BNX2X_ALLOC
6807 }
6808
6809 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6810 {
6811         int i;
6812
6813         for_each_tx_queue(bp, i) {
6814                 struct bnx2x_fastpath *fp = &bp->fp[i];
6815
6816                 u16 bd_cons = fp->tx_bd_cons;
6817                 u16 sw_prod = fp->tx_pkt_prod;
6818                 u16 sw_cons = fp->tx_pkt_cons;
6819
6820                 while (sw_cons != sw_prod) {
6821                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6822                         sw_cons++;
6823                 }
6824         }
6825 }
6826
6827 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6828 {
6829         int i, j;
6830
6831         for_each_rx_queue(bp, j) {
6832                 struct bnx2x_fastpath *fp = &bp->fp[j];
6833
6834                 for (i = 0; i < NUM_RX_BD; i++) {
6835                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6836                         struct sk_buff *skb = rx_buf->skb;
6837
6838                         if (skb == NULL)
6839                                 continue;
6840
6841                         pci_unmap_single(bp->pdev,
6842                                          pci_unmap_addr(rx_buf, mapping),
6843                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6844
6845                         rx_buf->skb = NULL;
6846                         dev_kfree_skb(skb);
6847                 }
6848                 if (!fp->disable_tpa)
6849                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6850                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6851                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6852         }
6853 }
6854
6855 static void bnx2x_free_skbs(struct bnx2x *bp)
6856 {
6857         bnx2x_free_tx_skbs(bp);
6858         bnx2x_free_rx_skbs(bp);
6859 }
6860
6861 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6862 {
6863         int i, offset = 1;
6864
6865         free_irq(bp->msix_table[0].vector, bp->dev);
6866         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6867            bp->msix_table[0].vector);
6868
6869         for_each_queue(bp, i) {
6870                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6871                    "state %x\n", i, bp->msix_table[i + offset].vector,
6872                    bnx2x_fp(bp, i, state));
6873
6874                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6875         }
6876 }
6877
6878 static void bnx2x_free_irq(struct bnx2x *bp)
6879 {
6880         if (bp->flags & USING_MSIX_FLAG) {
6881                 bnx2x_free_msix_irqs(bp);
6882                 pci_disable_msix(bp->pdev);
6883                 bp->flags &= ~USING_MSIX_FLAG;
6884
6885         } else if (bp->flags & USING_MSI_FLAG) {
6886                 free_irq(bp->pdev->irq, bp->dev);
6887                 pci_disable_msi(bp->pdev);
6888                 bp->flags &= ~USING_MSI_FLAG;
6889
6890         } else
6891                 free_irq(bp->pdev->irq, bp->dev);
6892 }
6893
6894 static int bnx2x_enable_msix(struct bnx2x *bp)
6895 {
6896         int i, rc, offset = 1;
6897         int igu_vec = 0;
6898
6899         bp->msix_table[0].entry = igu_vec;
6900         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6901
6902         for_each_queue(bp, i) {
6903                 igu_vec = BP_L_ID(bp) + offset + i;
6904                 bp->msix_table[i + offset].entry = igu_vec;
6905                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6906                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6907         }
6908
6909         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6910                              BNX2X_NUM_QUEUES(bp) + offset);
6911         if (rc) {
6912                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6913                 return rc;
6914         }
6915
6916         bp->flags |= USING_MSIX_FLAG;
6917
6918         return 0;
6919 }
6920
6921 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6922 {
6923         int i, rc, offset = 1;
6924
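        /* Vector 0 is the slowpath interrupt; fastpath vectors follow at
         * offset 1 and are named <dev>-rx-N / <dev>-tx-N below, which is
         * how they show up in /proc/interrupts.
         */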
6925         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6926                          bp->dev->name, bp->dev);
6927         if (rc) {
6928                 BNX2X_ERR("request sp irq failed\n");
6929                 return -EBUSY;
6930         }
6931
6932         for_each_queue(bp, i) {
6933                 struct bnx2x_fastpath *fp = &bp->fp[i];
6934
6935                 if (i < bp->num_rx_queues)
6936                         sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6937                 else
6938                         sprintf(fp->name, "%s-tx-%d",
6939                                 bp->dev->name, i - bp->num_rx_queues);
6940
6941                 rc = request_irq(bp->msix_table[i + offset].vector,
6942                                  bnx2x_msix_fp_int, 0, fp->name, fp);
6943                 if (rc) {
6944                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
6945                         bnx2x_free_msix_irqs(bp);
6946                         return -EBUSY;
6947                 }
6948
6949                 fp->state = BNX2X_FP_STATE_IRQ;
6950         }
6951
6952         i = BNX2X_NUM_QUEUES(bp);
6953         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
6954                " ... fp[%d] %d\n",
6955                bp->dev->name, bp->msix_table[0].vector,
6956                0, bp->msix_table[offset].vector,
6957                i - 1, bp->msix_table[offset + i - 1].vector);
6958
6959         return 0;
6960 }
6961
6962 static int bnx2x_enable_msi(struct bnx2x *bp)
6963 {
6964         int rc;
6965
6966         rc = pci_enable_msi(bp->pdev);
6967         if (rc) {
6968                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6969                 return -1;
6970         }
6971         bp->flags |= USING_MSI_FLAG;
6972
6973         return 0;
6974 }
6975
6976 static int bnx2x_req_irq(struct bnx2x *bp)
6977 {
6978         unsigned long flags;
6979         int rc;
6980
6981         if (bp->flags & USING_MSI_FLAG)
6982                 flags = 0;
6983         else
6984                 flags = IRQF_SHARED;
6985
6986         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6987                          bp->dev->name, bp->dev);
6988         if (!rc)
6989                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6990
6991         return rc;
6992 }
6993
6994 static void bnx2x_napi_enable(struct bnx2x *bp)
6995 {
6996         int i;
6997
6998         for_each_rx_queue(bp, i)
6999                 napi_enable(&bnx2x_fp(bp, i, napi));
7000 }
7001
7002 static void bnx2x_napi_disable(struct bnx2x *bp)
7003 {
7004         int i;
7005
7006         for_each_rx_queue(bp, i)
7007                 napi_disable(&bnx2x_fp(bp, i, napi));
7008 }
7009
7010 static void bnx2x_netif_start(struct bnx2x *bp)
7011 {
7012         int intr_sem;
7013
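        /* intr_sem acts as an interrupt-disable nesting count: the NIC is
         * re-enabled only when the decrement brings the count back to zero.
         */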
7014         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7015         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7016
7017         if (intr_sem) {
7018                 if (netif_running(bp->dev)) {
7019                         bnx2x_napi_enable(bp);
7020                         bnx2x_int_enable(bp);
7021                         if (bp->state == BNX2X_STATE_OPEN)
7022                                 netif_tx_wake_all_queues(bp->dev);
7023                 }
7024         }
7025 }
7026
7027 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7028 {
7029         bnx2x_int_disable_sync(bp, disable_hw);
7030         bnx2x_napi_disable(bp);
7031         netif_tx_disable(bp->dev);
7032         bp->dev->trans_start = jiffies; /* prevent tx timeout */
7033 }
7034
7035 /*
7036  * Init service functions
7037  */
7038
7039 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
7040 {
7041         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7042         int port = BP_PORT(bp);
7043
7044         /* CAM allocation
7045          * unicasts 0-31:port0 32-63:port1
7046          * multicast 64-127:port0 128-191:port1
7047          */
7048         config->hdr.length = 2;
7049         config->hdr.offset = port ? 32 : 0;
7050         config->hdr.client_id = bp->fp->cl_id;
7051         config->hdr.reserved1 = 0;
7052
7053         /* primary MAC */
7054         config->config_table[0].cam_entry.msb_mac_addr =
7055                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
7056         config->config_table[0].cam_entry.middle_mac_addr =
7057                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
7058         config->config_table[0].cam_entry.lsb_mac_addr =
7059                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
7060         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7061         if (set)
7062                 config->config_table[0].target_table_entry.flags = 0;
7063         else
7064                 CAM_INVALIDATE(config->config_table[0]);
7065         config->config_table[0].target_table_entry.clients_bit_vector =
7066                                                 cpu_to_le32(1 << BP_L_ID(bp));
7067         config->config_table[0].target_table_entry.vlan_id = 0;
7068
7069         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7070            (set ? "setting" : "clearing"),
7071            config->config_table[0].cam_entry.msb_mac_addr,
7072            config->config_table[0].cam_entry.middle_mac_addr,
7073            config->config_table[0].cam_entry.lsb_mac_addr);
7074
7075         /* broadcast */
7076         config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
7077         config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
7078         config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
7079         config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7080         if (set)
7081                 config->config_table[1].target_table_entry.flags =
7082                                 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7083         else
7084                 CAM_INVALIDATE(config->config_table[1]);
7085         config->config_table[1].target_table_entry.clients_bit_vector =
7086                                                 cpu_to_le32(1 << BP_L_ID(bp));
7087         config->config_table[1].target_table_entry.vlan_id = 0;
7088
7089         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7090                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7091                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7092 }
7093
7094 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7095 {
7096         struct mac_configuration_cmd_e1h *config =
7097                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7098
7099         /* CAM allocation for E1H
7100          * unicasts: by func number
7101          * multicast: 20+FUNC*20, 20 each
7102          */
7103         config->hdr.length = 1;
7104         config->hdr.offset = BP_FUNC(bp);
7105         config->hdr.client_id = bp->fp->cl_id;
7106         config->hdr.reserved1 = 0;
7107
7108         /* primary MAC */
7109         config->config_table[0].msb_mac_addr =
7110                                         swab16(*(u16 *)&bp->dev->dev_addr[0]);
7111         config->config_table[0].middle_mac_addr =
7112                                         swab16(*(u16 *)&bp->dev->dev_addr[2]);
7113         config->config_table[0].lsb_mac_addr =
7114                                         swab16(*(u16 *)&bp->dev->dev_addr[4]);
7115         config->config_table[0].clients_bit_vector =
7116                                         cpu_to_le32(1 << BP_L_ID(bp));
7117         config->config_table[0].vlan_id = 0;
7118         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7119         if (set)
7120                 config->config_table[0].flags = BP_PORT(bp);
7121         else
7122                 config->config_table[0].flags =
7123                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7124
7125         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
7126            (set ? "setting" : "clearing"),
7127            config->config_table[0].msb_mac_addr,
7128            config->config_table[0].middle_mac_addr,
7129            config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
7130
7131         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7132                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7133                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7134 }
7135
7136 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7137                              int *state_p, int poll)
7138 {
7139         /* can take a while if any port is running */
7140         int cnt = 5000;
7141
7142         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7143            poll ? "polling" : "waiting", state, idx);
7144
7145         might_sleep();
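        /* In poll mode interrupts are not available yet, so the Rx
         * completion ring is polled directly to let the ramrod
         * completion be processed.
         */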
7146         while (cnt--) {
7147                 if (poll) {
7148                         bnx2x_rx_int(bp->fp, 10);
7149                         /* if the index is different from 0,
7150                          * the reply for some commands will
7151                          * arrive on the non-default queue
7152                          */
7153                         if (idx)
7154                                 bnx2x_rx_int(&bp->fp[idx], 10);
7155                 }
7156
7157                 mb(); /* state is changed by bnx2x_sp_event() */
7158                 if (*state_p == state) {
7159 #ifdef BNX2X_STOP_ON_ERROR
7160                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7161 #endif
7162                         return 0;
7163                 }
7164
7165                 msleep(1);
7166
7167                 if (bp->panic)
7168                         return -EIO;
7169         }
7170
7171         /* timeout! */
7172         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7173                   poll ? "polling" : "waiting", state, idx);
7174 #ifdef BNX2X_STOP_ON_ERROR
7175         bnx2x_panic();
7176 #endif
7177
7178         return -EBUSY;
7179 }
7180
7181 static int bnx2x_setup_leading(struct bnx2x *bp)
7182 {
7183         int rc;
7184
7185         /* reset IGU state */
7186         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7187
7188         /* SETUP ramrod */
7189         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7190
7191         /* Wait for completion */
7192         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7193
7194         return rc;
7195 }
7196
7197 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7198 {
7199         struct bnx2x_fastpath *fp = &bp->fp[index];
7200
7201         /* reset IGU state */
7202         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7203
7204         /* SETUP ramrod */
7205         fp->state = BNX2X_FP_STATE_OPENING;
7206         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7207                       fp->cl_id, 0);
7208
7209         /* Wait for completion */
7210         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7211                                  &(fp->state), 0);
7212 }
7213
7214 static int bnx2x_poll(struct napi_struct *napi, int budget);
7215
7216 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7217                                     int *num_tx_queues_out)
7218 {
7219         int _num_rx_queues = 0, _num_tx_queues = 0;
7220
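        /* Derive the Rx/Tx queue counts from the num_rx_queues and
         * num_tx_queues module parameters, capping them at the chip
         * maximum and defaulting to the number of online CPUs.
         */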
7221         switch (bp->multi_mode) {
7222         case ETH_RSS_MODE_DISABLED:
7223                 _num_rx_queues = 1;
7224                 _num_tx_queues = 1;
7225                 break;
7226
7227         case ETH_RSS_MODE_REGULAR:
7228                 if (num_rx_queues)
7229                         _num_rx_queues = min_t(u32, num_rx_queues,
7230                                                BNX2X_MAX_QUEUES(bp));
7231                 else
7232                         _num_rx_queues = min_t(u32, num_online_cpus(),
7233                                                BNX2X_MAX_QUEUES(bp));
7234
7235                 if (num_tx_queues)
7236                         _num_tx_queues = min_t(u32, num_tx_queues,
7237                                                BNX2X_MAX_QUEUES(bp));
7238                 else
7239                         _num_tx_queues = min_t(u32, num_online_cpus(),
7240                                                BNX2X_MAX_QUEUES(bp));
7241
7242                 /* There must not be more Tx queues than Rx queues */
7243                 if (_num_tx_queues > _num_rx_queues) {
7244                         BNX2X_ERR("number of tx queues (%d) > "
7245                                   "number of rx queues (%d)"
7246                                   "  defaulting to %d\n",
7247                                   _num_tx_queues, _num_rx_queues,
7248                                   _num_rx_queues);
7249                         _num_tx_queues = _num_rx_queues;
7250                 }
7251                 break;
7252
7253
7254         default:
7255                 _num_rx_queues = 1;
7256                 _num_tx_queues = 1;
7257                 break;
7258         }
7259
7260         *num_rx_queues_out = _num_rx_queues;
7261         *num_tx_queues_out = _num_tx_queues;
7262 }
7263
7264 static int bnx2x_set_int_mode(struct bnx2x *bp)
7265 {
7266         int rc = 0;
7267
7268         switch (int_mode) {
7269         case INT_MODE_INTx:
7270         case INT_MODE_MSI:
7271                 bp->num_rx_queues = 1;
7272                 bp->num_tx_queues = 1;
7273                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7274                 break;
7275
7276         case INT_MODE_MSIX:
7277         default:
7278                 /* Set interrupt mode according to bp->multi_mode value */
7279                 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7280                                         &bp->num_tx_queues);
7281
7282                 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7283                    bp->num_rx_queues, bp->num_tx_queues);
7284
7285                 /* If we can't use MSI-X we only need one fp;
7286                  * try to enable MSI-X with the requested number of fp's
7287                  * and fall back to MSI or legacy INTx with one fp.
7288                  */
7289                 rc = bnx2x_enable_msix(bp);
7290                 if (rc) {
7291                         /* failed to enable MSI-X */
7292                         if (bp->multi_mode)
7293                                 BNX2X_ERR("Multi requested but failed to "
7294                                           "enable MSI-X (rx %d tx %d), "
7295                                           "set number of queues to 1\n",
7296                                           bp->num_rx_queues, bp->num_tx_queues);
7297                         bp->num_rx_queues = 1;
7298                         bp->num_tx_queues = 1;
7299                 }
7300                 break;
7301         }
7302         bp->dev->real_num_tx_queues = bp->num_tx_queues;
7303         return rc;
7304 }
7305
7306
7307 /* must be called with rtnl_lock */
7308 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7309 {
7310         u32 load_code;
7311         int i, rc;
7312
7313 #ifdef BNX2X_STOP_ON_ERROR
7314         if (unlikely(bp->panic))
7315                 return -EPERM;
7316 #endif
7317
7318         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7319
7320         rc = bnx2x_set_int_mode(bp);
7321
7322         if (bnx2x_alloc_mem(bp))
7323                 return -ENOMEM;
7324
7325         for_each_rx_queue(bp, i)
7326                 bnx2x_fp(bp, i, disable_tpa) =
7327                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7328
7329         for_each_rx_queue(bp, i)
7330                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7331                                bnx2x_poll, 128);
7332
7333         bnx2x_napi_enable(bp);
7334
7335         if (bp->flags & USING_MSIX_FLAG) {
7336                 rc = bnx2x_req_msix_irqs(bp);
7337                 if (rc) {
7338                         pci_disable_msix(bp->pdev);
7339                         goto load_error1;
7340                 }
7341         } else {
7342                 /* Fall back to INTx if we failed to enable MSI-X due to
7343                    lack of memory (in bnx2x_set_int_mode()) */
7344                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7345                         bnx2x_enable_msi(bp);
7346                 bnx2x_ack_int(bp);
7347                 rc = bnx2x_req_irq(bp);
7348                 if (rc) {
7349                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7350                         if (bp->flags & USING_MSI_FLAG)
7351                                 pci_disable_msi(bp->pdev);
7352                         goto load_error1;
7353                 }
7354                 if (bp->flags & USING_MSI_FLAG) {
7355                         bp->dev->irq = bp->pdev->irq;
7356                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7357                                bp->dev->name, bp->pdev->irq);
7358                 }
7359         }
7360
7361         /* Send LOAD_REQUEST command to MCP.
7362            The MCP responds with the type of LOAD command:
7363            if this is the first port to be initialized,
7364            the common blocks should be initialized as well; otherwise not.
7365         */
7366         if (!BP_NOMCP(bp)) {
7367                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7368                 if (!load_code) {
7369                         BNX2X_ERR("MCP response failure, aborting\n");
7370                         rc = -EBUSY;
7371                         goto load_error2;
7372                 }
7373                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7374                         rc = -EBUSY; /* other port in diagnostic mode */
7375                         goto load_error2;
7376                 }
7377
7378         } else {
7379                 int port = BP_PORT(bp);
7380
7381                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7382                    load_count[0], load_count[1], load_count[2]);
7383                 load_count[0]++;
7384                 load_count[1 + port]++;
7385                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7386                    load_count[0], load_count[1], load_count[2]);
7387                 if (load_count[0] == 1)
7388                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7389                 else if (load_count[1 + port] == 1)
7390                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7391                 else
7392                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7393         }
7394
7395         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7396             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7397                 bp->port.pmf = 1;
7398         else
7399                 bp->port.pmf = 0;
7400         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7401
7402         /* Initialize HW */
7403         rc = bnx2x_init_hw(bp, load_code);
7404         if (rc) {
7405                 BNX2X_ERR("HW init failed, aborting\n");
7406                 goto load_error2;
7407         }
7408
7409         /* Setup NIC internals and enable interrupts */
7410         bnx2x_nic_init(bp, load_code);
7411
7412         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7413             (bp->common.shmem2_base))
7414                 SHMEM2_WR(bp, dcc_support,
7415                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7416                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7417
7418         /* Send LOAD_DONE command to MCP */
7419         if (!BP_NOMCP(bp)) {
7420                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7421                 if (!load_code) {
7422                         BNX2X_ERR("MCP response failure, aborting\n");
7423                         rc = -EBUSY;
7424                         goto load_error3;
7425                 }
7426         }
7427
7428         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7429
7430         rc = bnx2x_setup_leading(bp);
7431         if (rc) {
7432                 BNX2X_ERR("Setup leading failed!\n");
7433 #ifndef BNX2X_STOP_ON_ERROR
7434                 goto load_error3;
7435 #else
7436                 bp->panic = 1;
7437                 return -EBUSY;
7438 #endif
7439         }
7440
7441         if (CHIP_IS_E1H(bp))
7442                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7443                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7444                         bp->state = BNX2X_STATE_DISABLED;
7445                 }
7446
7447         if (bp->state == BNX2X_STATE_OPEN) {
7448                 for_each_nondefault_queue(bp, i) {
7449                         rc = bnx2x_setup_multi(bp, i);
7450                         if (rc)
7451                                 goto load_error3;
7452                 }
7453
7454                 if (CHIP_IS_E1(bp))
7455                         bnx2x_set_mac_addr_e1(bp, 1);
7456                 else
7457                         bnx2x_set_mac_addr_e1h(bp, 1);
7458         }
7459
7460         if (bp->port.pmf)
7461                 bnx2x_initial_phy_init(bp, load_mode);
7462
7463         /* Start fast path */
7464         switch (load_mode) {
7465         case LOAD_NORMAL:
7466                 if (bp->state == BNX2X_STATE_OPEN) {
7467                         /* Tx queues should only be re-enabled */
7468                         netif_tx_wake_all_queues(bp->dev);
7469                 }
7470                 /* Initialize the receive filter. */
7471                 bnx2x_set_rx_mode(bp->dev);
7472                 break;
7473
7474         case LOAD_OPEN:
7475                 netif_tx_start_all_queues(bp->dev);
7476                 if (bp->state != BNX2X_STATE_OPEN)
7477                         netif_tx_disable(bp->dev);
7478                 /* Initialize the receive filter. */
7479                 bnx2x_set_rx_mode(bp->dev);
7480                 break;
7481
7482         case LOAD_DIAG:
7483                 /* Initialize the receive filter. */
7484                 bnx2x_set_rx_mode(bp->dev);
7485                 bp->state = BNX2X_STATE_DIAG;
7486                 break;
7487
7488         default:
7489                 break;
7490         }
7491
7492         if (!bp->port.pmf)
7493                 bnx2x__link_status_update(bp);
7494
7495         /* start the timer */
7496         mod_timer(&bp->timer, jiffies + bp->current_interval);
7497
7498
7499         return 0;
7500
7501 load_error3:
7502         bnx2x_int_disable_sync(bp, 1);
7503         if (!BP_NOMCP(bp)) {
7504                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7505                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7506         }
7507         bp->port.pmf = 0;
7508         /* Free SKBs, SGEs, TPA pool and driver internals */
7509         bnx2x_free_skbs(bp);
7510         for_each_rx_queue(bp, i)
7511                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7512 load_error2:
7513         /* Release IRQs */
7514         bnx2x_free_irq(bp);
7515 load_error1:
7516         bnx2x_napi_disable(bp);
7517         for_each_rx_queue(bp, i)
7518                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7519         bnx2x_free_mem(bp);
7520
7521         return rc;
7522 }
7523
7524 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7525 {
7526         struct bnx2x_fastpath *fp = &bp->fp[index];
7527         int rc;
7528
7529         /* halt the connection */
7530         fp->state = BNX2X_FP_STATE_HALTING;
7531         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7532
7533         /* Wait for completion */
7534         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7535                                &(fp->state), 1);
7536         if (rc) /* timeout */
7537                 return rc;
7538
7539         /* delete cfc entry */
7540         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7541
7542         /* Wait for completion */
7543         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7544                                &(fp->state), 1);
7545         return rc;
7546 }
7547
7548 static int bnx2x_stop_leading(struct bnx2x *bp)
7549 {
7550         __le16 dsb_sp_prod_idx;
7551         /* if the other port is handling traffic,
7552            this can take a lot of time */
7553         int cnt = 500;
7554         int rc;
7555
7556         might_sleep();
7557
7558         /* Send HALT ramrod */
7559         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7560         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7561
7562         /* Wait for completion */
7563         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7564                                &(bp->fp[0].state), 1);
7565         if (rc) /* timeout */
7566                 return rc;
7567
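        /* Snapshot the default status block slowpath producer; a change in
         * it indicates that the PORT_DELETE ramrod has completed.
         */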
7568         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7569
7570         /* Send PORT_DELETE ramrod */
7571         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7572
7573         /* Wait for the completion to arrive on the default status block.
7574            We are going to reset the chip anyway,
7575            so there is not much to do if this times out.
7576          */
7577         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7578                 if (!cnt) {
7579                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7580                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7581                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7582 #ifdef BNX2X_STOP_ON_ERROR
7583                         bnx2x_panic();
7584 #endif
7585                         rc = -EBUSY;
7586                         break;
7587                 }
7588                 cnt--;
7589                 msleep(1);
7590                 rmb(); /* Refresh the dsb_sp_prod */
7591         }
7592         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7593         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7594
7595         return rc;
7596 }
7597
7598 static void bnx2x_reset_func(struct bnx2x *bp)
7599 {
7600         int port = BP_PORT(bp);
7601         int func = BP_FUNC(bp);
7602         int base, i;
7603
7604         /* Configure IGU */
7605         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7606         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7607
7608         /* Clear ILT */
7609         base = FUNC_ILT_BASE(func);
7610         for (i = base; i < base + ILT_PER_FUNC; i++)
7611                 bnx2x_ilt_wr(bp, i, 0);
7612 }
7613
7614 static void bnx2x_reset_port(struct bnx2x *bp)
7615 {
7616         int port = BP_PORT(bp);
7617         u32 val;
7618
7619         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7620
7621         /* Do not rcv packets to BRB */
7622         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7623         /* Do not direct rcv packets that are not for MCP to the BRB */
7624         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7625                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7626
7627         /* Configure AEU */
7628         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7629
7630         msleep(100);
7631         /* Check for BRB port occupancy */
7632         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7633         if (val)
7634                 DP(NETIF_MSG_IFDOWN,
7635                    "BRB1 is not empty  %d blocks are occupied\n", val);
7636
7637         /* TODO: Close Doorbell port? */
7638 }
7639
7640 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7641 {
7642         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7643            BP_FUNC(bp), reset_code);
7644
7645         switch (reset_code) {
7646         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7647                 bnx2x_reset_port(bp);
7648                 bnx2x_reset_func(bp);
7649                 bnx2x_reset_common(bp);
7650                 break;
7651
7652         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7653                 bnx2x_reset_port(bp);
7654                 bnx2x_reset_func(bp);
7655                 break;
7656
7657         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7658                 bnx2x_reset_func(bp);
7659                 break;
7660
7661         default:
7662                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7663                 break;
7664         }
7665 }
7666
7667 /* must be called with rtnl_lock */
7668 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7669 {
7670         int port = BP_PORT(bp);
7671         u32 reset_code = 0;
7672         int i, cnt, rc;
7673
7674         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7675
7676         /* Set "drop all" */
7677         bp->rx_mode = BNX2X_RX_MODE_NONE;
7678         bnx2x_set_storm_rx_mode(bp);
7679
7680         /* Disable HW interrupts, NAPI and Tx */
7681         bnx2x_netif_stop(bp, 1);
7682
7683         del_timer_sync(&bp->timer);
7684         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7685                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7686         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7687
7688         /* Release IRQs */
7689         bnx2x_free_irq(bp);
7690
7691         /* Wait until tx fastpath tasks complete */
7692         for_each_tx_queue(bp, i) {
7693                 struct bnx2x_fastpath *fp = &bp->fp[i];
7694
7695                 cnt = 1000;
7696                 while (bnx2x_has_tx_work_unload(fp)) {
7697
7698                         bnx2x_tx_int(fp);
7699                         if (!cnt) {
7700                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7701                                           i);
7702 #ifdef BNX2X_STOP_ON_ERROR
7703                                 bnx2x_panic();
7704                                 return -EBUSY;
7705 #else
7706                                 break;
7707 #endif
7708                         }
7709                         cnt--;
7710                         msleep(1);
7711                 }
7712         }
7713         /* Give HW time to discard old tx messages */
7714         msleep(1);
7715
7716         if (CHIP_IS_E1(bp)) {
7717                 struct mac_configuration_cmd *config =
7718                                                 bnx2x_sp(bp, mcast_config);
7719
7720                 bnx2x_set_mac_addr_e1(bp, 0);
7721
7722                 for (i = 0; i < config->hdr.length; i++)
7723                         CAM_INVALIDATE(config->config_table[i]);
7724
7725                 config->hdr.length = i;
7726                 if (CHIP_REV_IS_SLOW(bp))
7727                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7728                 else
7729                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7730                 config->hdr.client_id = bp->fp->cl_id;
7731                 config->hdr.reserved1 = 0;
7732
7733                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7734                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7735                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7736
7737         } else { /* E1H */
7738                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7739
7740                 bnx2x_set_mac_addr_e1h(bp, 0);
7741
7742                 for (i = 0; i < MC_HASH_SIZE; i++)
7743                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7744
7745                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7746         }
7747
7748         if (unload_mode == UNLOAD_NORMAL)
7749                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7750
7751         else if (bp->flags & NO_WOL_FLAG)
7752                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7753
7754         else if (bp->wol) {
7755                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7756                 u8 *mac_addr = bp->dev->dev_addr;
7757                 u32 val;
7758                 /* The MAC address is written to entries 1-4 to
7759                    preserve entry 0, which is used by the PMF */
7760                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7761
7762                 val = (mac_addr[0] << 8) | mac_addr[1];
7763                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7764
7765                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7766                       (mac_addr[4] << 8) | mac_addr[5];
7767                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7768
7769                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7770
7771         } else
7772                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7773
7774         /* Close the multi and leading connections.
7775            Completions for the ramrods are collected synchronously. */
7776         for_each_nondefault_queue(bp, i)
7777                 if (bnx2x_stop_multi(bp, i))
7778                         goto unload_error;
7779
7780         rc = bnx2x_stop_leading(bp);
7781         if (rc) {
7782                 BNX2X_ERR("Stop leading failed!\n");
7783 #ifdef BNX2X_STOP_ON_ERROR
7784                 return -EBUSY;
7785 #else
7786                 goto unload_error;
7787 #endif
7788         }
7789
7790 unload_error:
7791         if (!BP_NOMCP(bp))
7792                 reset_code = bnx2x_fw_command(bp, reset_code);
7793         else {
7794                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7795                    load_count[0], load_count[1], load_count[2]);
7796                 load_count[0]--;
7797                 load_count[1 + port]--;
7798                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7799                    load_count[0], load_count[1], load_count[2]);
7800                 if (load_count[0] == 0)
7801                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7802                 else if (load_count[1 + port] == 0)
7803                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7804                 else
7805                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7806         }
7807
7808         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7809             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7810                 bnx2x__link_reset(bp);
7811
7812         /* Reset the chip */
7813         bnx2x_reset_chip(bp, reset_code);
7814
7815         /* Report UNLOAD_DONE to MCP */
7816         if (!BP_NOMCP(bp))
7817                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7818
7819         bp->port.pmf = 0;
7820
7821         /* Free SKBs, SGEs, TPA pool and driver internals */
7822         bnx2x_free_skbs(bp);
7823         for_each_rx_queue(bp, i)
7824                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7825         for_each_rx_queue(bp, i)
7826                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7827         bnx2x_free_mem(bp);
7828
7829         bp->state = BNX2X_STATE_CLOSED;
7830
7831         netif_carrier_off(bp->dev);
7832
7833         return 0;
7834 }
7835
7836 static void bnx2x_reset_task(struct work_struct *work)
7837 {
7838         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7839
7840 #ifdef BNX2X_STOP_ON_ERROR
7841         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7842                   " so reset not done to allow debug dump,\n"
7843                   " you will need to reboot when done\n");
7844         return;
7845 #endif
7846
7847         rtnl_lock();
7848
7849         if (!netif_running(bp->dev))
7850                 goto reset_task_exit;
7851
7852         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7853         bnx2x_nic_load(bp, LOAD_NORMAL);
7854
7855 reset_task_exit:
7856         rtnl_unlock();
7857 }
7858
7859 /* end of nic load/unload */
7860
7861 /* ethtool_ops */
7862
7863 /*
7864  * Init service functions
7865  */
7866
7867 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7868 {
7869         switch (func) {
7870         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7871         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7872         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7873         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7874         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7875         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7876         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7877         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7878         default:
7879                 BNX2X_ERR("Unsupported function index: %d\n", func);
7880                 return (u32)(-1);
7881         }
7882 }
7883
7884 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7885 {
7886         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7887
7888         /* Flush all outstanding writes */
7889         mmiowb();
7890
7891         /* Pretend to be function 0 */
7892         REG_WR(bp, reg, 0);
7893         /* Flush the GRC transaction (in the chip) */
7894         new_val = REG_RD(bp, reg);
7895         if (new_val != 0) {
7896                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7897                           new_val);
7898                 BUG();
7899         }
7900
7901         /* From now we are in the "like-E1" mode */
7902         bnx2x_int_disable(bp);
7903
7904         /* Flush all outstanding writes */
7905         mmiowb();
7906
7907         /* Restore the original function settings */
7908         REG_WR(bp, reg, orig_func);
7909         new_val = REG_RD(bp, reg);
7910         if (new_val != orig_func) {
7911                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7912                           orig_func, new_val);
7913                 BUG();
7914         }
7915 }
7916
7917 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7918 {
7919         if (CHIP_IS_E1H(bp))
7920                 bnx2x_undi_int_disable_e1h(bp, func);
7921         else
7922                 bnx2x_int_disable(bp);
7923 }
7924
7925 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7926 {
7927         u32 val;
7928
7929         /* Check if there is any driver already loaded */
7930         val = REG_RD(bp, MISC_REG_UNPREPARED);
7931         if (val == 0x1) {
7932                 /* Check if it is the UNDI driver: UNDI initializes the
7933                  * CID offset for the normal doorbell to 0x7
7934                  */
7935                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7936                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7937                 if (val == 0x7) {
7938                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7939                         /* save our func */
7940                         int func = BP_FUNC(bp);
7941                         u32 swap_en;
7942                         u32 swap_val;
7943
7944                         /* clear the UNDI indication */
7945                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7946
7947                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
7948
7949                         /* try unload UNDI on port 0 */
7950                         bp->func = 0;
7951                         bp->fw_seq =
7952                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7953                                 DRV_MSG_SEQ_NUMBER_MASK);
7954                         reset_code = bnx2x_fw_command(bp, reset_code);
7955
7956                         /* if UNDI is loaded on the other port */
7957                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7958
7959                                 /* send "DONE" for previous unload */
7960                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7961
7962                                 /* unload UNDI on port 1 */
7963                                 bp->func = 1;
7964                                 bp->fw_seq =
7965                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7966                                         DRV_MSG_SEQ_NUMBER_MASK);
7967                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7968
7969                                 bnx2x_fw_command(bp, reset_code);
7970                         }
7971
7972                         /* now it's safe to release the lock */
7973                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7974
7975                         bnx2x_undi_int_disable(bp, func);
7976
7977                         /* close input traffic and wait for it */
7978                         /* Do not rcv packets to BRB */
7979                         REG_WR(bp,
7980                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7981                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7982                         /* Do not direct rcv packets that are not for MCP to
7983                          * the BRB */
7984                         REG_WR(bp,
7985                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7986                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7987                         /* clear AEU */
7988                         REG_WR(bp,
7989                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7990                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7991                         msleep(10);
7992
7993                         /* save NIG port swap info */
7994                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7995                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7996                         /* reset device */
7997                         REG_WR(bp,
7998                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7999                                0xd3ffffff);
8000                         REG_WR(bp,
8001                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8002                                0x1403);
8003                         /* take the NIG out of reset and restore swap values */
8004                         REG_WR(bp,
8005                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8006                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
8007                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8008                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8009
8010                         /* send unload done to the MCP */
8011                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8012
8013                         /* restore our func and fw_seq */
8014                         bp->func = func;
8015                         bp->fw_seq =
8016                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8017                                 DRV_MSG_SEQ_NUMBER_MASK);
8018
8019                 } else
8020                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8021         }
8022 }
8023
8024 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8025 {
8026         u32 val, val2, val3, val4, id;
8027         u16 pmc;
8028
8029         /* Get the chip revision id and number. */
8030         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8031         val = REG_RD(bp, MISC_REG_CHIP_NUM);
8032         id = ((val & 0xffff) << 16);
8033         val = REG_RD(bp, MISC_REG_CHIP_REV);
8034         id |= ((val & 0xf) << 12);
8035         val = REG_RD(bp, MISC_REG_CHIP_METAL);
8036         id |= ((val & 0xff) << 4);
8037         val = REG_RD(bp, MISC_REG_BOND_ID);
8038         id |= (val & 0xf);
8039         bp->common.chip_id = id;
8040         bp->link_params.chip_id = bp->common.chip_id;
8041         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8042
8043         val = (REG_RD(bp, 0x2874) & 0x55);
8044         if ((bp->common.chip_id & 0x1) ||
8045             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8046                 bp->flags |= ONE_PORT_FLAG;
8047                 BNX2X_DEV_INFO("single port device\n");
8048         }
8049
8050         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8051         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8052                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
8053         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8054                        bp->common.flash_size, bp->common.flash_size);
8055
8056         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8057         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8058         bp->link_params.shmem_base = bp->common.shmem_base;
8059         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8060                        bp->common.shmem_base, bp->common.shmem2_base);
8061
8062         if (!bp->common.shmem_base ||
8063             (bp->common.shmem_base < 0xA0000) ||
8064             (bp->common.shmem_base >= 0xC0000)) {
8065                 BNX2X_DEV_INFO("MCP not active\n");
8066                 bp->flags |= NO_MCP_FLAG;
8067                 return;
8068         }
8069
8070         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8071         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8072                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8073                 BNX2X_ERR("BAD MCP validity signature\n");
8074
8075         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8076         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8077
8078         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8079                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8080                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8081
8082         bp->link_params.feature_config_flags = 0;
8083         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8084         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8085                 bp->link_params.feature_config_flags |=
8086                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8087         else
8088                 bp->link_params.feature_config_flags &=
8089                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8090
8091         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8092         bp->common.bc_ver = val;
8093         BNX2X_DEV_INFO("bc_ver %X\n", val);
8094         if (val < BNX2X_BC_VER) {
8095                 /* for now only warn
8096                  * later we might need to enforce this */
8097                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8098                           " please upgrade BC\n", BNX2X_BC_VER, val);
8099         }
8100         bp->link_params.feature_config_flags |=
8101                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8102                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8103
8104         if (BP_E1HVN(bp) == 0) {
8105                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8106                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8107         } else {
8108                 /* no WOL capability for E1HVN != 0 */
8109                 bp->flags |= NO_WOL_FLAG;
8110         }
8111         BNX2X_DEV_INFO("%sWoL capable\n",
8112                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8113
8114         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8115         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8116         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8117         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8118
8119         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8120                val, val2, val3, val4);
8121 }
8122
8123 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8124                                                     u32 switch_cfg)
8125 {
8126         int port = BP_PORT(bp);
8127         u32 ext_phy_type;
8128
8129         switch (switch_cfg) {
8130         case SWITCH_CFG_1G:
8131                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8132
8133                 ext_phy_type =
8134                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8135                 switch (ext_phy_type) {
8136                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8137                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8138                                        ext_phy_type);
8139
8140                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8141                                                SUPPORTED_10baseT_Full |
8142                                                SUPPORTED_100baseT_Half |
8143                                                SUPPORTED_100baseT_Full |
8144                                                SUPPORTED_1000baseT_Full |
8145                                                SUPPORTED_2500baseX_Full |
8146                                                SUPPORTED_TP |
8147                                                SUPPORTED_FIBRE |
8148                                                SUPPORTED_Autoneg |
8149                                                SUPPORTED_Pause |
8150                                                SUPPORTED_Asym_Pause);
8151                         break;
8152
8153                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8154                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8155                                        ext_phy_type);
8156
8157                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8158                                                SUPPORTED_10baseT_Full |
8159                                                SUPPORTED_100baseT_Half |
8160                                                SUPPORTED_100baseT_Full |
8161                                                SUPPORTED_1000baseT_Full |
8162                                                SUPPORTED_TP |
8163                                                SUPPORTED_FIBRE |
8164                                                SUPPORTED_Autoneg |
8165                                                SUPPORTED_Pause |
8166                                                SUPPORTED_Asym_Pause);
8167                         break;
8168
8169                 default:
8170                         BNX2X_ERR("NVRAM config error. "
8171                                   "BAD SerDes ext_phy_config 0x%x\n",
8172                                   bp->link_params.ext_phy_config);
8173                         return;
8174                 }
8175
8176                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8177                                            port*0x10);
8178                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8179                 break;
8180
8181         case SWITCH_CFG_10G:
8182                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8183
8184                 ext_phy_type =
8185                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8186                 switch (ext_phy_type) {
8187                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8188                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8189                                        ext_phy_type);
8190
8191                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8192                                                SUPPORTED_10baseT_Full |
8193                                                SUPPORTED_100baseT_Half |
8194                                                SUPPORTED_100baseT_Full |
8195                                                SUPPORTED_1000baseT_Full |
8196                                                SUPPORTED_2500baseX_Full |
8197                                                SUPPORTED_10000baseT_Full |
8198                                                SUPPORTED_TP |
8199                                                SUPPORTED_FIBRE |
8200                                                SUPPORTED_Autoneg |
8201                                                SUPPORTED_Pause |
8202                                                SUPPORTED_Asym_Pause);
8203                         break;
8204
8205                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8206                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8207                                        ext_phy_type);
8208
8209                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8210                                                SUPPORTED_1000baseT_Full |
8211                                                SUPPORTED_FIBRE |
8212                                                SUPPORTED_Autoneg |
8213                                                SUPPORTED_Pause |
8214                                                SUPPORTED_Asym_Pause);
8215                         break;
8216
8217                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8218                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8219                                        ext_phy_type);
8220
8221                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8222                                                SUPPORTED_2500baseX_Full |
8223                                                SUPPORTED_1000baseT_Full |
8224                                                SUPPORTED_FIBRE |
8225                                                SUPPORTED_Autoneg |
8226                                                SUPPORTED_Pause |
8227                                                SUPPORTED_Asym_Pause);
8228                         break;
8229
8230                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8231                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8232                                        ext_phy_type);
8233
8234                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8235                                                SUPPORTED_FIBRE |
8236                                                SUPPORTED_Pause |
8237                                                SUPPORTED_Asym_Pause);
8238                         break;
8239
8240                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8241                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8242                                        ext_phy_type);
8243
8244                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8245                                                SUPPORTED_1000baseT_Full |
8246                                                SUPPORTED_FIBRE |
8247                                                SUPPORTED_Pause |
8248                                                SUPPORTED_Asym_Pause);
8249                         break;
8250
8251                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8252                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8253                                        ext_phy_type);
8254
8255                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8256                                                SUPPORTED_1000baseT_Full |
8257                                                SUPPORTED_Autoneg |
8258                                                SUPPORTED_FIBRE |
8259                                                SUPPORTED_Pause |
8260                                                SUPPORTED_Asym_Pause);
8261                         break;
8262
8263                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8264                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8265                                        ext_phy_type);
8266
8267                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8268                                                SUPPORTED_1000baseT_Full |
8269                                                SUPPORTED_Autoneg |
8270                                                SUPPORTED_FIBRE |
8271                                                SUPPORTED_Pause |
8272                                                SUPPORTED_Asym_Pause);
8273                         break;
8274
8275                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8276                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8277                                        ext_phy_type);
8278
8279                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8280                                                SUPPORTED_TP |
8281                                                SUPPORTED_Autoneg |
8282                                                SUPPORTED_Pause |
8283                                                SUPPORTED_Asym_Pause);
8284                         break;
8285
8286                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8287                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8288                                        ext_phy_type);
8289
8290                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8291                                                SUPPORTED_10baseT_Full |
8292                                                SUPPORTED_100baseT_Half |
8293                                                SUPPORTED_100baseT_Full |
8294                                                SUPPORTED_1000baseT_Full |
8295                                                SUPPORTED_10000baseT_Full |
8296                                                SUPPORTED_TP |
8297                                                SUPPORTED_Autoneg |
8298                                                SUPPORTED_Pause |
8299                                                SUPPORTED_Asym_Pause);
8300                         break;
8301
8302                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8303                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8304                                   bp->link_params.ext_phy_config);
8305                         break;
8306
8307                 default:
8308                         BNX2X_ERR("NVRAM config error. "
8309                                   "BAD XGXS ext_phy_config 0x%x\n",
8310                                   bp->link_params.ext_phy_config);
8311                         return;
8312                 }
8313
8314                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8315                                            port*0x18);
8316                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8317
8318                 break;
8319
8320         default:
8321                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8322                           bp->port.link_config);
8323                 return;
8324         }
8325         bp->link_params.phy_addr = bp->port.phy_addr;
8326
8327         /* mask what we support according to speed_cap_mask */
8328         if (!(bp->link_params.speed_cap_mask &
8329                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8330                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8331
8332         if (!(bp->link_params.speed_cap_mask &
8333                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8334                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8335
8336         if (!(bp->link_params.speed_cap_mask &
8337                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8338                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8339
8340         if (!(bp->link_params.speed_cap_mask &
8341                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8342                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8343
8344         if (!(bp->link_params.speed_cap_mask &
8345                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8346                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8347                                         SUPPORTED_1000baseT_Full);
8348
8349         if (!(bp->link_params.speed_cap_mask &
8350                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8351                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8352
8353         if (!(bp->link_params.speed_cap_mask &
8354                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8355                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8356
8357         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8358 }
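
/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * chain of speed_cap_mask tests above is equivalent to walking a small
 * capability table and clearing every SUPPORTED_* bit whose D0 capability
 * bit is absent:
 */
#if 0
static void bnx2x_example_mask_supported(struct bnx2x *bp)
{
	static const struct {
		u32 cap;	/* PORT_HW_CFG_SPEED_CAPABILITY_D0_* bit */
		u32 supported;	/* SUPPORTED_* bits gated by it */
	} map[] = {
		{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF,
					SUPPORTED_10baseT_Half },
		{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL,
					SUPPORTED_10baseT_Full },
		{ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF,
					SUPPORTED_100baseT_Half },
		{ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL,
					SUPPORTED_100baseT_Full },
		{ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G,
					SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full },
		{ PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G,
					SUPPORTED_2500baseX_Full },
		{ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G,
					SUPPORTED_10000baseT_Full },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (!(bp->link_params.speed_cap_mask & map[i].cap))
			bp->port.supported &= ~map[i].supported;
}
#endif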
8359
8360 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8361 {
8362         bp->link_params.req_duplex = DUPLEX_FULL;
8363
8364         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8365         case PORT_FEATURE_LINK_SPEED_AUTO:
8366                 if (bp->port.supported & SUPPORTED_Autoneg) {
8367                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8368                         bp->port.advertising = bp->port.supported;
8369                 } else {
8370                         u32 ext_phy_type =
8371                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8372
8373                         if ((ext_phy_type ==
8374                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8375                             (ext_phy_type ==
8376                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8377                                 /* force 10G, no AN */
8378                                 bp->link_params.req_line_speed = SPEED_10000;
8379                                 bp->port.advertising =
8380                                                 (ADVERTISED_10000baseT_Full |
8381                                                  ADVERTISED_FIBRE);
8382                                 break;
8383                         }
8384                         BNX2X_ERR("NVRAM config error. "
8385                                   "Invalid link_config 0x%x"
8386                                   "  Autoneg not supported\n",
8387                                   bp->port.link_config);
8388                         return;
8389                 }
8390                 break;
8391
8392         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8393                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8394                         bp->link_params.req_line_speed = SPEED_10;
8395                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8396                                                 ADVERTISED_TP);
8397                 } else {
8398                         BNX2X_ERR("NVRAM config error. "
8399                                   "Invalid link_config 0x%x"
8400                                   "  speed_cap_mask 0x%x\n",
8401                                   bp->port.link_config,
8402                                   bp->link_params.speed_cap_mask);
8403                         return;
8404                 }
8405                 break;
8406
8407         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8408                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8409                         bp->link_params.req_line_speed = SPEED_10;
8410                         bp->link_params.req_duplex = DUPLEX_HALF;
8411                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8412                                                 ADVERTISED_TP);
8413                 } else {
8414                         BNX2X_ERR("NVRAM config error. "
8415                                   "Invalid link_config 0x%x"
8416                                   "  speed_cap_mask 0x%x\n",
8417                                   bp->port.link_config,
8418                                   bp->link_params.speed_cap_mask);
8419                         return;
8420                 }
8421                 break;
8422
8423         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8424                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8425                         bp->link_params.req_line_speed = SPEED_100;
8426                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8427                                                 ADVERTISED_TP);
8428                 } else {
8429                         BNX2X_ERR("NVRAM config error. "
8430                                   "Invalid link_config 0x%x"
8431                                   "  speed_cap_mask 0x%x\n",
8432                                   bp->port.link_config,
8433                                   bp->link_params.speed_cap_mask);
8434                         return;
8435                 }
8436                 break;
8437
8438         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8439                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8440                         bp->link_params.req_line_speed = SPEED_100;
8441                         bp->link_params.req_duplex = DUPLEX_HALF;
8442                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8443                                                 ADVERTISED_TP);
8444                 } else {
8445                         BNX2X_ERR("NVRAM config error. "
8446                                   "Invalid link_config 0x%x"
8447                                   "  speed_cap_mask 0x%x\n",
8448                                   bp->port.link_config,
8449                                   bp->link_params.speed_cap_mask);
8450                         return;
8451                 }
8452                 break;
8453
8454         case PORT_FEATURE_LINK_SPEED_1G:
8455                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8456                         bp->link_params.req_line_speed = SPEED_1000;
8457                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8458                                                 ADVERTISED_TP);
8459                 } else {
8460                         BNX2X_ERR("NVRAM config error. "
8461                                   "Invalid link_config 0x%x"
8462                                   "  speed_cap_mask 0x%x\n",
8463                                   bp->port.link_config,
8464                                   bp->link_params.speed_cap_mask);
8465                         return;
8466                 }
8467                 break;
8468
8469         case PORT_FEATURE_LINK_SPEED_2_5G:
8470                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8471                         bp->link_params.req_line_speed = SPEED_2500;
8472                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8473                                                 ADVERTISED_TP);
8474                 } else {
8475                         BNX2X_ERR("NVRAM config error. "
8476                                   "Invalid link_config 0x%x"
8477                                   "  speed_cap_mask 0x%x\n",
8478                                   bp->port.link_config,
8479                                   bp->link_params.speed_cap_mask);
8480                         return;
8481                 }
8482                 break;
8483
8484         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8485         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8486         case PORT_FEATURE_LINK_SPEED_10G_KR:
8487                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8488                         bp->link_params.req_line_speed = SPEED_10000;
8489                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8490                                                 ADVERTISED_FIBRE);
8491                 } else {
8492                         BNX2X_ERR("NVRAM config error. "
8493                                   "Invalid link_config 0x%x"
8494                                   "  speed_cap_mask 0x%x\n",
8495                                   bp->port.link_config,
8496                                   bp->link_params.speed_cap_mask);
8497                         return;
8498                 }
8499                 break;
8500
8501         default:
8502                 BNX2X_ERR("NVRAM config error. "
8503                           "BAD link speed link_config 0x%x\n",
8504                           bp->port.link_config);
8505                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8506                 bp->port.advertising = bp->port.supported;
8507                 break;
8508         }
8509
8510         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8511                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8512         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8513             !(bp->port.supported & SUPPORTED_Autoneg))
8514                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8515
8516         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8517                        "  advertising 0x%x\n",
8518                        bp->link_params.req_line_speed,
8519                        bp->link_params.req_duplex,
8520                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8521 }
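
/* Worked example with hypothetical NVRAM contents: if link_config selects
 * PORT_FEATURE_LINK_SPEED_1G on a port whose supported mask includes
 * SUPPORTED_1000baseT_Full, the function above yields
 * req_line_speed == SPEED_1000, req_duplex == DUPLEX_FULL and
 * advertising == (ADVERTISED_1000baseT_Full | ADVERTISED_TP); and if the
 * flow-control field is BNX2X_FLOW_CTRL_AUTO on a port without
 * SUPPORTED_Autoneg, req_flow_ctrl is forced to BNX2X_FLOW_CTRL_NONE.
 */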
8522
8523 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8524 {
8525         int port = BP_PORT(bp);
8526         u32 val, val2;
8527         u32 config;
8528         u16 i;
8529         u32 ext_phy_type;
8530
8531         bp->link_params.bp = bp;
8532         bp->link_params.port = port;
8533
8534         bp->link_params.lane_config =
8535                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8536         bp->link_params.ext_phy_config =
8537                 SHMEM_RD(bp,
8538                          dev_info.port_hw_config[port].external_phy_config);
8539         /* BCM8727_NOC => BCM8727 with no over-current detection */
8540         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8541             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8542                 bp->link_params.ext_phy_config &=
8543                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8544                 bp->link_params.ext_phy_config |=
8545                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8546                 bp->link_params.feature_config_flags |=
8547                         FEATURE_CONFIG_BCM8727_NOC;
8548         }
8549
8550         bp->link_params.speed_cap_mask =
8551                 SHMEM_RD(bp,
8552                          dev_info.port_hw_config[port].speed_capability_mask);
8553
8554         bp->port.link_config =
8555                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8556
8557         /* Get the Rx and Tx XGXS config for all 4 lanes */
8558         for (i = 0; i < 2; i++) {
8559                 val = SHMEM_RD(bp,
8560                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8561                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8562                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8563
8564                 val = SHMEM_RD(bp,
8565                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8566                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8567                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8568         }
8569
8570         /* If the device is capable of WoL, set the default state according
8571          * to the HW
8572          */
8573         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8574         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8575                    (config & PORT_FEATURE_WOL_ENABLED));
8576
8577         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8578                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8579                        bp->link_params.lane_config,
8580                        bp->link_params.ext_phy_config,
8581                        bp->link_params.speed_cap_mask, bp->port.link_config);
8582
8583         bp->link_params.switch_cfg |= (bp->port.link_config &
8584                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8585         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8586
8587         bnx2x_link_settings_requested(bp);
8588
8589         /*
8590          * If connected directly, work with the internal PHY, otherwise, work
8591          * with the external PHY
8592          */
8593         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8594         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8595                 bp->mdio.prtad = bp->link_params.phy_addr;
8596
8597         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8598                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8599                 bp->mdio.prtad =
8600                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8601
8602         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8603         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8604         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8605         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8606         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8607         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8608         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8609         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8610         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8611         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8612 }
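
/* Illustrative sketch (hypothetical helper, not part of the driver): the
 * shmem MAC is split across two dwords -- mac_upper carries bytes 0-1 in
 * its low 16 bits and mac_lower carries bytes 2-5 -- which is how the
 * assignments above (and the E1HMF copy in bnx2x_get_hwinfo() below)
 * unpack it:
 */
#if 0
static void bnx2x_example_set_mac(u8 *addr, u32 mac_upper, u32 mac_lower)
{
	addr[0] = (u8)(mac_upper >> 8);
	addr[1] = (u8)mac_upper;
	addr[2] = (u8)(mac_lower >> 24);
	addr[3] = (u8)(mac_lower >> 16);
	addr[4] = (u8)(mac_lower >> 8);
	addr[5] = (u8)mac_lower;
}
#endif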
8613
8614 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8615 {
8616         int func = BP_FUNC(bp);
8617         u32 val, val2;
8618         int rc = 0;
8619
8620         bnx2x_get_common_hwinfo(bp);
8621
8622         bp->e1hov = 0;
8623         bp->e1hmf = 0;
8624         if (CHIP_IS_E1H(bp)) {
8625                 bp->mf_config =
8626                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8627
8628                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8629                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8630                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8631                         bp->e1hmf = 1;
8632                 BNX2X_DEV_INFO("%s function mode\n",
8633                                IS_E1HMF(bp) ? "multi" : "single");
8634
8635                 if (IS_E1HMF(bp)) {
8636                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8637                                                                 e1hov_tag) &
8638                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8639                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8640                                 bp->e1hov = val;
8641                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8642                                                "(0x%04x)\n",
8643                                                func, bp->e1hov, bp->e1hov);
8644                         } else {
8645                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8646                                           "  aborting\n", func);
8647                                 rc = -EPERM;
8648                         }
8649                 } else {
8650                         if (BP_E1HVN(bp)) {
8651                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8652                                           "  aborting\n", BP_E1HVN(bp));
8653                                 rc = -EPERM;
8654                         }
8655                 }
8656         }
8657
8658         if (!BP_NOMCP(bp)) {
8659                 bnx2x_get_port_hwinfo(bp);
8660
8661                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8662                               DRV_MSG_SEQ_NUMBER_MASK);
8663                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8664         }
8665
8666         if (IS_E1HMF(bp)) {
8667                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8668                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8669                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8670                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8671                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8672                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8673                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8674                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8675                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8676                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8677                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8678                                ETH_ALEN);
8679                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8680                                ETH_ALEN);
8681                 }
8682
8683                 return rc;
8684         }
8685
8686         if (BP_NOMCP(bp)) {
8687                 /* only supposed to happen on emulation/FPGA */
8688                 BNX2X_ERR("warning: random MAC workaround active\n");
8689                 random_ether_addr(bp->dev->dev_addr);
8690                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8691         }
8692
8693         return rc;
8694 }
8695
8696 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8697 {
8698         int func = BP_FUNC(bp);
8699         int timer_interval;
8700         int rc;
8701
8702         /* Disable interrupt handling until HW is initialized */
8703         atomic_set(&bp->intr_sem, 1);
8704         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8705
8706         mutex_init(&bp->port.phy_mutex);
8707
8708         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8709         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8710
8711         rc = bnx2x_get_hwinfo(bp);
8712
8713         /* need to reset the chip if UNDI was active */
8714         if (!BP_NOMCP(bp))
8715                 bnx2x_undi_unload(bp);
8716
8717         if (CHIP_REV_IS_FPGA(bp))
8718                 printk(KERN_ERR PFX "FPGA detected\n");
8719
8720         if (BP_NOMCP(bp) && (func == 0))
8721                 printk(KERN_ERR PFX
8722                        "MCP disabled, must load devices in order!\n");
8723
8724         /* Set multi queue mode */
8725         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8726             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8727                 printk(KERN_ERR PFX
8728                       "Multi queue disabled since the requested int_mode is not MSI-X\n");
8729                 multi_mode = ETH_RSS_MODE_DISABLED;
8730         }
8731         bp->multi_mode = multi_mode;
8732
8733
8734         /* Set TPA flags */
8735         if (disable_tpa) {
8736                 bp->flags &= ~TPA_ENABLE_FLAG;
8737                 bp->dev->features &= ~NETIF_F_LRO;
8738         } else {
8739                 bp->flags |= TPA_ENABLE_FLAG;
8740                 bp->dev->features |= NETIF_F_LRO;
8741         }
8742
8743         if (CHIP_IS_E1(bp))
8744                 bp->dropless_fc = 0;
8745         else
8746                 bp->dropless_fc = dropless_fc;
8747
8748         bp->mrrs = mrrs;
8749
8750         bp->tx_ring_size = MAX_TX_AVAIL;
8751         bp->rx_ring_size = MAX_RX_AVAIL;
8752
8753         bp->rx_csum = 1;
8754
8755         bp->tx_ticks = 50;
8756         bp->rx_ticks = 25;
8757
8758         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8759         bp->current_interval = (poll ? poll : timer_interval);
8760
8761         init_timer(&bp->timer);
8762         bp->timer.expires = jiffies + bp->current_interval;
8763         bp->timer.data = (unsigned long) bp;
8764         bp->timer.function = bnx2x_timer;
8765
8766         return rc;
8767 }
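
/* Note: the timer above is only initialized here, not armed. Whoever brings
 * the NIC up is expected to start it with something like
 * mod_timer(&bp->timer, jiffies + bp->current_interval), after which
 * bnx2x_timer() re-arms itself every bp->current_interval jiffies.
 */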
8768
8769 /*
8770  * ethtool service functions
8771  */
8772
8773 /* All ethtool functions called with rtnl_lock */
8774
8775 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8776 {
8777         struct bnx2x *bp = netdev_priv(dev);
8778
8779         cmd->supported = bp->port.supported;
8780         cmd->advertising = bp->port.advertising;
8781
8782         if (netif_carrier_ok(dev)) {
8783                 cmd->speed = bp->link_vars.line_speed;
8784                 cmd->duplex = bp->link_vars.duplex;
8785         } else {
8786                 cmd->speed = bp->link_params.req_line_speed;
8787                 cmd->duplex = bp->link_params.req_duplex;
8788         }
8789         if (IS_E1HMF(bp)) {
8790                 u16 vn_max_rate;
8791
8792                 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8793                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8794                 if (vn_max_rate < cmd->speed)
8795                         cmd->speed = vn_max_rate;
8796         }
8797
8798         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8799                 u32 ext_phy_type =
8800                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8801
8802                 switch (ext_phy_type) {
8803                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8804                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8805                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8806                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8807                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8808                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8809                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8810                         cmd->port = PORT_FIBRE;
8811                         break;
8812
8813                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8814                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8815                         cmd->port = PORT_TP;
8816                         break;
8817
8818                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8819                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8820                                   bp->link_params.ext_phy_config);
8821                         break;
8822
8823                 default:
8824                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8825                            bp->link_params.ext_phy_config);
8826                         break;
8827                 }
8828         } else
8829                 cmd->port = PORT_TP;
8830
8831         cmd->phy_address = bp->mdio.prtad;
8832         cmd->transceiver = XCVR_INTERNAL;
8833
8834         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8835                 cmd->autoneg = AUTONEG_ENABLE;
8836         else
8837                 cmd->autoneg = AUTONEG_DISABLE;
8838
8839         cmd->maxtxpkt = 0;
8840         cmd->maxrxpkt = 0;
8841
8842         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8843            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8844            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8845            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8846            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8847            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8848            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8849
8850         return 0;
8851 }
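
/* Worked example with a hypothetical mf_cfg value: the MAX_BW field is in
 * units of 100 Mbps (hence the '* 100' above), so a configured value of 25
 * yields vn_max_rate == 2500 and caps the reported speed at 2.5G even when
 * the physical link runs at 10G.
 */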
8852
8853 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8854 {
8855         struct bnx2x *bp = netdev_priv(dev);
8856         u32 advertising;
8857
8858         if (IS_E1HMF(bp))
8859                 return 0;
8860
8861         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8862            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
8863            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
8864            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
8865            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8866            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8867            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8868
8869         if (cmd->autoneg == AUTONEG_ENABLE) {
8870                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8871                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8872                         return -EINVAL;
8873                 }
8874
8875                 /* limit the requested advertising to the supported modes */
8876                 cmd->advertising &= bp->port.supported;
8877
8878                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8879                 bp->link_params.req_duplex = DUPLEX_FULL;
8880                 bp->port.advertising |= (ADVERTISED_Autoneg |
8881                                          cmd->advertising);
8882
8883         } else { /* forced speed */
8884                 /* advertise the requested speed and duplex if supported */
8885                 switch (cmd->speed) {
8886                 case SPEED_10:
8887                         if (cmd->duplex == DUPLEX_FULL) {
8888                                 if (!(bp->port.supported &
8889                                       SUPPORTED_10baseT_Full)) {
8890                                         DP(NETIF_MSG_LINK,
8891                                            "10M full not supported\n");
8892                                         return -EINVAL;
8893                                 }
8894
8895                                 advertising = (ADVERTISED_10baseT_Full |
8896                                                ADVERTISED_TP);
8897                         } else {
8898                                 if (!(bp->port.supported &
8899                                       SUPPORTED_10baseT_Half)) {
8900                                         DP(NETIF_MSG_LINK,
8901                                            "10M half not supported\n");
8902                                         return -EINVAL;
8903                                 }
8904
8905                                 advertising = (ADVERTISED_10baseT_Half |
8906                                                ADVERTISED_TP);
8907                         }
8908                         break;
8909
8910                 case SPEED_100:
8911                         if (cmd->duplex == DUPLEX_FULL) {
8912                                 if (!(bp->port.supported &
8913                                                 SUPPORTED_100baseT_Full)) {
8914                                         DP(NETIF_MSG_LINK,
8915                                            "100M full not supported\n");
8916                                         return -EINVAL;
8917                                 }
8918
8919                                 advertising = (ADVERTISED_100baseT_Full |
8920                                                ADVERTISED_TP);
8921                         } else {
8922                                 if (!(bp->port.supported &
8923                                                 SUPPORTED_100baseT_Half)) {
8924                                         DP(NETIF_MSG_LINK,
8925                                            "100M half not supported\n");
8926                                         return -EINVAL;
8927                                 }
8928
8929                                 advertising = (ADVERTISED_100baseT_Half |
8930                                                ADVERTISED_TP);
8931                         }
8932                         break;
8933
8934                 case SPEED_1000:
8935                         if (cmd->duplex != DUPLEX_FULL) {
8936                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
8937                                 return -EINVAL;
8938                         }
8939
8940                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8941                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
8942                                 return -EINVAL;
8943                         }
8944
8945                         advertising = (ADVERTISED_1000baseT_Full |
8946                                        ADVERTISED_TP);
8947                         break;
8948
8949                 case SPEED_2500:
8950                         if (cmd->duplex != DUPLEX_FULL) {
8951                                 DP(NETIF_MSG_LINK,
8952                                    "2.5G half not supported\n");
8953                                 return -EINVAL;
8954                         }
8955
8956                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8957                                 DP(NETIF_MSG_LINK,
8958                                    "2.5G full not supported\n");
8959                                 return -EINVAL;
8960                         }
8961
8962                         advertising = (ADVERTISED_2500baseX_Full |
8963                                        ADVERTISED_TP);
8964                         break;
8965
8966                 case SPEED_10000:
8967                         if (cmd->duplex != DUPLEX_FULL) {
8968                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
8969                                 return -EINVAL;
8970                         }
8971
8972                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8973                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
8974                                 return -EINVAL;
8975                         }
8976
8977                         advertising = (ADVERTISED_10000baseT_Full |
8978                                        ADVERTISED_FIBRE);
8979                         break;
8980
8981                 default:
8982                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
8983                         return -EINVAL;
8984                 }
8985
8986                 bp->link_params.req_line_speed = cmd->speed;
8987                 bp->link_params.req_duplex = cmd->duplex;
8988                 bp->port.advertising = advertising;
8989         }
8990
8991         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8992            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
8993            bp->link_params.req_line_speed, bp->link_params.req_duplex,
8994            bp->port.advertising);
8995
8996         if (netif_running(dev)) {
8997                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8998                 bnx2x_link_set(bp);
8999         }
9000
9001         return 0;
9002 }
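
/* User-space usage example: this handler backs ethtool's "-s" option, e.g.
 *	ethtool -s ethX autoneg on
 *	ethtool -s ethX speed 10000 duplex full autoneg off
 * (ethX is a placeholder interface name). Half duplex at 1G and above is
 * rejected with -EINVAL, as is any mode missing from bp->port.supported.
 */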
9003
9004 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9005 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9006
9007 static int bnx2x_get_regs_len(struct net_device *dev)
9008 {
9009         struct bnx2x *bp = netdev_priv(dev);
9010         int regdump_len = 0;
9011         int i;
9012
9013         if (CHIP_IS_E1(bp)) {
9014                 for (i = 0; i < REGS_COUNT; i++)
9015                         if (IS_E1_ONLINE(reg_addrs[i].info))
9016                                 regdump_len += reg_addrs[i].size;
9017
9018                 for (i = 0; i < WREGS_COUNT_E1; i++)
9019                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9020                                 regdump_len += wreg_addrs_e1[i].size *
9021                                         (1 + wreg_addrs_e1[i].read_regs_count);
9022
9023         } else { /* E1H */
9024                 for (i = 0; i < REGS_COUNT; i++)
9025                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9026                                 regdump_len += reg_addrs[i].size;
9027
9028                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9029                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9030                                 regdump_len += wreg_addrs_e1h[i].size *
9031                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9032         }
9033         regdump_len *= 4;
9034         regdump_len += sizeof(struct dump_hdr);
9035
9036         return regdump_len;
9037 }
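
/* Note: the sizes accumulated above are counted in 32-bit registers --
 * each wide-bus entry expands to its size times (1 + read_regs_count)
 * dwords -- so the '*= 4' converts dwords to bytes before the dump header
 * is added. bnx2x_get_regs() below must fill exactly this many bytes.
 */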
9038
9039 static void bnx2x_get_regs(struct net_device *dev,
9040                            struct ethtool_regs *regs, void *_p)
9041 {
9042         u32 *p = _p, i, j;
9043         struct bnx2x *bp = netdev_priv(dev);
9044         struct dump_hdr dump_hdr = {0};
9045
9046         regs->version = 0;
9047         memset(p, 0, regs->len);
9048
9049         if (!netif_running(bp->dev))
9050                 return;
9051
9052         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9053         dump_hdr.dump_sign = dump_sign_all;
9054         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9055         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9056         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9057         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9058         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9059
9060         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9061         p += dump_hdr.hdr_size + 1;
9062
9063         if (CHIP_IS_E1(bp)) {
9064                 for (i = 0; i < REGS_COUNT; i++)
9065                         if (IS_E1_ONLINE(reg_addrs[i].info))
9066                                 for (j = 0; j < reg_addrs[i].size; j++)
9067                                         *p++ = REG_RD(bp,
9068                                                       reg_addrs[i].addr + j*4);
9069
9070         } else { /* E1H */
9071                 for (i = 0; i < REGS_COUNT; i++)
9072                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9073                                 for (j = 0; j < reg_addrs[i].size; j++)
9074                                         *p++ = REG_RD(bp,
9075                                                       reg_addrs[i].addr + j*4);
9076         }
9077 }
9078
9079 #define PHY_FW_VER_LEN                  10
9080
9081 static void bnx2x_get_drvinfo(struct net_device *dev,
9082                               struct ethtool_drvinfo *info)
9083 {
9084         struct bnx2x *bp = netdev_priv(dev);
9085         u8 phy_fw_ver[PHY_FW_VER_LEN];
9086
9087         strcpy(info->driver, DRV_MODULE_NAME);
9088         strcpy(info->version, DRV_MODULE_VERSION);
9089
9090         phy_fw_ver[0] = '\0';
9091         if (bp->port.pmf) {
9092                 bnx2x_acquire_phy_lock(bp);
9093                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9094                                              (bp->state != BNX2X_STATE_CLOSED),
9095                                              phy_fw_ver, PHY_FW_VER_LEN);
9096                 bnx2x_release_phy_lock(bp);
9097         }
9098
9099         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9100                  (bp->common.bc_ver & 0xff0000) >> 16,
9101                  (bp->common.bc_ver & 0xff00) >> 8,
9102                  (bp->common.bc_ver & 0xff),
9103                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9104         strcpy(info->bus_info, pci_name(bp->pdev));
9105         info->n_stats = BNX2X_NUM_STATS;
9106         info->testinfo_len = BNX2X_NUM_TESTS;
9107         info->eedump_len = bp->common.flash_size;
9108         info->regdump_len = bnx2x_get_regs_len(dev);
9109 }
9110
9111 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9112 {
9113         struct bnx2x *bp = netdev_priv(dev);
9114
9115         if (bp->flags & NO_WOL_FLAG) {
9116                 wol->supported = 0;
9117                 wol->wolopts = 0;
9118         } else {
9119                 wol->supported = WAKE_MAGIC;
9120                 if (bp->wol)
9121                         wol->wolopts = WAKE_MAGIC;
9122                 else
9123                         wol->wolopts = 0;
9124         }
9125         memset(&wol->sopass, 0, sizeof(wol->sopass));
9126 }
9127
9128 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9129 {
9130         struct bnx2x *bp = netdev_priv(dev);
9131
9132         if (wol->wolopts & ~WAKE_MAGIC)
9133                 return -EINVAL;
9134
9135         if (wol->wolopts & WAKE_MAGIC) {
9136                 if (bp->flags & NO_WOL_FLAG)
9137                         return -EINVAL;
9138
9139                 bp->wol = 1;
9140         } else
9141                 bp->wol = 0;
9142
9143         return 0;
9144 }
9145
9146 static u32 bnx2x_get_msglevel(struct net_device *dev)
9147 {
9148         struct bnx2x *bp = netdev_priv(dev);
9149
9150         return bp->msglevel;
9151 }
9152
9153 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9154 {
9155         struct bnx2x *bp = netdev_priv(dev);
9156
9157         if (capable(CAP_NET_ADMIN))
9158                 bp->msglevel = level;
9159 }
9160
9161 static int bnx2x_nway_reset(struct net_device *dev)
9162 {
9163         struct bnx2x *bp = netdev_priv(dev);
9164
9165         if (!bp->port.pmf)
9166                 return 0;
9167
9168         if (netif_running(dev)) {
9169                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9170                 bnx2x_link_set(bp);
9171         }
9172
9173         return 0;
9174 }
9175
9176 static u32 bnx2x_get_link(struct net_device *dev)
9177 {
9178         struct bnx2x *bp = netdev_priv(dev);
9179
9180         return bp->link_vars.link_up;
9181 }
9182
9183 static int bnx2x_get_eeprom_len(struct net_device *dev)
9184 {
9185         struct bnx2x *bp = netdev_priv(dev);
9186
9187         return bp->common.flash_size;
9188 }
9189
9190 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9191 {
9192         int port = BP_PORT(bp);
9193         int count, i;
9194         u32 val = 0;
9195
9196         /* adjust timeout for emulation/FPGA */
9197         count = NVRAM_TIMEOUT_COUNT;
9198         if (CHIP_REV_IS_SLOW(bp))
9199                 count *= 100;
9200
9201         /* request access to nvram interface */
9202         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9203                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9204
9205         for (i = 0; i < count*10; i++) {
9206                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9207                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9208                         break;
9209
9210                 udelay(5);
9211         }
9212
9213         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9214                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9215                 return -EBUSY;
9216         }
9217
9218         return 0;
9219 }
9220
9221 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9222 {
9223         int port = BP_PORT(bp);
9224         int count, i;
9225         u32 val = 0;
9226
9227         /* adjust timeout for emulation/FPGA */
9228         count = NVRAM_TIMEOUT_COUNT;
9229         if (CHIP_REV_IS_SLOW(bp))
9230                 count *= 100;
9231
9232         /* relinquish nvram interface */
9233         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9234                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9235
9236         for (i = 0; i < count*10; i++) {
9237                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9238                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9239                         break;
9240
9241                 udelay(5);
9242         }
9243
9244         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9245                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9246                 return -EBUSY;
9247         }
9248
9249         return 0;
9250 }
9251
9252 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9253 {
9254         u32 val;
9255
9256         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9257
9258         /* enable both bits, even on read */
9259         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9260                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9261                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9262 }
9263
9264 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9265 {
9266         u32 val;
9267
9268         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9269
9270         /* disable both bits, even after read */
9271         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9272                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9273                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9274 }
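
/* Illustrative sketch (hypothetical helper, not part of the driver): every
 * NVRAM accessor in this file brackets its dword commands with the same
 * four calls, in this order:
 */
#if 0
static int bnx2x_example_nvram_op(struct bnx2x *bp)
{
	int rc;

	rc = bnx2x_acquire_nvram_lock(bp);	/* per-port SW arbitration */
	if (rc)
		return rc;
	bnx2x_enable_nvram_access(bp);

	/* ... bnx2x_nvram_read_dword()/bnx2x_nvram_write_dword() calls ... */

	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);
	return rc;
}
#endif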
9275
9276 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9277                                   u32 cmd_flags)
9278 {
9279         int count, i, rc;
9280         u32 val;
9281
9282         /* build the command word */
9283         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9284
9285         /* need to clear DONE bit separately */
9286         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9287
9288         /* address of the NVRAM to read from */
9289         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9290                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9291
9292         /* issue a read command */
9293         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9294
9295         /* adjust timeout for emulation/FPGA */
9296         count = NVRAM_TIMEOUT_COUNT;
9297         if (CHIP_REV_IS_SLOW(bp))
9298                 count *= 100;
9299
9300         /* wait for completion */
9301         *ret_val = 0;
9302         rc = -EBUSY;
9303         for (i = 0; i < count; i++) {
9304                 udelay(5);
9305                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9306
9307                 if (val & MCPR_NVM_COMMAND_DONE) {
9308                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9309                         /* we read NVRAM data in CPU order,
9310                          * but ethtool expects an array of bytes;
9311                          * converting to big-endian does the job */
9312                         *ret_val = cpu_to_be32(val);
9313                         rc = 0;
9314                         break;
9315                 }
9316         }
9317
9318         return rc;
9319 }
9320
9321 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9322                             int buf_size)
9323 {
9324         int rc;
9325         u32 cmd_flags;
9326         __be32 val;
9327
9328         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9329                 DP(BNX2X_MSG_NVM,
9330                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9331                    offset, buf_size);
9332                 return -EINVAL;
9333         }
9334
9335         if (offset + buf_size > bp->common.flash_size) {
9336                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9337                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9338                    offset, buf_size, bp->common.flash_size);
9339                 return -EINVAL;
9340         }
9341
9342         /* request access to nvram interface */
9343         rc = bnx2x_acquire_nvram_lock(bp);
9344         if (rc)
9345                 return rc;
9346
9347         /* enable access to nvram interface */
9348         bnx2x_enable_nvram_access(bp);
9349
9350         /* read the first word(s) */
9351         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9352         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9353                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9354                 memcpy(ret_buf, &val, 4);
9355
9356                 /* advance to the next dword */
9357                 offset += sizeof(u32);
9358                 ret_buf += sizeof(u32);
9359                 buf_size -= sizeof(u32);
9360                 cmd_flags = 0;
9361         }
9362
9363         if (rc == 0) {
9364                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9365                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9366                 memcpy(ret_buf, &val, 4);
9367         }
9368
9369         /* disable access to nvram interface */
9370         bnx2x_disable_nvram_access(bp);
9371         bnx2x_release_nvram_lock(bp);
9372
9373         return rc;
9374 }
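
/* Note on the command flags used above: MCPR_NVM_COMMAND_FIRST marks the
 * opening dword of a burst and MCPR_NVM_COMMAND_LAST the closing one. The
 * loop therefore sends FIRST once, plain reads in between and LAST for the
 * final dword, while a single-dword access passes both flags at once (as
 * bnx2x_nvram_write1() does below).
 */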
9375
9376 static int bnx2x_get_eeprom(struct net_device *dev,
9377                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9378 {
9379         struct bnx2x *bp = netdev_priv(dev);
9380         int rc;
9381
9382         if (!netif_running(dev))
9383                 return -EAGAIN;
9384
9385         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9386            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9387            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9388            eeprom->len, eeprom->len);
9389
9390         /* parameters already validated in ethtool_get_eeprom */
9391
9392         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9393
9394         return rc;
9395 }
9396
9397 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9398                                    u32 cmd_flags)
9399 {
9400         int count, i, rc;
9401
9402         /* build the command word */
9403         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9404
9405         /* need to clear DONE bit separately */
9406         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9407
9408         /* write the data */
9409         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9410
9411         /* address of the NVRAM to write to */
9412         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9413                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9414
9415         /* issue the write command */
9416         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9417
9418         /* adjust timeout for emulation/FPGA */
9419         count = NVRAM_TIMEOUT_COUNT;
9420         if (CHIP_REV_IS_SLOW(bp))
9421                 count *= 100;
9422
9423         /* wait for completion */
9424         rc = -EBUSY;
9425         for (i = 0; i < count; i++) {
9426                 udelay(5);
9427                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9428                 if (val & MCPR_NVM_COMMAND_DONE) {
9429                         rc = 0;
9430                         break;
9431                 }
9432         }
9433
9434         return rc;
9435 }
9436
9437 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9438
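/* BYTE_OFFSET() turns a byte address into a bit shift within its
 * aligned dword: e.g. offset 0x102 -> (0x102 & 0x03) * 8 = 16.
 *
 * bnx2x_nvram_write1() implements the single-byte writes issued by
 * ethtool as a read-modify-write: it reads the aligned dword, patches
 * one byte in place using that shift and writes the dword back.
 */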
9439 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9440                               int buf_size)
9441 {
9442         int rc;
9443         u32 cmd_flags;
9444         u32 align_offset;
9445         __be32 val;
9446
9447         if (offset + buf_size > bp->common.flash_size) {
9448                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9449                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9450                    offset, buf_size, bp->common.flash_size);
9451                 return -EINVAL;
9452         }
9453
9454         /* request access to nvram interface */
9455         rc = bnx2x_acquire_nvram_lock(bp);
9456         if (rc)
9457                 return rc;
9458
9459         /* enable access to nvram interface */
9460         bnx2x_enable_nvram_access(bp);
9461
9462         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9463         align_offset = (offset & ~0x03);
9464         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9465
9466         if (rc == 0) {
9467                 val &= ~(0xff << BYTE_OFFSET(offset));
9468                 val |= (*data_buf << BYTE_OFFSET(offset));
9469
9470                 /* nvram data is returned as an array of bytes;
9471                  * convert it back to cpu order */
9472                 val = be32_to_cpu(val);
9473
9474                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9475                                              cmd_flags);
9476         }
9477
9478         /* disable access to nvram interface */
9479         bnx2x_disable_nvram_access(bp);
9480         bnx2x_release_nvram_lock(bp);
9481
9482         return rc;
9483 }
9484
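/* Write buf_size bytes (dword-aligned) to NVRAM.  The FIRST/LAST
 * command flags are re-asserted around NVRAM_PAGE_SIZE boundaries so
 * that each flash page is programmed as its own burst; single-byte
 * requests from ethtool are delegated to bnx2x_nvram_write1().
 */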
9485 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9486                              int buf_size)
9487 {
9488         int rc;
9489         u32 cmd_flags;
9490         u32 val;
9491         u32 written_so_far;
9492
9493         if (buf_size == 1)      /* ethtool */
9494                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9495
9496         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9497                 DP(BNX2X_MSG_NVM,
9498                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9499                    offset, buf_size);
9500                 return -EINVAL;
9501         }
9502
9503         if (offset + buf_size > bp->common.flash_size) {
9504                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9505                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9506                    offset, buf_size, bp->common.flash_size);
9507                 return -EINVAL;
9508         }
9509
9510         /* request access to nvram interface */
9511         rc = bnx2x_acquire_nvram_lock(bp);
9512         if (rc)
9513                 return rc;
9514
9515         /* enable access to nvram interface */
9516         bnx2x_enable_nvram_access(bp);
9517
9518         written_so_far = 0;
9519         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9520         while ((written_so_far < buf_size) && (rc == 0)) {
9521                 if (written_so_far == (buf_size - sizeof(u32)))
9522                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9523                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9524                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9525                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9526                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9527
9528                 memcpy(&val, data_buf, 4);
9529
9530                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9531
9532                 /* advance to the next dword */
9533                 offset += sizeof(u32);
9534                 data_buf += sizeof(u32);
9535                 written_so_far += sizeof(u32);
9536                 cmd_flags = 0;
9537         }
9538
9539         /* disable access to nvram interface */
9540         bnx2x_disable_nvram_access(bp);
9541         bnx2x_release_nvram_lock(bp);
9542
9543         return rc;
9544 }
9545
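/* ethtool -E entry point.  Magics in the 0x504859xx ('PHY'...) range
 * drive the external PHY firmware upgrade flow and are allowed only
 * for the PMF: 'PHYP' stops the link before the upgrade, 'PHYR'
 * re-initializes it afterwards, and the "upgrade completed" magic
 * takes an SFX7101 out of download mode and soft-resets it.  Any
 * other magic falls through to a plain NVRAM write.
 */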
9546 static int bnx2x_set_eeprom(struct net_device *dev,
9547                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9548 {
9549         struct bnx2x *bp = netdev_priv(dev);
9550         int port = BP_PORT(bp);
9551         int rc = 0;
9552
9553         if (!netif_running(dev))
9554                 return -EAGAIN;
9555
9556         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9557            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9558            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9559            eeprom->len, eeprom->len);
9560
9561         /* parameters already validated in ethtool_set_eeprom */
9562
9563         /* PHY eeprom can be accessed only by the PMF */
9564         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9565             !bp->port.pmf)
9566                 return -EINVAL;
9567
9568         if (eeprom->magic == 0x50485950) {
9569                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9570                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9571
9572                 bnx2x_acquire_phy_lock(bp);
9573                 rc |= bnx2x_link_reset(&bp->link_params,
9574                                        &bp->link_vars, 0);
9575                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9576                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9577                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9578                                        MISC_REGISTERS_GPIO_HIGH, port);
9579                 bnx2x_release_phy_lock(bp);
9580                 bnx2x_link_report(bp);
9581
9582         } else if (eeprom->magic == 0x50485952) {
9583                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9584                 if ((bp->state == BNX2X_STATE_OPEN) ||
9585                     (bp->state == BNX2X_STATE_DISABLED)) {
9586                         bnx2x_acquire_phy_lock(bp);
9587                         rc |= bnx2x_link_reset(&bp->link_params,
9588                                                &bp->link_vars, 1);
9589
9590                         rc |= bnx2x_phy_init(&bp->link_params,
9591                                              &bp->link_vars);
9592                         bnx2x_release_phy_lock(bp);
9593                         bnx2x_calc_fc_adv(bp);
9594                 }
9595         } else if (eeprom->magic == 0x53985943) {
9596                 /* PHY FW upgrade completed; note the magic matched
                      * here is 0x53985943, not the ASCII 'PHYC' 0x50485943 */
9597                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9598                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9599                         u8 ext_phy_addr =
9600                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9601
9602                         /* DSP Remove Download Mode */
9603                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9604                                        MISC_REGISTERS_GPIO_LOW, port);
9605
9606                         bnx2x_acquire_phy_lock(bp);
9607
9608                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9609
9610                         /* wait 0.5 sec to allow it to run */
9611                         msleep(500);
9612                         bnx2x_ext_phy_hw_reset(bp, port);
9613                         msleep(500);
9614                         bnx2x_release_phy_lock(bp);
9615                 }
9616         } else
9617                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9618
9619         return rc;
9620 }
9621
9622 static int bnx2x_get_coalesce(struct net_device *dev,
9623                               struct ethtool_coalesce *coal)
9624 {
9625         struct bnx2x *bp = netdev_priv(dev);
9626
9627         memset(coal, 0, sizeof(struct ethtool_coalesce));
9628
9629         coal->rx_coalesce_usecs = bp->rx_ticks;
9630         coal->tx_coalesce_usecs = bp->tx_ticks;
9631
9632         return 0;
9633 }
9634
9635 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
9636 static int bnx2x_set_coalesce(struct net_device *dev,
9637                               struct ethtool_coalesce *coal)
9638 {
9639         struct bnx2x *bp = netdev_priv(dev);
9640
9641         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9642         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9643                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9644
9645         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9646         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9647                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9648
9649         if (netif_running(dev))
9650                 bnx2x_update_coalesce(bp);
9651
9652         return 0;
9653 }
9654
9655 static void bnx2x_get_ringparam(struct net_device *dev,
9656                                 struct ethtool_ringparam *ering)
9657 {
9658         struct bnx2x *bp = netdev_priv(dev);
9659
9660         ering->rx_max_pending = MAX_RX_AVAIL;
9661         ering->rx_mini_max_pending = 0;
9662         ering->rx_jumbo_max_pending = 0;
9663
9664         ering->rx_pending = bp->rx_ring_size;
9665         ering->rx_mini_pending = 0;
9666         ering->rx_jumbo_pending = 0;
9667
9668         ering->tx_max_pending = MAX_TX_AVAIL;
9669         ering->tx_pending = bp->tx_ring_size;
9670 }
9671
9672 static int bnx2x_set_ringparam(struct net_device *dev,
9673                                struct ethtool_ringparam *ering)
9674 {
9675         struct bnx2x *bp = netdev_priv(dev);
9676         int rc = 0;
9677
9678         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9679             (ering->tx_pending > MAX_TX_AVAIL) ||
9680             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9681                 return -EINVAL;
9682
9683         bp->rx_ring_size = ering->rx_pending;
9684         bp->tx_ring_size = ering->tx_pending;
9685
9686         if (netif_running(dev)) {
9687                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9688                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9689         }
9690
9691         return rc;
9692 }
9693
9694 static void bnx2x_get_pauseparam(struct net_device *dev,
9695                                  struct ethtool_pauseparam *epause)
9696 {
9697         struct bnx2x *bp = netdev_priv(dev);
9698
9699         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9700                            BNX2X_FLOW_CTRL_AUTO) &&
9701                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9702
9703         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9704                             BNX2X_FLOW_CTRL_RX);
9705         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9706                             BNX2X_FLOW_CTRL_TX);
9707
9708         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9709            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9710            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9711 }
9712
9713 static int bnx2x_set_pauseparam(struct net_device *dev,
9714                                 struct ethtool_pauseparam *epause)
9715 {
9716         struct bnx2x *bp = netdev_priv(dev);
9717
9718         if (IS_E1HMF(bp))
9719                 return 0;
9720
9721         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9722            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9723            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9724
9725         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9726
9727         if (epause->rx_pause)
9728                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9729
9730         if (epause->tx_pause)
9731                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9732
9733         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9734                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9735
9736         if (epause->autoneg) {
9737                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9738                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9739                         return -EINVAL;
9740                 }
9741
9742                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9743                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9744         }
9745
9746         DP(NETIF_MSG_LINK,
9747            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9748
9749         if (netif_running(dev)) {
9750                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9751                 bnx2x_link_set(bp);
9752         }
9753
9754         return 0;
9755 }
9756
9757 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9758 {
9759         struct bnx2x *bp = netdev_priv(dev);
9760         int changed = 0;
9761         int rc = 0;
9762
9763         /* TPA requires Rx CSUM offloading */
9764         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9765                 if (!(dev->features & NETIF_F_LRO)) {
9766                         dev->features |= NETIF_F_LRO;
9767                         bp->flags |= TPA_ENABLE_FLAG;
9768                         changed = 1;
9769                 }
9770
9771         } else if (dev->features & NETIF_F_LRO) {
9772                 dev->features &= ~NETIF_F_LRO;
9773                 bp->flags &= ~TPA_ENABLE_FLAG;
9774                 changed = 1;
9775         }
9776
9777         if (changed && netif_running(dev)) {
9778                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9779                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9780         }
9781
9782         return rc;
9783 }
9784
9785 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9786 {
9787         struct bnx2x *bp = netdev_priv(dev);
9788
9789         return bp->rx_csum;
9790 }
9791
9792 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9793 {
9794         struct bnx2x *bp = netdev_priv(dev);
9795         int rc = 0;
9796
9797         bp->rx_csum = data;
9798
9799         /* Disable TPA when Rx CSUM is disabled; otherwise all
9800            TPA'ed packets will be discarded due to a wrong TCP CSUM */
9801         if (!data) {
9802                 u32 flags = ethtool_op_get_flags(dev);
9803
9804                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9805         }
9806
9807         return rc;
9808 }
9809
9810 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9811 {
9812         if (data) {
9813                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9814                 dev->features |= NETIF_F_TSO6;
9815         } else {
9816                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9817                 dev->features &= ~NETIF_F_TSO6;
9818         }
9819
9820         return 0;
9821 }
9822
9823 static const struct {
9824         char string[ETH_GSTRING_LEN];
9825 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9826         { "register_test (offline)" },
9827         { "memory_test (offline)" },
9828         { "loopback_test (offline)" },
9829         { "nvram_test (online)" },
9830         { "interrupt_test (online)" },
9831         { "link_test (online)" },
9832         { "idle check (online)" }
9833 };
9834
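/* Offline register test: walk reg_tbl twice, writing 0x00000000 and
 * then 0xffffffff to each per-port register (offset0 + port*offset1)
 * and checking that the masked readback matches what was written.
 * The original value is restored after each access, so the test is
 * non-destructive.
 */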
9835 static int bnx2x_test_registers(struct bnx2x *bp)
9836 {
9837         int idx, i, rc = -ENODEV;
9838         u32 wr_val = 0;
9839         int port = BP_PORT(bp);
9840         static const struct {
9841                 u32  offset0;
9842                 u32  offset1;
9843                 u32  mask;
9844         } reg_tbl[] = {
9845 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
9846                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
9847                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
9848                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
9849                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
9850                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
9851                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
9852                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
9853                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
9854                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
9855 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
9856                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
9857                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
9858                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
9859                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
9860                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9861                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
9862                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
9863                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
9864                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
9865 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
9866                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
9867                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
9868                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
9869                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
9870                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
9871                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
9872                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
9873                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
9874                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
9875 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
9876                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
9877                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
9878                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9879                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
9880                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9881                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
9882
9883                 { 0xffffffff, 0, 0x00000000 }
9884         };
9885
9886         if (!netif_running(bp->dev))
9887                 return rc;
9888
9889         /* Repeat the test twice:
9890            first by writing 0x00000000, then by writing 0xffffffff */
9891         for (idx = 0; idx < 2; idx++) {
9892
9893                 switch (idx) {
9894                 case 0:
9895                         wr_val = 0;
9896                         break;
9897                 case 1:
9898                         wr_val = 0xffffffff;
9899                         break;
9900                 }
9901
9902                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9903                         u32 offset, mask, save_val, val;
9904
9905                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9906                         mask = reg_tbl[i].mask;
9907
9908                         save_val = REG_RD(bp, offset);
9909
9910                         REG_WR(bp, offset, wr_val);
9911                         val = REG_RD(bp, offset);
9912
9913                         /* Restore the original register's value */
9914                         REG_WR(bp, offset, save_val);
9915
9916                         /* verify the value is as expected */
9917                         if ((val & mask) != (wr_val & mask))
9918                                 goto test_reg_exit;
9919                 }
9920         }
9921
9922         rc = 0;
9923
9924 test_reg_exit:
9925         return rc;
9926 }
9927
9928 static int bnx2x_test_memory(struct bnx2x *bp)
9929 {
9930         int i, j, rc = -ENODEV;
9931         u32 val;
9932         static const struct {
9933                 u32 offset;
9934                 int size;
9935         } mem_tbl[] = {
9936                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
9937                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9938                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
9939                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
9940                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
9941                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
9942                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
9943
9944                 { 0xffffffff, 0 }
9945         };
9946         static const struct {
9947                 char *name;
9948                 u32 offset;
9949                 u32 e1_mask;
9950                 u32 e1h_mask;
9951         } prty_tbl[] = {
9952                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
9953                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
9954                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
9955                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
9956                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
9957                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
9958
9959                 { NULL, 0xffffffff, 0, 0 }
9960         };
9961
9962         if (!netif_running(bp->dev))
9963                 return rc;
9964
9965         /* Go through all the memories */
9966         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9967                 for (j = 0; j < mem_tbl[i].size; j++)
9968                         REG_RD(bp, mem_tbl[i].offset + j*4);
9969
9970         /* Check the parity status */
9971         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9972                 val = REG_RD(bp, prty_tbl[i].offset);
9973                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9974                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9975                         DP(NETIF_MSG_HW,
9976                            "%s is 0x%x\n", prty_tbl[i].name, val);
9977                         goto test_mem_exit;
9978                 }
9979         }
9980
9981         rc = 0;
9982
9983 test_mem_exit:
9984         return rc;
9985 }
9986
9987 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9988 {
9989         int cnt = 1000;
9990
9991         if (link_up)
9992                 while (bnx2x_link_test(bp) && cnt--)
9993                         msleep(10);
9994 }
9995
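/* Run a single loopback iteration in MAC or PHY loopback mode: build
 * a test frame addressed to our own MAC (payload bytes follow an
 * i & 0xff pattern), post it as a start BD plus an empty parse BD,
 * ring the doorbell, then poll the TX/RX status-block indices.  The
 * test passes only if exactly one packet completes on both rings, the
 * CQE carries no error flags and the payload matches byte for byte.
 */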
9996 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9997 {
9998         unsigned int pkt_size, num_pkts, i;
9999         struct sk_buff *skb;
10000         unsigned char *packet;
10001         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10002         struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
10003         u16 tx_start_idx, tx_idx;
10004         u16 rx_start_idx, rx_idx;
10005         u16 pkt_prod, bd_prod;
10006         struct sw_tx_bd *tx_buf;
10007         struct eth_tx_start_bd *tx_start_bd;
10008         struct eth_tx_parse_bd *pbd = NULL;
10009         dma_addr_t mapping;
10010         union eth_rx_cqe *cqe;
10011         u8 cqe_fp_flags;
10012         struct sw_rx_bd *rx_buf;
10013         u16 len;
10014         int rc = -ENODEV;
10015
10016         /* check the loopback mode */
10017         switch (loopback_mode) {
10018         case BNX2X_PHY_LOOPBACK:
10019                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10020                         return -EINVAL;
10021                 break;
10022         case BNX2X_MAC_LOOPBACK:
10023                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10024                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10025                 break;
10026         default:
10027                 return -EINVAL;
10028         }
10029
10030         /* prepare the loopback packet */
10031         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10032                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10033         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10034         if (!skb) {
10035                 rc = -ENOMEM;
10036                 goto test_loopback_exit;
10037         }
10038         packet = skb_put(skb, pkt_size);
10039         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10040         memset(packet + ETH_ALEN, 0, ETH_ALEN);
10041         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10042         for (i = ETH_HLEN; i < pkt_size; i++)
10043                 packet[i] = (unsigned char) (i & 0xff);
10044
10045         /* send the loopback packet */
10046         num_pkts = 0;
10047         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10048         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10049
10050         pkt_prod = fp_tx->tx_pkt_prod++;
10051         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10052         tx_buf->first_bd = fp_tx->tx_bd_prod;
10053         tx_buf->skb = skb;
10054         tx_buf->flags = 0;
10055
10056         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10057         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10058         mapping = pci_map_single(bp->pdev, skb->data,
10059                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10060         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10061         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10062         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10063         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10064         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10065         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10066         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10067                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10068
10069         /* turn on parsing and get a BD */
10070         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10071         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10072
10073         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10074
10075         wmb();
10076
10077         fp_tx->tx_db.data.prod += 2;
10078         barrier();
10079         DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
10080
10081         mmiowb();
10082
10083         num_pkts++;
10084         fp_tx->tx_bd_prod += 2; /* start + pbd */
10085         bp->dev->trans_start = jiffies;
10086
10087         udelay(100);
10088
10089         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10090         if (tx_idx != tx_start_idx + num_pkts)
10091                 goto test_loopback_exit;
10092
10093         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10094         if (rx_idx != rx_start_idx + num_pkts)
10095                 goto test_loopback_exit;
10096
10097         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10098         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10099         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10100                 goto test_loopback_rx_exit;
10101
10102         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10103         if (len != pkt_size)
10104                 goto test_loopback_rx_exit;
10105
10106         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10107         skb = rx_buf->skb;
10108         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10109         for (i = ETH_HLEN; i < pkt_size; i++)
10110                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10111                         goto test_loopback_rx_exit;
10112
10113         rc = 0;
10114
10115 test_loopback_rx_exit:
10116
10117         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10118         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10119         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10120         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10121
10122         /* Update producers */
10123         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10124                              fp_rx->rx_sge_prod);
10125
10126 test_loopback_exit:
10127         bp->link_params.loopback_mode = LOOPBACK_NONE;
10128
10129         return rc;
10130 }
10131
10132 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10133 {
10134         int rc = 0, res;
10135
10136         if (!netif_running(bp->dev))
10137                 return BNX2X_LOOPBACK_FAILED;
10138
10139         bnx2x_netif_stop(bp, 1);
10140         bnx2x_acquire_phy_lock(bp);
10141
10142         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10143         if (res) {
10144                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10145                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10146         }
10147
10148         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10149         if (res) {
10150                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10151                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10152         }
10153
10154         bnx2x_release_phy_lock(bp);
10155         bnx2x_netif_start(bp);
10156
10157         return rc;
10158 }
10159
10160 #define CRC32_RESIDUAL                  0xdebb20e3
10161
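/* Every region listed in nvram_tbl below is stored with a trailing
 * CRC32.  Running ether_crc_le() over a region including its CRC
 * should produce the standard CRC-32 residual value defined above;
 * anything else means the region is corrupt.
 */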
10162 static int bnx2x_test_nvram(struct bnx2x *bp)
10163 {
10164         static const struct {
10165                 int offset;
10166                 int size;
10167         } nvram_tbl[] = {
10168                 {     0,  0x14 }, /* bootstrap */
10169                 {  0x14,  0xec }, /* dir */
10170                 { 0x100, 0x350 }, /* manuf_info */
10171                 { 0x450,  0xf0 }, /* feature_info */
10172                 { 0x640,  0x64 }, /* upgrade_key_info */
10173                 { 0x6a4,  0x64 },
10174                 { 0x708,  0x70 }, /* manuf_key_info */
10175                 { 0x778,  0x70 },
10176                 {     0,     0 }
10177         };
10178         __be32 buf[0x350 / 4];
10179         u8 *data = (u8 *)buf;
10180         int i, rc;
10181         u32 magic, crc;
10182
10183         rc = bnx2x_nvram_read(bp, 0, data, 4);
10184         if (rc) {
10185                 DP(NETIF_MSG_PROBE, "magic value read failed (rc %d)\n", rc);
10186                 goto test_nvram_exit;
10187         }
10188
10189         magic = be32_to_cpu(buf[0]);
10190         if (magic != 0x669955aa) {
10191                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10192                 rc = -ENODEV;
10193                 goto test_nvram_exit;
10194         }
10195
10196         for (i = 0; nvram_tbl[i].size; i++) {
10197
10198                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10199                                       nvram_tbl[i].size);
10200                 if (rc) {
10201                         DP(NETIF_MSG_PROBE,
10202                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10203                         goto test_nvram_exit;
10204                 }
10205
10206                 crc = ether_crc_le(nvram_tbl[i].size, data);
10207                 if (crc != CRC32_RESIDUAL) {
10208                         DP(NETIF_MSG_PROBE,
10209                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10210                         rc = -ENODEV;
10211                         goto test_nvram_exit;
10212                 }
10213         }
10214
10215 test_nvram_exit:
10216         return rc;
10217 }
10218
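/* Interrupt test: post a benign SET_MAC ramrod on the slowpath ring
 * and wait up to 10 x 10ms for its completion handler to clear
 * bp->set_mac_pending; a timeout means slowpath interrupts are not
 * being delivered.
 */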
10219 static int bnx2x_test_intr(struct bnx2x *bp)
10220 {
10221         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10222         int i, rc;
10223
10224         if (!netif_running(bp->dev))
10225                 return -ENODEV;
10226
10227         config->hdr.length = 0;
10228         if (CHIP_IS_E1(bp))
10229                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10230         else
10231                 config->hdr.offset = BP_FUNC(bp);
10232         config->hdr.client_id = bp->fp->cl_id;
10233         config->hdr.reserved1 = 0;
10234
10235         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10236                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10237                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10238         if (rc == 0) {
10239                 bp->set_mac_pending++;
10240                 for (i = 0; i < 10; i++) {
10241                         if (!bp->set_mac_pending)
10242                                 break;
10243                         msleep_interruptible(10);
10244                 }
10245                 if (i == 10)
10246                         rc = -ENODEV;
10247         }
10248
10249         return rc;
10250 }
10251
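/* ethtool self-test entry point, typically driven by
 * "ethtool -t <dev> [online|offline]".  The offline tests
 * (buf[0..2]: registers, memories, loopbacks) reload the NIC in
 * diagnostic mode and are not supported in E1H multi-function mode;
 * the online tests (buf[3..5]: nvram, interrupt and, for the PMF,
 * link) run either way.
 */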
10252 static void bnx2x_self_test(struct net_device *dev,
10253                             struct ethtool_test *etest, u64 *buf)
10254 {
10255         struct bnx2x *bp = netdev_priv(dev);
10256
10257         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10258
10259         if (!netif_running(dev))
10260                 return;
10261
10262         /* offline tests are not supported in MF mode */
10263         if (IS_E1HMF(bp))
10264                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10265
10266         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10267                 int port = BP_PORT(bp);
10268                 u32 val;
10269                 u8 link_up;
10270
10271                 /* save current value of input enable for TX port IF */
10272                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10273                 /* disable input for TX port IF */
10274                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10275
10276                 link_up = bp->link_vars.link_up;
10277                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10278                 bnx2x_nic_load(bp, LOAD_DIAG);
10279                 /* wait until link state is restored */
10280                 bnx2x_wait_for_link(bp, link_up);
10281
10282                 if (bnx2x_test_registers(bp) != 0) {
10283                         buf[0] = 1;
10284                         etest->flags |= ETH_TEST_FL_FAILED;
10285                 }
10286                 if (bnx2x_test_memory(bp) != 0) {
10287                         buf[1] = 1;
10288                         etest->flags |= ETH_TEST_FL_FAILED;
10289                 }
10290                 buf[2] = bnx2x_test_loopback(bp, link_up);
10291                 if (buf[2] != 0)
10292                         etest->flags |= ETH_TEST_FL_FAILED;
10293
10294                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10295
10296                 /* restore input for TX port IF */
10297                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10298
10299                 bnx2x_nic_load(bp, LOAD_NORMAL);
10300                 /* wait until link state is restored */
10301                 bnx2x_wait_for_link(bp, link_up);
10302         }
10303         if (bnx2x_test_nvram(bp) != 0) {
10304                 buf[3] = 1;
10305                 etest->flags |= ETH_TEST_FL_FAILED;
10306         }
10307         if (bnx2x_test_intr(bp) != 0) {
10308                 buf[4] = 1;
10309                 etest->flags |= ETH_TEST_FL_FAILED;
10310         }
10311         if (bp->port.pmf)
10312                 if (bnx2x_link_test(bp) != 0) {
10313                         buf[5] = 1;
10314                         etest->flags |= ETH_TEST_FL_FAILED;
10315                 }
10316
10317 #ifdef BNX2X_EXTRA_DEBUG
10318         bnx2x_panic_dump(bp);
10319 #endif
10320 }
10321
10322 static const struct {
10323         long offset;
10324         int size;
10325         u8 string[ETH_GSTRING_LEN];
10326 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10327 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10328         { Q_STATS_OFFSET32(error_bytes_received_hi),
10329                                                 8, "[%d]: rx_error_bytes" },
10330         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10331                                                 8, "[%d]: rx_ucast_packets" },
10332         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10333                                                 8, "[%d]: rx_mcast_packets" },
10334         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10335                                                 8, "[%d]: rx_bcast_packets" },
10336         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10337         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10338                                          4, "[%d]: rx_phy_ip_err_discards"},
10339         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10340                                          4, "[%d]: rx_skb_alloc_discard" },
10341         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10342
10343 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10344         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10345                                                         8, "[%d]: tx_packets" }
10346 };
10347
10348 static const struct {
10349         long offset;
10350         int size;
10351         u32 flags;
10352 #define STATS_FLAGS_PORT                1
10353 #define STATS_FLAGS_FUNC                2
10354 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10355         u8 string[ETH_GSTRING_LEN];
10356 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10357 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10358                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10359         { STATS_OFFSET32(error_bytes_received_hi),
10360                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10361         { STATS_OFFSET32(total_unicast_packets_received_hi),
10362                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10363         { STATS_OFFSET32(total_multicast_packets_received_hi),
10364                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10365         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10366                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10367         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10368                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10369         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10370                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10371         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10372                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10373         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10374                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10375 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10376                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10377         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10378                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10379         { STATS_OFFSET32(no_buff_discard_hi),
10380                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10381         { STATS_OFFSET32(mac_filter_discard),
10382                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10383         { STATS_OFFSET32(xxoverflow_discard),
10384                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10385         { STATS_OFFSET32(brb_drop_hi),
10386                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10387         { STATS_OFFSET32(brb_truncate_hi),
10388                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10389         { STATS_OFFSET32(pause_frames_received_hi),
10390                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10391         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10392                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10393         { STATS_OFFSET32(nig_timer_max),
10394                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10395 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10396                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10397         { STATS_OFFSET32(rx_skb_alloc_failed),
10398                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10399         { STATS_OFFSET32(hw_csum_err),
10400                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10401
10402         { STATS_OFFSET32(total_bytes_transmitted_hi),
10403                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10404         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10405                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10406         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10407                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10408         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10409                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10410         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10411                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10412         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10413                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10414         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10415                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10416 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10417                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10418         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10419                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10420         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10421                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10422         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10423                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10424         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10425                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10426         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10427                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10428         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10429                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10430         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10431                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10432         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10433                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10434         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10435                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10436 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10437                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10438         { STATS_OFFSET32(pause_frames_sent_hi),
10439                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10440 };
10441
10442 #define IS_PORT_STAT(i) \
10443         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10444 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10445 #define IS_E1HMF_MODE_STAT(bp) \
10446                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10447
10448 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10449 {
10450         struct bnx2x *bp = netdev_priv(dev);
10451         int i, num_stats;
10452
10453         switch (stringset) {
10454         case ETH_SS_STATS:
10455                 if (is_multi(bp)) {
10456                         num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10457                         if (!IS_E1HMF_MODE_STAT(bp))
10458                                 num_stats += BNX2X_NUM_STATS;
10459                 } else {
10460                         if (IS_E1HMF_MODE_STAT(bp)) {
10461                                 num_stats = 0;
10462                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
10463                                         if (IS_FUNC_STAT(i))
10464                                                 num_stats++;
10465                         } else
10466                                 num_stats = BNX2X_NUM_STATS;
10467                 }
10468                 return num_stats;
10469
10470         case ETH_SS_TEST:
10471                 return BNX2X_NUM_TESTS;
10472
10473         default:
10474                 return -EINVAL;
10475         }
10476 }
10477
10478 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10479 {
10480         struct bnx2x *bp = netdev_priv(dev);
10481         int i, j, k;
10482
10483         switch (stringset) {
10484         case ETH_SS_STATS:
10485                 if (is_multi(bp)) {
10486                         k = 0;
10487                         for_each_rx_queue(bp, i) {
10488                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10489                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10490                                                 bnx2x_q_stats_arr[j].string, i);
10491                                 k += BNX2X_NUM_Q_STATS;
10492                         }
10493                         if (IS_E1HMF_MODE_STAT(bp))
10494                                 break;
10495                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10496                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10497                                        bnx2x_stats_arr[j].string);
10498                 } else {
10499                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10500                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10501                                         continue;
10502                                 strcpy(buf + j*ETH_GSTRING_LEN,
10503                                        bnx2x_stats_arr[i].string);
10504                                 j++;
10505                         }
10506                 }
10507                 break;
10508
10509         case ETH_SS_TEST:
10510                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10511                 break;
10512         }
10513 }
10514
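/* Fill the ethtool statistics buffer.  4-byte counters are widened to
 * u64 directly, 8-byte counters live as {hi, lo} u32 pairs in the
 * hardware stats block and are reassembled with HILO_U64, and entries
 * with size 0 are placeholders that always report 0.
 */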
10515 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10516                                     struct ethtool_stats *stats, u64 *buf)
10517 {
10518         struct bnx2x *bp = netdev_priv(dev);
10519         u32 *hw_stats, *offset;
10520         int i, j, k;
10521
10522         if (is_multi(bp)) {
10523                 k = 0;
10524                 for_each_rx_queue(bp, i) {
10525                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10526                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10527                                 if (bnx2x_q_stats_arr[j].size == 0) {
10528                                         /* skip this counter */
10529                                         buf[k + j] = 0;
10530                                         continue;
10531                                 }
10532                                 offset = (hw_stats +
10533                                           bnx2x_q_stats_arr[j].offset);
10534                                 if (bnx2x_q_stats_arr[j].size == 4) {
10535                                         /* 4-byte counter */
10536                                         buf[k + j] = (u64) *offset;
10537                                         continue;
10538                                 }
10539                                 /* 8-byte counter */
10540                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10541                         }
10542                         k += BNX2X_NUM_Q_STATS;
10543                 }
10544                 if (IS_E1HMF_MODE_STAT(bp))
10545                         return;
10546                 hw_stats = (u32 *)&bp->eth_stats;
10547                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10548                         if (bnx2x_stats_arr[j].size == 0) {
10549                                 /* skip this counter */
10550                                 buf[k + j] = 0;
10551                                 continue;
10552                         }
10553                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10554                         if (bnx2x_stats_arr[j].size == 4) {
10555                                 /* 4-byte counter */
10556                                 buf[k + j] = (u64) *offset;
10557                                 continue;
10558                         }
10559                         /* 8-byte counter */
10560                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10561                 }
10562         } else {
10563                 hw_stats = (u32 *)&bp->eth_stats;
10564                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10565                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10566                                 continue;
10567                         if (bnx2x_stats_arr[i].size == 0) {
10568                                 /* skip this counter */
10569                                 buf[j] = 0;
10570                                 j++;
10571                                 continue;
10572                         }
10573                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10574                         if (bnx2x_stats_arr[i].size == 4) {
10575                                 /* 4-byte counter */
10576                                 buf[j] = (u64) *offset;
10577                                 j++;
10578                                 continue;
10579                         }
10580                         /* 8-byte counter */
10581                         buf[j] = HILO_U64(*offset, *(offset + 1));
10582                         j++;
10583                 }
10584         }
10585 }
10586
10587 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10588 {
10589         struct bnx2x *bp = netdev_priv(dev);
10590         int port = BP_PORT(bp);
10591         int i;
10592
10593         if (!netif_running(dev))
10594                 return 0;
10595
10596         if (!bp->port.pmf)
10597                 return 0;
10598
10599         if (data == 0)
10600                 data = 2;
10601
10602         for (i = 0; i < (data * 2); i++) {
10603                 if ((i % 2) == 0)
10604                         bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10605                                       bp->link_params.hw_led_mode,
10606                                       bp->link_params.chip_id);
10607                 else
10608                         bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10609                                       bp->link_params.hw_led_mode,
10610                                       bp->link_params.chip_id);
10611
10612                 msleep_interruptible(500);
10613                 if (signal_pending(current))
10614                         break;
10615         }
10616
10617         if (bp->link_vars.link_up)
10618                 bnx2x_set_led(bp, port, LED_MODE_OPER,
10619                               bp->link_vars.line_speed,
10620                               bp->link_params.hw_led_mode,
10621                               bp->link_params.chip_id);
10622
10623         return 0;
10624 }
10625
10626 static const struct ethtool_ops bnx2x_ethtool_ops = {
10627         .get_settings           = bnx2x_get_settings,
10628         .set_settings           = bnx2x_set_settings,
10629         .get_drvinfo            = bnx2x_get_drvinfo,
10630         .get_regs_len           = bnx2x_get_regs_len,
10631         .get_regs               = bnx2x_get_regs,
10632         .get_wol                = bnx2x_get_wol,
10633         .set_wol                = bnx2x_set_wol,
10634         .get_msglevel           = bnx2x_get_msglevel,
10635         .set_msglevel           = bnx2x_set_msglevel,
10636         .nway_reset             = bnx2x_nway_reset,
10637         .get_link               = bnx2x_get_link,
10638         .get_eeprom_len         = bnx2x_get_eeprom_len,
10639         .get_eeprom             = bnx2x_get_eeprom,
10640         .set_eeprom             = bnx2x_set_eeprom,
10641         .get_coalesce           = bnx2x_get_coalesce,
10642         .set_coalesce           = bnx2x_set_coalesce,
10643         .get_ringparam          = bnx2x_get_ringparam,
10644         .set_ringparam          = bnx2x_set_ringparam,
10645         .get_pauseparam         = bnx2x_get_pauseparam,
10646         .set_pauseparam         = bnx2x_set_pauseparam,
10647         .get_rx_csum            = bnx2x_get_rx_csum,
10648         .set_rx_csum            = bnx2x_set_rx_csum,
10649         .get_tx_csum            = ethtool_op_get_tx_csum,
10650         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10651         .set_flags              = bnx2x_set_flags,
10652         .get_flags              = ethtool_op_get_flags,
10653         .get_sg                 = ethtool_op_get_sg,
10654         .set_sg                 = ethtool_op_set_sg,
10655         .get_tso                = ethtool_op_get_tso,
10656         .set_tso                = bnx2x_set_tso,
10657         .self_test              = bnx2x_self_test,
10658         .get_sset_count         = bnx2x_get_sset_count,
10659         .get_strings            = bnx2x_get_strings,
10660         .phys_id                = bnx2x_phys_id,
10661         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10662 };
10663
10664 /* end of ethtool_ops */
10665
10666 /****************************************************************************
10667 * General service functions
10668 ****************************************************************************/
10669
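/* Program the PCI PM control/status register directly.  D0 clears the
 * power-state bits (preserving PME status) and sleeps 20ms when coming
 * out of D3hot; D3hot sets power state 3 and arms PME generation when
 * WoL is enabled.  After entering D3hot no memory-mapped access may be
 * made until the device is back in D0.
 */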
10670 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10671 {
10672         u16 pmcsr;
10673
10674         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10675
10676         switch (state) {
10677         case PCI_D0:
10678                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10679                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10680                                        PCI_PM_CTRL_PME_STATUS));
10681
10682                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10683                         /* delay required during transition out of D3hot */
10684                         msleep(20);
10685                 break;
10686
10687         case PCI_D3hot:
10688                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10689                 pmcsr |= 3;
10690
10691                 if (bp->wol)
10692                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10693
10694                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10695                                       pmcsr);
10696
10697         /* No more memory access after this point until
10698          * device is brought back to D0.
10699          */
10700                 break;
10701
10702         default:
10703                 return -EINVAL;
10704         }
10705         return 0;
10706 }
10707
10708 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10709 {
10710         u16 rx_cons_sb;
10711
10712         /* Tell compiler that status block fields can change */
10713         barrier();
10714         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
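        /* The last entry on each RCQ page is a "next page" link rather
         * than a real completion, so step over it if the consumer index
         * lands on it.
         */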
10715         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10716                 rx_cons_sb++;
10717         return (fp->rx_comp_cons != rx_cons_sb);
10718 }
10719
10720 /*
10721  * net_device service functions
10722  */
10723
10724 static int bnx2x_poll(struct napi_struct *napi, int budget)
10725 {
10726         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10727                                                  napi);
10728         struct bnx2x *bp = fp->bp;
10729         int work_done = 0;
10730
10731 #ifdef BNX2X_STOP_ON_ERROR
10732         if (unlikely(bp->panic))
10733                 goto poll_panic;
10734 #endif
10735
10736         prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10737         prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10738
10739         bnx2x_update_fpsb_idx(fp);
10740
10741         if (bnx2x_has_rx_work(fp)) {
10742                 work_done = bnx2x_rx_int(fp, budget);
10743
10744                 /* must not complete if we consumed full budget */
10745                 if (work_done >= budget)
10746                         goto poll_again;
10747         }
10748
10749         /* bnx2x_has_rx_work() reads the status block, thus we need to
10750          * ensure that status block indices have been actually read
10751          * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
10752          * so that we won't write the "newer" value of the status block to IGU
10753          * (if there was a DMA right after bnx2x_has_rx_work and
10754          * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10755          * may be postponed to right before bnx2x_ack_sb). In this case
10756          * there will never be another interrupt until there is another update
10757          * of the status block, while there is still unhandled work.
10758          */
10759         rmb();
10760
10761         if (!bnx2x_has_rx_work(fp)) {
10762 #ifdef BNX2X_STOP_ON_ERROR
10763 poll_panic:
10764 #endif
10765                 napi_complete(napi);
10766
10767                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10768                              le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10769                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10770                              le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10771         }
10772
10773 poll_again:
10774         return work_done;
10775 }
10776
10777
10778 /* We split the first BD into a header BD and a data BD
10779  * to ease the pain of our fellow microcode engineers.
10780  * We use one mapping for both BDs.
10781  * So far this has only been observed to happen
10782  * in Other Operating Systems (TM).
10783  */
10784 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10785                                    struct bnx2x_fastpath *fp,
10786                                    struct sw_tx_bd *tx_buf,
10787                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
10788                                    u16 bd_prod, int nbd)
10789 {
10790         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10791         struct eth_tx_bd *d_tx_bd;
10792         dma_addr_t mapping;
10793         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10794
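        /* The original DMA mapping is reused for both halves: the data
         * BD points hlen bytes into the header BD's buffer, so only one
         * unmap is needed on completion (see BNX2X_TSO_SPLIT_BD below).
         */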
10795         /* first fix first BD */
10796         h_tx_bd->nbd = cpu_to_le16(nbd);
10797         h_tx_bd->nbytes = cpu_to_le16(hlen);
10798
10799         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10800            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10801            h_tx_bd->addr_lo, h_tx_bd->nbd);
10802
10803         /* now get a new data BD
10804          * (after the pbd) and fill it */
10805         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10806         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10807
10808         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10809                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10810
10811         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10812         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10813         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10814
10815         /* this marks the BD as one that has no individual mapping */
10816         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10817
10818         DP(NETIF_MSG_TX_QUEUED,
10819            "TSO split data size is %d (%x:%x)\n",
10820            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10821
10822         /* update tx_bd */
10823         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10824
10825         return bd_prod;
10826 }
10827
10828 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10829 {
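        /* 'fix' is the signed offset between where the stack started the
         * partial checksum and the transport header: fold the difference
         * out (fix > 0) or in (fix < 0), then byte-swap for the HW.
         */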
10830         if (fix > 0)
10831                 csum = (u16) ~csum_fold(csum_sub(csum,
10832                                 csum_partial(t_header - fix, fix, 0)));
10833
10834         else if (fix < 0)
10835                 csum = (u16) ~csum_fold(csum_add(csum,
10836                                 csum_partial(t_header, -fix, 0)));
10837
10838         return swab16(csum);
10839 }
10840
10841 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10842 {
10843         u32 rc;
10844
10845         if (skb->ip_summed != CHECKSUM_PARTIAL)
10846                 rc = XMIT_PLAIN;
10847
10848         else {
10849                 if (skb->protocol == htons(ETH_P_IPV6)) {
10850                         rc = XMIT_CSUM_V6;
10851                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10852                                 rc |= XMIT_CSUM_TCP;
10853
10854                 } else {
10855                         rc = XMIT_CSUM_V4;
10856                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10857                                 rc |= XMIT_CSUM_TCP;
10858                 }
10859         }
10860
10861         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10862                 rc |= XMIT_GSO_V4;
10863
10864         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10865                 rc |= XMIT_GSO_V6;
10866
10867         return rc;
10868 }
10869
10870 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10871 /* Check whether the packet requires linearization (i.e. it is too
10872    fragmented). No need to check fragmentation if the page size is
10873    above 8K, since the FW restrictions cannot be violated then. */
10874 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10875                              u32 xmit_type)
10876 {
10877         int to_copy = 0;
10878         int hlen = 0;
10879         int first_bd_sz = 0;
10880
10881         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10882         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10883
10884                 if (xmit_type & XMIT_GSO) {
10885                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10886                         /* Check if LSO packet needs to be copied:
10887                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10888                         int wnd_size = MAX_FETCH_BD - 3;
10889                         /* Number of windows to check */
10890                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10891                         int wnd_idx = 0;
10892                         int frag_idx = 0;
10893                         u32 wnd_sum = 0;
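                        /* FW restriction being checked: every window of
                         * wnd_size consecutive BDs must carry at least one
                         * full MSS of payload, else the skb is linearized.
                         */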
10894
10895                         /* Headers length */
10896                         hlen = (int)(skb_transport_header(skb) - skb->data) +
10897                                 tcp_hdrlen(skb);
10898
10899                         /* Amount of data (w/o headers) on linear part of SKB*/
10900                         first_bd_sz = skb_headlen(skb) - hlen;
10901
10902                         wnd_sum  = first_bd_sz;
10903
10904                         /* Calculate the first sum - it's special */
10905                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10906                                 wnd_sum +=
10907                                         skb_shinfo(skb)->frags[frag_idx].size;
10908
10909                         /* If there was data on linear skb data - check it */
10910                         if (first_bd_sz > 0) {
10911                                 if (unlikely(wnd_sum < lso_mss)) {
10912                                         to_copy = 1;
10913                                         goto exit_lbl;
10914                                 }
10915
10916                                 wnd_sum -= first_bd_sz;
10917                         }
10918
10919                         /* Others are easier: run through the frag list and
10920                            check all windows */
10921                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10922                                 wnd_sum +=
10923                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10924
10925                                 if (unlikely(wnd_sum < lso_mss)) {
10926                                         to_copy = 1;
10927                                         break;
10928                                 }
10929                                 wnd_sum -=
10930                                         skb_shinfo(skb)->frags[wnd_idx].size;
10931                         }
10932                 } else {
10933                         /* in the non-LSO case, a too fragmented
10934                            packet should always be linearized */
10935                         to_copy = 1;
10936                 }
10937         }
10938
10939 exit_lbl:
10940         if (unlikely(to_copy))
10941                 DP(NETIF_MSG_TX_QUEUED,
10942                    "Linearization IS REQUIRED for %s packet. "
10943                    "num_frags %d  hlen %d  first_bd_sz %d\n",
10944                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10945                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10946
10947         return to_copy;
10948 }
10949 #endif
10950
10951 /* called with netif_tx_lock
10952  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10953  * netif_wake_queue()
10954  */
10955 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10956 {
10957         struct bnx2x *bp = netdev_priv(dev);
10958         struct bnx2x_fastpath *fp, *fp_stat;
10959         struct netdev_queue *txq;
10960         struct sw_tx_bd *tx_buf;
10961         struct eth_tx_start_bd *tx_start_bd;
10962         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10963         struct eth_tx_parse_bd *pbd = NULL;
10964         u16 pkt_prod, bd_prod;
10965         int nbd, fp_index;
10966         dma_addr_t mapping;
10967         u32 xmit_type = bnx2x_xmit_type(bp, skb);
10968         int i;
10969         u8 hlen = 0;
10970         __le16 pkt_size = 0;
10971
10972 #ifdef BNX2X_STOP_ON_ERROR
10973         if (unlikely(bp->panic))
10974                 return NETDEV_TX_BUSY;
10975 #endif
10976
10977         fp_index = skb_get_queue_mapping(skb);
10978         txq = netdev_get_tx_queue(dev, fp_index);
10979
10980         fp = &bp->fp[fp_index + bp->num_rx_queues];
10981         fp_stat = &bp->fp[fp_index];
10982
10983         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10984                 fp_stat->eth_q_stats.driver_xoff++;
10985                 netif_tx_stop_queue(txq);
10986                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10987                 return NETDEV_TX_BUSY;
10988         }
10989
10990         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
10991            "  gso type %x  xmit_type %x\n",
10992            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10993            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10994
10995 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10996         /* First, check if we need to linearize the skb (due to FW
10997            restrictions). No need to check fragmentation if page size > 8K
10998            (there will be no violation to FW restrictions) */
10999         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11000                 /* Statistics of linearization */
11001                 bp->lin_cnt++;
11002                 if (skb_linearize(skb) != 0) {
11003                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11004                            "silently dropping this SKB\n");
11005                         dev_kfree_skb_any(skb);
11006                         return NETDEV_TX_OK;
11007                 }
11008         }
11009 #endif
11010
11011         /*
11012         Please read carefully. First we use one BD which we mark as start,
11013         then we have a parsing info BD (used for TSO or xsum),
11014         and only then we have the rest of the TSO BDs.
11015         (don't forget to mark the last one as last,
11016         and to unmap only AFTER you write to the BD ...)
11017         And above all, all pbd sizes are in words - NOT DWORDS!
11018         */
11019
11020         pkt_prod = fp->tx_pkt_prod++;
11021         bd_prod = TX_BD(fp->tx_bd_prod);
11022
11023         /* get a tx_buf and first BD */
11024         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11025         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11026
11027         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11028         tx_start_bd->general_data = (UNICAST_ADDRESS <<
11029                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11030         /* header nbd */
11031         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11032
11033         /* remember the first BD of the packet */
11034         tx_buf->first_bd = fp->tx_bd_prod;
11035         tx_buf->skb = skb;
11036         tx_buf->flags = 0;
11037
11038         DP(NETIF_MSG_TX_QUEUED,
11039            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
11040            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11041
11042 #ifdef BCM_VLAN
11043         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11044             (bp->flags & HW_VLAN_TX_FLAG)) {
11045                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11046                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11047         } else
11048 #endif
11049                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11050
11051         /* turn on parsing and get a BD */
11052         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11053         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11054
11055         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11056
11057         if (xmit_type & XMIT_CSUM) {
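                /* the parsing BD expects all header lengths in 16-bit
                 * words, hence the divisions by 2 below
                 */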
11058                 hlen = (skb_network_header(skb) - skb->data) / 2;
11059
11060                 /* for now NS flag is not used in Linux */
11061                 pbd->global_data =
11062                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11063                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11064
11065                 pbd->ip_hlen = (skb_transport_header(skb) -
11066                                 skb_network_header(skb)) / 2;
11067
11068                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11069
11070                 pbd->total_hlen = cpu_to_le16(hlen);
11071                 hlen = hlen*2;
11072
11073                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11074
11075                 if (xmit_type & XMIT_CSUM_V4)
11076                         tx_start_bd->bd_flags.as_bitfield |=
11077                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11078                 else
11079                         tx_start_bd->bd_flags.as_bitfield |=
11080                                                 ETH_TX_BD_FLAGS_IPV6;
11081
11082                 if (xmit_type & XMIT_CSUM_TCP) {
11083                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11084
11085                 } else {
11086                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11087
11088                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11089
11090                         DP(NETIF_MSG_TX_QUEUED,
11091                            "hlen %d  fix %d  csum before fix %x\n",
11092                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11093
11094                         /* HW bug: fixup the CSUM */
11095                         pbd->tcp_pseudo_csum =
11096                                 bnx2x_csum_fix(skb_transport_header(skb),
11097                                                SKB_CS(skb), fix);
11098
11099                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11100                            pbd->tcp_pseudo_csum);
11101                 }
11102         }
11103
11104         mapping = pci_map_single(bp->pdev, skb->data,
11105                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11106
11107         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11108         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11109         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11110         tx_start_bd->nbd = cpu_to_le16(nbd);
11111         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11112         pkt_size = tx_start_bd->nbytes;
11113
11114         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11115            "  nbytes %d  flags %x  vlan %x\n",
11116            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11117            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11118            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11119
11120         if (xmit_type & XMIT_GSO) {
11121
11122                 DP(NETIF_MSG_TX_QUEUED,
11123                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11124                    skb->len, hlen, skb_headlen(skb),
11125                    skb_shinfo(skb)->gso_size);
11126
11127                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11128
11129                 if (unlikely(skb_headlen(skb) > hlen))
11130                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11131                                                  hlen, bd_prod, ++nbd);
11132
11133                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11134                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11135                 pbd->tcp_flags = pbd_tcp_flags(skb);
11136
11137                 if (xmit_type & XMIT_GSO_V4) {
11138                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11139                         pbd->tcp_pseudo_csum =
11140                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11141                                                           ip_hdr(skb)->daddr,
11142                                                           0, IPPROTO_TCP, 0));
11143
11144                 } else
11145                         pbd->tcp_pseudo_csum =
11146                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11147                                                         &ipv6_hdr(skb)->daddr,
11148                                                         0, IPPROTO_TCP, 0));
11149
11150                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11151         }
11152         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11153
11154         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11155                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11156
11157                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11158                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11159                 if (total_pkt_bd == NULL)
11160                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11161
11162                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11163                                        frag->size, PCI_DMA_TODEVICE);
11164
11165                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11166                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11167                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11168                 le16_add_cpu(&pkt_size, frag->size);
11169
11170                 DP(NETIF_MSG_TX_QUEUED,
11171                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11172                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11173                    le16_to_cpu(tx_data_bd->nbytes));
11174         }
11175
11176         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11177
11178         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11179
11180         /* now send a tx doorbell, counting the "next page" BD
11181          * if the packet contains or ends with it
11182          */
11183         if (TX_BD_POFF(bd_prod) < nbd)
11184                 nbd++;
11185
11186         if (total_pkt_bd != NULL)
11187                 total_pkt_bd->total_pkt_bytes = pkt_size;
11188
11189         if (pbd)
11190                 DP(NETIF_MSG_TX_QUEUED,
11191                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11192                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11193                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11194                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11195                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11196
11197         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11198
11199         /*
11200          * Make sure that the BD data is updated before updating the producer
11201          * since FW might read the BD right after the producer is updated.
11202          * This is only applicable for weak-ordered memory model archs such
11203          * as IA-64. The following barrier is also mandatory since the FW
11204          * assumes packets always have BDs.
11205          */
11206         wmb();
11207
11208         fp->tx_db.data.prod += nbd;
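        /* compiler barrier: keep the doorbell write below from being
         * reordered with the producer update above
         */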
11209         barrier();
11210         DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11211
11212         mmiowb();
11213
11214         fp->tx_bd_prod += nbd;
11215
11216         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11217                 netif_tx_stop_queue(txq);
11218                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11219                    if we put Tx into XOFF state. */
11220                 smp_mb();
11221                 fp_stat->eth_q_stats.driver_xoff++;
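                /* re-check ring space: bnx2x_tx_int() may have freed BDs
                 * between the availability test above and the queue stop
                 */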
11222                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11223                         netif_tx_wake_queue(txq);
11224         }
11225         fp_stat->tx_pkt++;
11226
11227         return NETDEV_TX_OK;
11228 }
11229
11230 /* called with rtnl_lock */
11231 static int bnx2x_open(struct net_device *dev)
11232 {
11233         struct bnx2x *bp = netdev_priv(dev);
11234
11235         netif_carrier_off(dev);
11236
11237         bnx2x_set_power_state(bp, PCI_D0);
11238
11239         return bnx2x_nic_load(bp, LOAD_OPEN);
11240 }
11241
11242 /* called with rtnl_lock */
11243 static int bnx2x_close(struct net_device *dev)
11244 {
11245         struct bnx2x *bp = netdev_priv(dev);
11246
11247         /* Unload the driver, release IRQs */
11248         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11249         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11250                 if (!CHIP_REV_IS_SLOW(bp))
11251                         bnx2x_set_power_state(bp, PCI_D3hot);
11252
11253         return 0;
11254 }
11255
11256 /* called with netif_tx_lock from dev_mcast.c */
11257 static void bnx2x_set_rx_mode(struct net_device *dev)
11258 {
11259         struct bnx2x *bp = netdev_priv(dev);
11260         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11261         int port = BP_PORT(bp);
11262
11263         if (bp->state != BNX2X_STATE_OPEN) {
11264                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11265                 return;
11266         }
11267
11268         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11269
11270         if (dev->flags & IFF_PROMISC)
11271                 rx_mode = BNX2X_RX_MODE_PROMISC;
11272
11273         else if ((dev->flags & IFF_ALLMULTI) ||
11274                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11275                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11276
11277         else { /* some multicasts */
11278                 if (CHIP_IS_E1(bp)) {
11279                         int i, old, offset;
11280                         struct dev_mc_list *mclist;
11281                         struct mac_configuration_cmd *config =
11282                                                 bnx2x_sp(bp, mcast_config);
11283
11284                         for (i = 0, mclist = dev->mc_list;
11285                              mclist && (i < dev->mc_count);
11286                              i++, mclist = mclist->next) {
11287
11288                                 config->config_table[i].
11289                                         cam_entry.msb_mac_addr =
11290                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11291                                 config->config_table[i].
11292                                         cam_entry.middle_mac_addr =
11293                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11294                                 config->config_table[i].
11295                                         cam_entry.lsb_mac_addr =
11296                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11297                                 config->config_table[i].cam_entry.flags =
11298                                                         cpu_to_le16(port);
11299                                 config->config_table[i].
11300                                         target_table_entry.flags = 0;
11301                                 config->config_table[i].target_table_entry.
11302                                         clients_bit_vector =
11303                                                 cpu_to_le32(1 << BP_L_ID(bp));
11304                                 config->config_table[i].
11305                                         target_table_entry.vlan_id = 0;
11306
11307                                 DP(NETIF_MSG_IFUP,
11308                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11309                                    config->config_table[i].
11310                                                 cam_entry.msb_mac_addr,
11311                                    config->config_table[i].
11312                                                 cam_entry.middle_mac_addr,
11313                                    config->config_table[i].
11314                                                 cam_entry.lsb_mac_addr);
11315                         }
11316                         old = config->hdr.length;
11317                         if (old > i) {
11318                                 for (; i < old; i++) {
11319                                         if (CAM_IS_INVALID(config->
11320                                                            config_table[i])) {
11321                                                 /* already invalidated */
11322                                                 break;
11323                                         }
11324                                         /* invalidate */
11325                                         CAM_INVALIDATE(config->
11326                                                        config_table[i]);
11327                                 }
11328                         }
11329
11330                         if (CHIP_REV_IS_SLOW(bp))
11331                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11332                         else
11333                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11334
11335                         config->hdr.length = i;
11336                         config->hdr.offset = offset;
11337                         config->hdr.client_id = bp->fp->cl_id;
11338                         config->hdr.reserved1 = 0;
11339
11340                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11341                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11342                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11343                                       0);
11344                 } else { /* E1H */
11345                         /* Accept one or more multicasts */
11346                         struct dev_mc_list *mclist;
11347                         u32 mc_filter[MC_HASH_SIZE];
11348                         u32 crc, bit, regidx;
11349                         int i;
11350
11351                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11352
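                        /* Approximate multicast filtering: the top byte of
                         * the CRC32c of each MAC selects one of the 256 bits
                         * spread across the MC_HASH_SIZE 32-bit registers.
                         */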
11353                         for (i = 0, mclist = dev->mc_list;
11354                              mclist && (i < dev->mc_count);
11355                              i++, mclist = mclist->next) {
11356
11357                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11358                                    mclist->dmi_addr);
11359
11360                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11361                                 bit = (crc >> 24) & 0xff;
11362                                 regidx = bit >> 5;
11363                                 bit &= 0x1f;
11364                                 mc_filter[regidx] |= (1 << bit);
11365                         }
11366
11367                         for (i = 0; i < MC_HASH_SIZE; i++)
11368                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11369                                        mc_filter[i]);
11370                 }
11371         }
11372
11373         bp->rx_mode = rx_mode;
11374         bnx2x_set_storm_rx_mode(bp);
11375 }
11376
11377 /* called with rtnl_lock */
11378 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11379 {
11380         struct sockaddr *addr = p;
11381         struct bnx2x *bp = netdev_priv(dev);
11382
11383         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11384                 return -EINVAL;
11385
11386         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11387         if (netif_running(dev)) {
11388                 if (CHIP_IS_E1(bp))
11389                         bnx2x_set_mac_addr_e1(bp, 1);
11390                 else
11391                         bnx2x_set_mac_addr_e1h(bp, 1);
11392         }
11393
11394         return 0;
11395 }
11396
11397 /* called with rtnl_lock */
11398 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11399                            int devad, u16 addr)
11400 {
11401         struct bnx2x *bp = netdev_priv(netdev);
11402         u16 value;
11403         int rc;
11404         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11405
11406         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11407            prtad, devad, addr);
11408
11409         if (prtad != bp->mdio.prtad) {
11410                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11411                    prtad, bp->mdio.prtad);
11412                 return -EINVAL;
11413         }
11414
11415         /* The HW expects different devad if CL22 is used */
11416         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11417
11418         bnx2x_acquire_phy_lock(bp);
11419         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11420                              devad, addr, &value);
11421         bnx2x_release_phy_lock(bp);
11422         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11423
11424         if (!rc)
11425                 rc = value;
11426         return rc;
11427 }
11428
11429 /* called with rtnl_lock */
11430 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11431                             u16 addr, u16 value)
11432 {
11433         struct bnx2x *bp = netdev_priv(netdev);
11434         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11435         int rc;
11436
11437         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11438                            " value 0x%x\n", prtad, devad, addr, value);
11439
11440         if (prtad != bp->mdio.prtad) {
11441                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11442                    prtad, bp->mdio.prtad);
11443                 return -EINVAL;
11444         }
11445
11446         /* The HW expects different devad if CL22 is used */
11447         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11448
11449         bnx2x_acquire_phy_lock(bp);
11450         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11451                               devad, addr, value);
11452         bnx2x_release_phy_lock(bp);
11453         return rc;
11454 }
11455
11456 /* called with rtnl_lock */
11457 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11458 {
11459         struct bnx2x *bp = netdev_priv(dev);
11460         struct mii_ioctl_data *mdio = if_mii(ifr);
11461
11462         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11463            mdio->phy_id, mdio->reg_num, mdio->val_in);
11464
11465         if (!netif_running(dev))
11466                 return -EAGAIN;
11467
11468         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11469 }
11470
11471 /* called with rtnl_lock */
11472 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11473 {
11474         struct bnx2x *bp = netdev_priv(dev);
11475         int rc = 0;
11476
11477         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11478             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11479                 return -EINVAL;
11480
11481         /* This does not race with packet allocation
11482          * because the actual alloc size is
11483          * only updated as part of load
11484          */
11485         dev->mtu = new_mtu;
11486
11487         if (netif_running(dev)) {
11488                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11489                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11490         }
11491
11492         return rc;
11493 }
11494
11495 static void bnx2x_tx_timeout(struct net_device *dev)
11496 {
11497         struct bnx2x *bp = netdev_priv(dev);
11498
11499 #ifdef BNX2X_STOP_ON_ERROR
11500         if (!bp->panic)
11501                 bnx2x_panic();
11502 #endif
11503         /* This allows the netif to be shut down gracefully before resetting */
11504         schedule_work(&bp->reset_task);
11505 }
11506
11507 #ifdef BCM_VLAN
11508 /* called with rtnl_lock */
11509 static void bnx2x_vlan_rx_register(struct net_device *dev,
11510                                    struct vlan_group *vlgrp)
11511 {
11512         struct bnx2x *bp = netdev_priv(dev);
11513
11514         bp->vlgrp = vlgrp;
11515
11516         /* Set flags according to the required capabilities */
11517         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11518
11519         if (dev->features & NETIF_F_HW_VLAN_TX)
11520                 bp->flags |= HW_VLAN_TX_FLAG;
11521
11522         if (dev->features & NETIF_F_HW_VLAN_RX)
11523                 bp->flags |= HW_VLAN_RX_FLAG;
11524
11525         if (netif_running(dev))
11526                 bnx2x_set_client_config(bp);
11527 }
11528
11529 #endif
11530
11531 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11532 static void poll_bnx2x(struct net_device *dev)
11533 {
11534         struct bnx2x *bp = netdev_priv(dev);
11535
11536         disable_irq(bp->pdev->irq);
11537         bnx2x_interrupt(bp->pdev->irq, dev);
11538         enable_irq(bp->pdev->irq);
11539 }
11540 #endif
11541
11542 static const struct net_device_ops bnx2x_netdev_ops = {
11543         .ndo_open               = bnx2x_open,
11544         .ndo_stop               = bnx2x_close,
11545         .ndo_start_xmit         = bnx2x_start_xmit,
11546         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11547         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11548         .ndo_validate_addr      = eth_validate_addr,
11549         .ndo_do_ioctl           = bnx2x_ioctl,
11550         .ndo_change_mtu         = bnx2x_change_mtu,
11551         .ndo_tx_timeout         = bnx2x_tx_timeout,
11552 #ifdef BCM_VLAN
11553         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11554 #endif
11555 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11556         .ndo_poll_controller    = poll_bnx2x,
11557 #endif
11558 };
11559
11560 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11561                                     struct net_device *dev)
11562 {
11563         struct bnx2x *bp;
11564         int rc;
11565
11566         SET_NETDEV_DEV(dev, &pdev->dev);
11567         bp = netdev_priv(dev);
11568
11569         bp->dev = dev;
11570         bp->pdev = pdev;
11571         bp->flags = 0;
11572         bp->func = PCI_FUNC(pdev->devfn);
11573
11574         rc = pci_enable_device(pdev);
11575         if (rc) {
11576                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11577                 goto err_out;
11578         }
11579
11580         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11581                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11582                        " aborting\n");
11583                 rc = -ENODEV;
11584                 goto err_out_disable;
11585         }
11586
11587         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11588                 printk(KERN_ERR PFX "Cannot find second PCI device"
11589                        " base address, aborting\n");
11590                 rc = -ENODEV;
11591                 goto err_out_disable;
11592         }
11593
11594         if (atomic_read(&pdev->enable_cnt) == 1) {
11595                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11596                 if (rc) {
11597                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11598                                " aborting\n");
11599                         goto err_out_disable;
11600                 }
11601
11602                 pci_set_master(pdev);
11603                 pci_save_state(pdev);
11604         }
11605
11606         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11607         if (bp->pm_cap == 0) {
11608                 printk(KERN_ERR PFX "Cannot find power management"
11609                        " capability, aborting\n");
11610                 rc = -EIO;
11611                 goto err_out_release;
11612         }
11613
11614         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11615         if (bp->pcie_cap == 0) {
11616                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11617                        " aborting\n");
11618                 rc = -EIO;
11619                 goto err_out_release;
11620         }
11621
11622         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11623                 bp->flags |= USING_DAC_FLAG;
11624                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11625                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11626                                " failed, aborting\n");
11627                         rc = -EIO;
11628                         goto err_out_release;
11629                 }
11630
11631         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11632                 printk(KERN_ERR PFX "System does not support DMA,"
11633                        " aborting\n");
11634                 rc = -EIO;
11635                 goto err_out_release;
11636         }
11637
11638         dev->mem_start = pci_resource_start(pdev, 0);
11639         dev->base_addr = dev->mem_start;
11640         dev->mem_end = pci_resource_end(pdev, 0);
11641
11642         dev->irq = pdev->irq;
11643
11644         bp->regview = pci_ioremap_bar(pdev, 0);
11645         if (!bp->regview) {
11646                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11647                 rc = -ENOMEM;
11648                 goto err_out_release;
11649         }
11650
11651         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11652                                         min_t(u64, BNX2X_DB_SIZE,
11653                                               pci_resource_len(pdev, 2)));
11654         if (!bp->doorbells) {
11655                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11656                 rc = -ENOMEM;
11657                 goto err_out_unmap;
11658         }
11659
11660         bnx2x_set_power_state(bp, PCI_D0);
11661
11662         /* clean indirect addresses */
11663         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11664                                PCICFG_VENDOR_ID_OFFSET);
11665         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11666         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11667         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11668         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11669
11670         dev->watchdog_timeo = TX_TIMEOUT;
11671
11672         dev->netdev_ops = &bnx2x_netdev_ops;
11673         dev->ethtool_ops = &bnx2x_ethtool_ops;
11674         dev->features |= NETIF_F_SG;
11675         dev->features |= NETIF_F_HW_CSUM;
11676         if (bp->flags & USING_DAC_FLAG)
11677                 dev->features |= NETIF_F_HIGHDMA;
11678         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11679         dev->features |= NETIF_F_TSO6;
11680 #ifdef BCM_VLAN
11681         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11682         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11683
11684         dev->vlan_features |= NETIF_F_SG;
11685         dev->vlan_features |= NETIF_F_HW_CSUM;
11686         if (bp->flags & USING_DAC_FLAG)
11687                 dev->vlan_features |= NETIF_F_HIGHDMA;
11688         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11689         dev->vlan_features |= NETIF_F_TSO6;
11690 #endif
11691
11692         /* get_port_hwinfo() will set prtad and mmds properly */
11693         bp->mdio.prtad = MDIO_PRTAD_NONE;
11694         bp->mdio.mmds = 0;
11695         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11696         bp->mdio.dev = dev;
11697         bp->mdio.mdio_read = bnx2x_mdio_read;
11698         bp->mdio.mdio_write = bnx2x_mdio_write;
11699
11700         return 0;
11701
11702 err_out_unmap:
11703         if (bp->regview) {
11704                 iounmap(bp->regview);
11705                 bp->regview = NULL;
11706         }
11707         if (bp->doorbells) {
11708                 iounmap(bp->doorbells);
11709                 bp->doorbells = NULL;
11710         }
11711
11712 err_out_release:
11713         if (atomic_read(&pdev->enable_cnt) == 1)
11714                 pci_release_regions(pdev);
11715
11716 err_out_disable:
11717         pci_disable_device(pdev);
11718         pci_set_drvdata(pdev, NULL);
11719
11720 err_out:
11721         return rc;
11722 }
11723
11724 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11725                                                  int *width, int *speed)
11726 {
11727         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11728
11729         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11730
11731         /* speed encoding: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
11732         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11733 }
11734
11735 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11736 {
11737         const struct firmware *firmware = bp->firmware;
11738         struct bnx2x_fw_file_hdr *fw_hdr;
11739         struct bnx2x_fw_file_section *sections;
11740         u32 offset, len, num_ops;
11741         u16 *ops_offsets;
11742         int i;
11743         const u8 *fw_ver;
11744
11745         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11746                 return -EINVAL;
11747
11748         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11749         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11750
11751         /* Make sure none of the offsets and sizes make us read beyond
11752          * the end of the firmware data */
11753         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11754                 offset = be32_to_cpu(sections[i].offset);
11755                 len = be32_to_cpu(sections[i].len);
11756                 if (offset + len > firmware->size) {
11757                         printk(KERN_ERR PFX "Section %d length is out of "
11758                                             "bounds\n", i);
11759                         return -EINVAL;
11760                 }
11761         }
11762
11763         /* Likewise for the init_ops offsets */
11764         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11765         ops_offsets = (u16 *)(firmware->data + offset);
11766         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11767
11768         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11769                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11770                         printk(KERN_ERR PFX "Section offset %d is out of "
11771                                             "bounds\n", i);
11772                         return -EINVAL;
11773                 }
11774         }
11775
11776         /* Check FW version */
11777         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11778         fw_ver = firmware->data + offset;
11779         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11780             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11781             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11782             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11783                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11784                                     " Should be %d.%d.%d.%d\n",
11785                        fw_ver[0], fw_ver[1], fw_ver[2],
11786                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11787                        BCM_5710_FW_MINOR_VERSION,
11788                        BCM_5710_FW_REVISION_VERSION,
11789                        BCM_5710_FW_ENGINEERING_VERSION);
11790                 return -EINVAL;
11791         }
11792
11793         return 0;
11794 }
11795
11796 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11797 {
11798         const __be32 *source = (const __be32 *)_source;
11799         u32 *target = (u32 *)_target;
11800         u32 i;
11801
11802         for (i = 0; i < n/4; i++)
11803                 target[i] = be32_to_cpu(source[i]);
11804 }
11805
11806 /*
11807    Ops array is stored in the following format:
11808    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11809  */
11810 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11811 {
11812         const __be32 *source = (const __be32 *)_source;
11813         struct raw_op *target = (struct raw_op *)_target;
11814         u32 i, j, tmp;
11815
11816         for (i = 0, j = 0; i < n/8; i++, j += 2) {
11817                 tmp = be32_to_cpu(source[j]);
11818                 target[i].op = (tmp >> 24) & 0xff;
11819                 target[i].offset =  tmp & 0xffffff;
11820                 target[i].raw_data = be32_to_cpu(source[j+1]);
11821         }
11822 }
11823
11824 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11825 {
11826         const __be16 *source = (const __be16 *)_source;
11827         u16 *target = (u16 *)_target;
11828         u32 i;
11829
11830         for (i = 0; i < n/2; i++)
11831                 target[i] = be16_to_cpu(source[i]);
11832 }
11833
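/* Allocate bp->arr for a firmware file section and convert it from
 * big endian using 'func'; jumps to 'lbl' on allocation failure.
 */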
11834 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11835         do { \
11836                 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11837                 bp->arr = kmalloc(len, GFP_KERNEL); \
11838                 if (!bp->arr) { \
11839                         printk(KERN_ERR PFX "Failed to allocate %d bytes " \
11840                                             "for "#arr"\n", len); \
11841                         goto lbl; \
11842                 } \
11843                 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
11844                      (u8 *)bp->arr, len); \
11845         } while (0)
11846
11847 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11848 {
11849         char fw_file_name[40] = {0};
11850         struct bnx2x_fw_file_hdr *fw_hdr;
11851         int rc, offset;
11852
11853         /* Create a FW file name */
11854         if (CHIP_IS_E1(bp))
11855                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11856         else
11857                 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11858
11859         sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11860                 BCM_5710_FW_MAJOR_VERSION,
11861                 BCM_5710_FW_MINOR_VERSION,
11862                 BCM_5710_FW_REVISION_VERSION,
11863                 BCM_5710_FW_ENGINEERING_VERSION);
11864
11865         printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11866
11867         rc = request_firmware(&bp->firmware, fw_file_name, dev);
11868         if (rc) {
11869                 printk(KERN_ERR PFX "Can't load firmware file %s\n",
11870                        fw_file_name);
11871                 goto request_firmware_exit;
11872         }
11873
11874         rc = bnx2x_check_firmware(bp);
11875         if (rc) {
11876                 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11877                 goto request_firmware_exit;
11878         }
11879
11880         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11881
11882         /* Initialize the pointers to the init arrays */
11883         /* Blob */
11884         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11885
11886         /* Opcodes */
11887         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11888
11889         /* Offsets */
11890         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
11891                             be16_to_cpu_n);
11892
11893         /* STORMs firmware */
11894         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11895                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11896         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
11897                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11898         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11899                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11900         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
11901                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
11902         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11903                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11904         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
11905                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11906         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11907                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11908         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
11909                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
11910
11911         return 0;
11912
11913 init_offsets_alloc_err:
11914         kfree(bp->init_ops);
11915 init_ops_alloc_err:
11916         kfree(bp->init_data);
11917 request_firmware_exit:
11918         release_firmware(bp->firmware);
11919
11920         return rc;
11921 }
11922
11923
11924 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11925                                     const struct pci_device_id *ent)
11926 {
11927         struct net_device *dev = NULL;
11928         struct bnx2x *bp;
11929         int pcie_width, pcie_speed;
11930         int rc;
11931
11932         /* dev zeroed in alloc_etherdev_mq() */
11933         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11934         if (!dev) {
11935                 printk(KERN_ERR PFX "Cannot allocate net device\n");
11936                 return -ENOMEM;
11937         }
11938
11939         bp = netdev_priv(dev);
11940         bp->msglevel = debug;
11941
11942         pci_set_drvdata(pdev, dev);
11943
11944         rc = bnx2x_init_dev(pdev, dev);
11945         if (rc < 0) {
11946                 free_netdev(dev);
11947                 return rc;
11948         }
11949
11950         rc = bnx2x_init_bp(bp);
11951         if (rc)
11952                 goto init_one_exit;
11953
11954         /* Set init arrays */
11955         rc = bnx2x_init_firmware(bp, &pdev->dev);
11956         if (rc) {
11957                 printk(KERN_ERR PFX "Error loading firmware\n");
11958                 goto init_one_exit;
11959         }
11960
11961         rc = register_netdev(dev);
11962         if (rc) {
11963                 dev_err(&pdev->dev, "Cannot register net device\n");
11964                 goto init_one_exit;
11965         }
11966
11967         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
11968         printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11969                " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11970                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11971                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
11972                dev->base_addr, bp->pdev->irq);
11973         printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11974
11975         return 0;
11976
11977 init_one_exit:
11978         if (bp->regview)
11979                 iounmap(bp->regview);
11980
11981         if (bp->doorbells)
11982                 iounmap(bp->doorbells);
11983
11984         free_netdev(dev);
11985
11986         if (atomic_read(&pdev->enable_cnt) == 1)
11987                 pci_release_regions(pdev);
11988
11989         pci_disable_device(pdev);
11990         pci_set_drvdata(pdev, NULL);
11991
11992         return rc;
11993 }
11994
11995 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11996 {
11997         struct net_device *dev = pci_get_drvdata(pdev);
11998         struct bnx2x *bp;
11999
12000         if (!dev) {
12001                 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12002                 return;
12003         }
12004         bp = netdev_priv(dev);
12005
12006         unregister_netdev(dev);
12007
12008         kfree(bp->init_ops_offsets);
12009         kfree(bp->init_ops);
12010         kfree(bp->init_data);
12011         release_firmware(bp->firmware);
12012
12013         if (bp->regview)
12014                 iounmap(bp->regview);
12015
12016         if (bp->doorbells)
12017                 iounmap(bp->doorbells);
12018
12019         free_netdev(dev);
12020
12021         if (atomic_read(&pdev->enable_cnt) == 1)
12022                 pci_release_regions(pdev);
12023
12024         pci_disable_device(pdev);
12025         pci_set_drvdata(pdev, NULL);
12026 }
12027
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_save_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        netif_device_detach(dev);

        bnx2x_nic_unload(bp, UNLOAD_CLOSE);

        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

        rtnl_unlock();

        return 0;
}

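/**
 * bnx2x_resume - called when the system returns from a low-power state
 * @pdev: Pointer to PCI device
 *
 * Restores PCI state, brings the device back to D0 and, if the
 * interface was running when we suspended, reloads the NIC.
 */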
static int bnx2x_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
        int rc;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_restore_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        bnx2x_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);

        rc = bnx2x_nic_load(bp, LOAD_OPEN);

        rtnl_unlock();

        return rc;
}

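/*
 * Minimal unload path used from the PCI error handlers. Unlike
 * bnx2x_nic_unload() it skips the firmware handshake and chip shutdown
 * sequence (the device may be inaccessible) and only tears down the
 * software side: timers, IRQs, the host copy of the E1 CAM, and the
 * driver's queues and memory.
 */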
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_rx_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_rx_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        netif_carrier_off(bp->dev);

        return 0;
}

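/*
 * Re-read the shared-memory state after a slot reset so the driver can
 * talk to the MCP again (or learn that it is not active) before the
 * NIC is reloaded in bnx2x_io_resume().
 */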
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

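/*
 * PCI error recovery callbacks: the AER core invokes error_detected
 * first, then slot_reset once the slot has been reset, and finally
 * resume when traffic may flow again.
 */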
static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

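/* Create the slow-path workqueue before registering the PCI driver so
 * that it already exists by the time the first device can be probed.
 */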
static int __init bnx2x_init(void)
{
        int ret;

        printk(KERN_INFO "%s", version);

        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                printk(KERN_ERR PFX "Cannot create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                printk(KERN_ERR PFX "Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
        }
        return ret;
}

static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);