bnx2x: Handle Rx and Tx together in NAPI
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.1-5"
#define DRV_MODULE_RELDATE      "2009/11/09"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

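/* Editorial note: the DMAE block exposes 16 independent command channels.
 * A command is first copied into the channel's slot in DMAE command memory
 * (see bnx2x_post_dmae() below), and writing 1 to that channel's GO
 * register then kicks it off.  This table maps channel index to GO
 * register.
 */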
static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

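/* Editorial note, DMAE write flow in outline:
 *   1. build a dmae_command describing a PCI -> GRC copy of len32 dwords;
 *   2. post it on channel INIT_DMAE_C(bp) via bnx2x_post_dmae();
 *   3. poll the write-back completion word (wb_comp) until the engine
 *      stores DMAE_COMP_VAL there, or the retry budget (cnt = 200) runs out.
 * If the DMAE block is not ready yet (early init), the code falls back to
 * indirect register writes instead.
 */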
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

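/* Editorial note: a single DMAE write is limited to DMAE_LEN32_WR_MAX
 * dwords, so longer buffers are sent in chunks.  Mind the units below:
 * 'len' counts dwords while 'offset' advances in bytes, hence the "* 4"
 * when stepping the offset.
 */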
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int offset = 0;

        while (len > DMAE_LEN32_WR_MAX) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, DMAE_LEN32_WR_MAX);
                offset += DMAE_LEN32_WR_MAX * 4;
                len -= DMAE_LEN32_WR_MAX;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

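/* Editorial note: each of the four storm processors (XSTORM, TSTORM,
 * CSTORM, USTORM) keeps an assert list in its internal memory.  This
 * helper walks all four lists, prints every valid entry (four dwords
 * each) and returns the number of asserts found.
 */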
static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

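/* Editorial note: the MCP firmware logs its trace into a cyclic buffer in
 * scratchpad RAM.  'mark' (read from scratchpad offset 0xf104) is the
 * current write pointer, so the dump below prints from 'mark' to the end
 * of the buffer first, then wraps around from the buffer start (0xF108)
 * back up to 'mark'.
 */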
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 mark, offset;
        __be32 data[9];
        int word;

        mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
        mark = ((mark + 0x3) & ~0x3);
        printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

        printk(KERN_ERR PFX);
        for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
                                                  offset + 4*word));
                data[8] = 0x0;
                printk(KERN_CONT "%s", (char *)data);
        }
        printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
                  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
                  "  spq_prod_idx(%u)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
                          "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
                          "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
                          "  fp_u_idx(%x) *sb_u_idx(%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
                          "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
                          "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

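/* Editorial note: acknowledging a status block to the IGU means packing
 * the new consumer index together with the status block id, the storm id,
 * an "update index" flag and the requested interrupt mode into the 32-bit
 * igu_ack_register, then writing it to this port's host coalescing
 * command register.
 */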
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}

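/* Editorial note on the ring accounting below: the BD ring is a chain of
 * pages, and the last entry of each page is a "next page" pointer rather
 * than a usable BD.  Adding NUM_TX_RINGS to the prod-cons distance
 * reserves those entries, so the value returned never counts a next-page
 * slot as available.  E.g. with prod == cons (empty ring) this reports
 * tx_ring_size - NUM_TX_RINGS usable BDs.
 */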
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        barrier(); /* Tell compiler that prod and cons can change */
        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries;
           it is used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {

                /* Need to make the tx_bd_cons update visible to start_xmit()
                 * before checking for netif_tx_queue_stopped().  Without the
                 * memory barrier, there is a small possibility that
                 * start_xmit() will miss it and cause the queue to be stopped
                 * forever.
                 */
                smp_mb();

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);
        }
        return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp->state is %x\n", command, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        pci_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
                                 PCI_DMA_FROMDEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                                       pci_unmap_addr(cons_rx_buf, mapping),
                                       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        pci_unmap_addr_set(prod_rx_buf, mapping,
                           pci_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

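/* Editorial note: SGE reuse is tracked in a bitmask with one bit per SGE;
 * each u64 element covers RX_SGE_MASK_ELEM_SZ entries.  A set bit means
 * the SGE has not yet been consumed by the FW; bits are cleared as
 * completions report pages used, and the SGE producer is advanced only
 * over fully-cleared mask elements, which are then re-armed to all ones
 * (see bnx2x_update_sge_prod() below).  The last two entries of every
 * ring page are "next page" pointers, never real SGEs, so their bits are
 * cleared up front by the helper below.
 */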
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

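/* Editorial note, TPA (LRO) aggregation lifecycle: on a "start" CQE the
 * skb that opened the aggregation is parked in tpa_pool[queue] and a
 * fresh mapped skb takes its place on the BD ring; the bin state goes
 * STOP -> START.  On the "stop" CQE (bnx2x_tpa_stop() below) the parked
 * skb gets the SGE pages attached as frags, its IP checksum is fixed up,
 * it is handed to the stack, and the bin returns to BNX2X_TPA_STOP.
 */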
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
        pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages >
            min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }

                /* Unmap the page as we are going to pass it to the stack */
                pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
                              SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                           u16 queue, int pad, int len, union eth_rx_cqe *cqe,
                           u16 cqe_idx)
{
        struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
        struct sk_buff *skb = rx_buf->skb;
        /* alloc new skb */
        struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
        pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                         bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (likely(new_skb)) {
                /* fix ip xsum and give it to the stack */
                /* (no need to map the new skb) */
#ifdef BCM_VLAN
                int is_vlan_cqe =
                        (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
                         PARSING_FLAGS_VLAN);
                int is_not_hwaccel_vlan_cqe =
                        (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

                prefetch(skb);
                prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
                if (pad + len > bp->rx_buf_size) {
                        BNX2X_ERR("skb_put is about to fail...  "
                                  "pad %d  len %d  rx_buf_size %d\n",
                                  pad, len, bp->rx_buf_size);
                        bnx2x_panic();
                        return;
                }
#endif

                skb_reserve(skb, pad);
                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, bp->dev);
                skb->ip_summed = CHECKSUM_UNNECESSARY;

                {
                        struct iphdr *iph;

                        iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
                        /* If there is no Rx VLAN offloading -
                           take VLAN tag into an account */
                        if (unlikely(is_not_hwaccel_vlan_cqe))
                                iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
                        iph->check = 0;
                        iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
                }

                if (!bnx2x_fill_frag_skb(bp, fp, skb,
                                         &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
                        if ((bp->vlgrp != NULL) && is_vlan_cqe &&
                            (!is_not_hwaccel_vlan_cqe))
                                vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                                le16_to_cpu(cqe->fast_path_cqe.
                                                            vlan_tag));
                        else
#endif
                                netif_receive_skb(skb);
                } else {
                        DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
                           " - dropping packet!\n");
                        dev_kfree_skb(skb);
                }


                /* put new skb in bin */
                fp->tpa_pool[queue].skb = new_skb;

        } else {
                /* else drop the packet and keep the buffer in the bin */
                DP(NETIF_MSG_RX_STATUS,
                   "Failed to allocate new skb - dropping packet!\n");
                fp->eth_q_stats.rx_skb_alloc_failed++;
        }

        fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

1458 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1459                                         struct bnx2x_fastpath *fp,
1460                                         u16 bd_prod, u16 rx_comp_prod,
1461                                         u16 rx_sge_prod)
1462 {
1463         struct ustorm_eth_rx_producers rx_prods = {0};
1464         int i;
1465
1466         /* Update producers */
1467         rx_prods.bd_prod = bd_prod;
1468         rx_prods.cqe_prod = rx_comp_prod;
1469         rx_prods.sge_prod = rx_sge_prod;
1470
1471         /*
1472          * Make sure that the BD and SGE data is updated before updating the
1473          * producers since FW might read the BD/SGE right after the producer
1474          * is updated.
1475          * This is only applicable for weak-ordered memory model archs such
1476          * as IA-64. The following barrier is also mandatory since the FW
1477          * assumes that BDs must have buffers.
1478          */
1479         wmb();
1480
1481         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1482                 REG_WR(bp, BAR_USTRORM_INTMEM +
1483                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1484                        ((u32 *)&rx_prods)[i]);
1485
1486         mmiowb(); /* keep prod updates ordered */
1487
1488         DP(NETIF_MSG_RX_STATUS,
1489            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1490            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1491 }
1492
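/* Process up to @budget completions on one fastpath Rx ring: slowpath
 * CQEs are dispatched to bnx2x_sp_event(), TPA start/stop CQEs to the
 * TPA handlers above, and regular packets are checksum-validated and
 * passed up the stack.  Returns the number of Rx packets handled.
 */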
1493 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1494 {
1495         struct bnx2x *bp = fp->bp;
1496         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1497         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1498         int rx_pkt = 0;
1499
1500 #ifdef BNX2X_STOP_ON_ERROR
1501         if (unlikely(bp->panic))
1502                 return 0;
1503 #endif
1504
1505         /* The CQ "next element" is the same size as a regular element,
1506            so it is OK to treat it the same way here */
1507         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1508         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1509                 hw_comp_cons++;
1510
1511         bd_cons = fp->rx_bd_cons;
1512         bd_prod = fp->rx_bd_prod;
1513         bd_prod_fw = bd_prod;
1514         sw_comp_cons = fp->rx_comp_cons;
1515         sw_comp_prod = fp->rx_comp_prod;
1516
1517         /* Memory barrier necessary as speculative reads of the rx
1518          * buffer can be ahead of the index in the status block
1519          */
1520         rmb();
1521
1522         DP(NETIF_MSG_RX_STATUS,
1523            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1524            fp->index, hw_comp_cons, sw_comp_cons);
1525
1526         while (sw_comp_cons != hw_comp_cons) {
1527                 struct sw_rx_bd *rx_buf = NULL;
1528                 struct sk_buff *skb;
1529                 union eth_rx_cqe *cqe;
1530                 u8 cqe_fp_flags;
1531                 u16 len, pad;
1532
1533                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1534                 bd_prod = RX_BD(bd_prod);
1535                 bd_cons = RX_BD(bd_cons);
1536
1537                 /* Prefetch the page containing the BD descriptor at the
1538                    producer's index - it will be needed when a new skb is
1539                    allocated */
1540                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1541                                              (&fp->rx_desc_ring[bd_prod])) -
1542                                   PAGE_SIZE + 1));
1543
1544                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1545                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1546
1547                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1548                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1549                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1550                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1551                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1552                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1553
1554                 /* is this a slowpath msg? */
1555                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1556                         bnx2x_sp_event(fp, cqe);
1557                         goto next_cqe;
1558
1559                 /* this is an rx packet */
1560                 } else {
1561                         rx_buf = &fp->rx_buf_ring[bd_cons];
1562                         skb = rx_buf->skb;
1563                         prefetch(skb);
1564                         prefetch((u8 *)skb + 256);
1565                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1566                         pad = cqe->fast_path_cqe.placement_offset;
1567
1568                         /* If CQE is marked both TPA_START and TPA_END
1569                            it is a non-TPA CQE */
1570                         if ((!fp->disable_tpa) &&
1571                             (TPA_TYPE(cqe_fp_flags) !=
1572                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1573                                 u16 queue = cqe->fast_path_cqe.queue_index;
1574
1575                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1576                                         DP(NETIF_MSG_RX_STATUS,
1577                                            "calling tpa_start on queue %d\n",
1578                                            queue);
1579
1580                                         bnx2x_tpa_start(fp, queue, skb,
1581                                                         bd_cons, bd_prod);
1582                                         goto next_rx;
1583                                 }
1584
1585                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1586                                         DP(NETIF_MSG_RX_STATUS,
1587                                            "calling tpa_stop on queue %d\n",
1588                                            queue);
1589
1590                                         if (!BNX2X_RX_SUM_FIX(cqe))
1591                                                 BNX2X_ERR("STOP on non-TCP "
1592                                                           "data\n");
1593
1594                                         /* This is the size of the linear
1595                                            data on this skb */
1596                                         len = le16_to_cpu(cqe->fast_path_cqe.
1597                                                                 len_on_bd);
1598                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1599                                                     len, cqe, comp_ring_cons);
1600 #ifdef BNX2X_STOP_ON_ERROR
1601                                         if (bp->panic)
1602                                                 return 0;
1603 #endif
1604
1605                                         bnx2x_update_sge_prod(fp,
1606                                                         &cqe->fast_path_cqe);
1607                                         goto next_cqe;
1608                                 }
1609                         }
1610
1611                         pci_dma_sync_single_for_device(bp->pdev,
1612                                         pci_unmap_addr(rx_buf, mapping),
1613                                                        pad + RX_COPY_THRESH,
1614                                                        PCI_DMA_FROMDEVICE);
1615                         prefetch(skb);
1616                         prefetch(((char *)(skb)) + 128);
1617
1618                         /* is this an error packet? */
1619                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1620                                 DP(NETIF_MSG_RX_ERR,
1621                                    "ERROR  flags %x  rx packet %u\n",
1622                                    cqe_fp_flags, sw_comp_cons);
1623                                 fp->eth_q_stats.rx_err_discard_pkt++;
1624                                 goto reuse_rx;
1625                         }
1626
1627                         /* Since we don't have a jumbo ring,
1628                          * copy small packets if mtu > 1500
1629                          */
1630                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1631                             (len <= RX_COPY_THRESH)) {
1632                                 struct sk_buff *new_skb;
1633
1634                                 new_skb = netdev_alloc_skb(bp->dev,
1635                                                            len + pad);
1636                                 if (new_skb == NULL) {
1637                                         DP(NETIF_MSG_RX_ERR,
1638                                            "ERROR  packet dropped "
1639                                            "because of alloc failure\n");
1640                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1641                                         goto reuse_rx;
1642                                 }
1643
1644                                 /* aligned copy */
1645                                 skb_copy_from_linear_data_offset(skb, pad,
1646                                                     new_skb->data + pad, len);
1647                                 skb_reserve(new_skb, pad);
1648                                 skb_put(new_skb, len);
1649
1650                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1651
1652                                 skb = new_skb;
1653
1654                         } else
1655                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1656                                 pci_unmap_single(bp->pdev,
1657                                         pci_unmap_addr(rx_buf, mapping),
1658                                                  bp->rx_buf_size,
1659                                                  PCI_DMA_FROMDEVICE);
1660                                 skb_reserve(skb, pad);
1661                                 skb_put(skb, len);
1662
1663                         } else {
1664                                 DP(NETIF_MSG_RX_ERR,
1665                                    "ERROR  packet dropped because "
1666                                    "of alloc failure\n");
1667                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1668 reuse_rx:
1669                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1670                                 goto next_rx;
1671                         }
1672
1673                         skb->protocol = eth_type_trans(skb, bp->dev);
1674
1675                         skb->ip_summed = CHECKSUM_NONE;
1676                         if (bp->rx_csum) {
1677                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1678                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1679                                 else
1680                                         fp->eth_q_stats.hw_csum_err++;
1681                         }
1682                 }
1683
1684                 skb_record_rx_queue(skb, fp->index);
1685
1686 #ifdef BCM_VLAN
1687                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1688                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1689                      PARSING_FLAGS_VLAN))
1690                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1691                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1692                 else
1693 #endif
1694                         netif_receive_skb(skb);
1695
1696
1697 next_rx:
1698                 rx_buf->skb = NULL;
1699
1700                 bd_cons = NEXT_RX_IDX(bd_cons);
1701                 bd_prod = NEXT_RX_IDX(bd_prod);
1702                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1703                 rx_pkt++;
1704 next_cqe:
1705                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1706                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1707
1708                 if (rx_pkt == budget)
1709                         break;
1710         } /* while */
1711
1712         fp->rx_bd_cons = bd_cons;
1713         fp->rx_bd_prod = bd_prod_fw;
1714         fp->rx_comp_cons = sw_comp_cons;
1715         fp->rx_comp_prod = sw_comp_prod;
1716
1717         /* Update producers */
1718         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1719                              fp->rx_sge_prod);
1720
1721         fp->rx_pkt += rx_pkt;
1722         fp->rx_calls++;
1723
1724         return rx_pkt;
1725 }
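/* Illustrative sketch (editor's note, not driver code): a NAPI poll
 * handler (the real one lives elsewhere in this file) would typically
 * drive bnx2x_rx_int() roughly like
 *
 *	work_done = bnx2x_rx_int(fp, budget);
 *	if (work_done < budget) {
 *		napi_complete(napi);
 *		... re-enable the status block interrupt ...
 *	}
 *
 * honoring the NAPI contract that a poll which exhausts its budget
 * stays scheduled.
 */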
1726
1727 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1728 {
1729         struct bnx2x_fastpath *fp = fp_cookie;
1730         struct bnx2x *bp = fp->bp;
1731
1732         /* Return here if interrupt is disabled */
1733         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1734                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1735                 return IRQ_HANDLED;
1736         }
1737
1738         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1739            fp->index, fp->sb_id);
1740         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1741
1742 #ifdef BNX2X_STOP_ON_ERROR
1743         if (unlikely(bp->panic))
1744                 return IRQ_HANDLED;
1745 #endif
1746
1747         /* Handle Rx and Tx according to MSI-X vector */
1748         prefetch(fp->rx_cons_sb);
1749         prefetch(fp->tx_cons_sb);
1750         prefetch(&fp->status_blk->u_status_block.status_block_index);
1751         prefetch(&fp->status_blk->c_status_block.status_block_index);
1752         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1753
1754         return IRQ_HANDLED;
1755 }
1756
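/* INTx/MSI interrupt handler.  The acked status word is a bitmask:
 * bit 0 flags a slowpath event (deferred to the sp_task work below),
 * and bit (sb_id + 1) - i.e. 0x2 << sb_id - flags work on the
 * corresponding fastpath status block.
 */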
1757 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1758 {
1759         struct bnx2x *bp = netdev_priv(dev_instance);
1760         u16 status = bnx2x_ack_int(bp);
1761         u16 mask;
1762         int i;
1763
1764         /* Return here if interrupt is shared and it's not for us */
1765         if (unlikely(status == 0)) {
1766                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1767                 return IRQ_NONE;
1768         }
1769         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1770
1771         /* Return here if interrupt is disabled */
1772         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1773                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1774                 return IRQ_HANDLED;
1775         }
1776
1777 #ifdef BNX2X_STOP_ON_ERROR
1778         if (unlikely(bp->panic))
1779                 return IRQ_HANDLED;
1780 #endif
1781
1782         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1783                 struct bnx2x_fastpath *fp = &bp->fp[i];
1784
1785                 mask = 0x2 << fp->sb_id;
1786                 if (status & mask) {
1787                         /* Handle Rx and Tx according to SB id */
1788                         prefetch(fp->rx_cons_sb);
1789                         prefetch(&fp->status_blk->u_status_block.
1790                                                 status_block_index);
1791                         prefetch(fp->tx_cons_sb);
1792                         prefetch(&fp->status_blk->c_status_block.
1793                                                 status_block_index);
1794                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1795                         status &= ~mask;
1796                 }
1797         }
1798
1799 #ifdef BCM_CNIC
1800         mask = 0x2 << CNIC_SB_ID(bp);
1801         if (status & (mask | 0x1)) {
1802                 struct cnic_ops *c_ops = NULL;
1803
1804                 rcu_read_lock();
1805                 c_ops = rcu_dereference(bp->cnic_ops);
1806                 if (c_ops)
1807                         c_ops->cnic_handler(bp->cnic_data, NULL);
1808                 rcu_read_unlock();
1809
1810                 status &= ~mask;
1811         }
1812 #endif
1813
1814         if (unlikely(status & 0x1)) {
1815                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1816
1817                 status &= ~0x1;
1818                 if (!status)
1819                         return IRQ_HANDLED;
1820         }
1821
1822         if (status)
1823                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1824                    status);
1825
1826         return IRQ_HANDLED;
1827 }
1828
1829 /* end of fast path */
1830
1831 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1832
1833 /* Link */
1834
1835 /*
1836  * General service functions
1837  */
1838
1839 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1840 {
1841         u32 lock_status;
1842         u32 resource_bit = (1 << resource);
1843         int func = BP_FUNC(bp);
1844         u32 hw_lock_control_reg;
1845         int cnt;
1846
1847         /* Validating that the resource is within range */
1848         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1849                 DP(NETIF_MSG_HW,
1850                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1851                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1852                 return -EINVAL;
1853         }
1854
1855         if (func <= 5) {
1856                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1857         } else {
1858                 hw_lock_control_reg =
1859                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1860         }
1861
1862         /* Validating that the resource is not already taken */
1863         lock_status = REG_RD(bp, hw_lock_control_reg);
1864         if (lock_status & resource_bit) {
1865                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1866                    lock_status, resource_bit);
1867                 return -EEXIST;
1868         }
1869
1870         /* Try for 5 seconds, every 5 ms */
1871         for (cnt = 0; cnt < 1000; cnt++) {
1872                 /* Try to acquire the lock */
1873                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1874                 lock_status = REG_RD(bp, hw_lock_control_reg);
1875                 if (lock_status & resource_bit)
1876                         return 0;
1877
1878                 msleep(5);
1879         }
1880         DP(NETIF_MSG_HW, "Timeout\n");
1881         return -EAGAIN;
1882 }
1883
1884 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1885 {
1886         u32 lock_status;
1887         u32 resource_bit = (1 << resource);
1888         int func = BP_FUNC(bp);
1889         u32 hw_lock_control_reg;
1890
1891         /* Validating that the resource is within range */
1892         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1893                 DP(NETIF_MSG_HW,
1894                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1895                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1896                 return -EINVAL;
1897         }
1898
1899         if (func <= 5) {
1900                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1901         } else {
1902                 hw_lock_control_reg =
1903                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1904         }
1905
1906         /* Validating that the resource is currently taken */
1907         lock_status = REG_RD(bp, hw_lock_control_reg);
1908         if (!(lock_status & resource_bit)) {
1909                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1910                    lock_status, resource_bit);
1911                 return -EFAULT;
1912         }
1913
1914         REG_WR(bp, hw_lock_control_reg, resource_bit);
1915         return 0;
1916 }
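/* Typical usage of this lock pair (see e.g. bnx2x_set_gpio() below):
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... read-modify-write the shared register ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */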
1917
1918 /* HW Lock for shared dual port PHYs */
1919 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1920 {
1921         mutex_lock(&bp->port.phy_mutex);
1922
1923         if (bp->port.need_hw_lock)
1924                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1925 }
1926
1927 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1928 {
1929         if (bp->port.need_hw_lock)
1930                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1931
1932         mutex_unlock(&bp->port.phy_mutex);
1933 }
1934
1935 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1936 {
1937         /* The GPIO should be swapped if the swap register is set and active */
1938         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1939                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1940         int gpio_shift = gpio_num +
1941                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942         u32 gpio_mask = (1 << gpio_shift);
1943         u32 gpio_reg;
1944         int value;
1945
1946         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1947                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1948                 return -EINVAL;
1949         }
1950
1951         /* read GPIO value */
1952         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1953
1954         /* get the requested pin value */
1955         if ((gpio_reg & gpio_mask) == gpio_mask)
1956                 value = 1;
1957         else
1958                 value = 0;
1959
1960         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
1961
1962         return value;
1963 }
1964
1965 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1966 {
1967         /* The GPIO should be swapped if the swap register is set and active */
1968         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1969                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1970         int gpio_shift = gpio_num +
1971                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1972         u32 gpio_mask = (1 << gpio_shift);
1973         u32 gpio_reg;
1974
1975         if (gpio_num > MISC_REGISTERS_GPIO_3) {
1976                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1977                 return -EINVAL;
1978         }
1979
1980         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1981         /* read GPIO and mask except the float bits */
1982         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1983
1984         switch (mode) {
1985         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1986                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1987                    gpio_num, gpio_shift);
1988                 /* clear FLOAT and set CLR */
1989                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1990                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1991                 break;
1992
1993         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1994                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1995                    gpio_num, gpio_shift);
1996                 /* clear FLOAT and set SET */
1997                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1998                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1999                 break;
2000
2001         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2002                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2003                    gpio_num, gpio_shift);
2004                 /* set FLOAT */
2005                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2006                 break;
2007
2008         default:
2009                 break;
2010         }
2011
2012         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2013         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2014
2015         return 0;
2016 }
2017
2018 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2019 {
2020         /* The GPIO should be swapped if the swap register is set and active */
2021         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2022                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2023         int gpio_shift = gpio_num +
2024                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2025         u32 gpio_mask = (1 << gpio_shift);
2026         u32 gpio_reg;
2027
2028         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2029                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2030                 return -EINVAL;
2031         }
2032
2033         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2034         /* read GPIO int */
2035         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2036
2037         switch (mode) {
2038         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2039                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2040                                    "output low\n", gpio_num, gpio_shift);
2041                 /* clear SET and set CLR */
2042                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2043                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2044                 break;
2045
2046         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2047                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2048                                    "output high\n", gpio_num, gpio_shift);
2049                 /* clear CLR and set SET */
2050                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2051                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2052                 break;
2053
2054         default:
2055                 break;
2056         }
2057
2058         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2059         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2060
2061         return 0;
2062 }
2063
2064 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2065 {
2066         u32 spio_mask = (1 << spio_num);
2067         u32 spio_reg;
2068
2069         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2070             (spio_num > MISC_REGISTERS_SPIO_7)) {
2071                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2072                 return -EINVAL;
2073         }
2074
2075         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2076         /* read SPIO and mask except the float bits */
2077         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2078
2079         switch (mode) {
2080         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2081                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2082                 /* clear FLOAT and set CLR */
2083                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2084                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2085                 break;
2086
2087         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2088                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2089                 /* clear FLOAT and set SET */
2090                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2091                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2092                 break;
2093
2094         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2095                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2096                 /* set FLOAT */
2097                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2098                 break;
2099
2100         default:
2101                 break;
2102         }
2103
2104         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2105         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2106
2107         return 0;
2108 }
2109
2110 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2111 {
2112         switch (bp->link_vars.ieee_fc &
2113                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2114         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2115                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2116                                           ADVERTISED_Pause);
2117                 break;
2118
2119         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2120                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2121                                          ADVERTISED_Pause);
2122                 break;
2123
2124         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2125                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2126                 break;
2127
2128         default:
2129                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2130                                           ADVERTISED_Pause);
2131                 break;
2132         }
2133 }
2134
2135 static void bnx2x_link_report(struct bnx2x *bp)
2136 {
2137         if (bp->flags & MF_FUNC_DIS) {
2138                 netif_carrier_off(bp->dev);
2139                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2140                 return;
2141         }
2142
2143         if (bp->link_vars.link_up) {
2144                 u16 line_speed;
2145
2146                 if (bp->state == BNX2X_STATE_OPEN)
2147                         netif_carrier_on(bp->dev);
2148                 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2149
2150                 line_speed = bp->link_vars.line_speed;
2151                 if (IS_E1HMF(bp)) {
2152                         u16 vn_max_rate;
2153
2154                         vn_max_rate =
2155                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157                         if (vn_max_rate < line_speed)
2158                                 line_speed = vn_max_rate;
2159                 }
2160                 printk("%d Mbps ", line_speed);
2161
2162                 if (bp->link_vars.duplex == DUPLEX_FULL)
2163                         printk("full duplex");
2164                 else
2165                         printk("half duplex");
2166
2167                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2168                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2169                                 printk(", receive ");
2170                                 if (bp->link_vars.flow_ctrl &
2171                                     BNX2X_FLOW_CTRL_TX)
2172                                         printk("& transmit ");
2173                         } else {
2174                                 printk(", transmit ");
2175                         }
2176                         printk("flow control ON");
2177                 }
2178                 printk("\n");
2179
2180         } else { /* link_down */
2181                 netif_carrier_off(bp->dev);
2182                 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2183         }
2184 }
2185
2186 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2187 {
2188         if (!BP_NOMCP(bp)) {
2189                 u8 rc;
2190
2191                 /* Initialize link parameters structure variables */
2192                 /* It is recommended to turn off RX FC for jumbo frames
2193                    for better performance */
2194                 if (bp->dev->mtu > 5000)
2195                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2196                 else
2197                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2198
2199                 bnx2x_acquire_phy_lock(bp);
2200
2201                 if (load_mode == LOAD_DIAG)
2202                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2203
2204                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2205
2206                 bnx2x_release_phy_lock(bp);
2207
2208                 bnx2x_calc_fc_adv(bp);
2209
2210                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2211                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2212                         bnx2x_link_report(bp);
2213                 }
2214
2215                 return rc;
2216         }
2217         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2218         return -EINVAL;
2219 }
2220
2221 static void bnx2x_link_set(struct bnx2x *bp)
2222 {
2223         if (!BP_NOMCP(bp)) {
2224                 bnx2x_acquire_phy_lock(bp);
2225                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2226                 bnx2x_release_phy_lock(bp);
2227
2228                 bnx2x_calc_fc_adv(bp);
2229         } else
2230                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2231 }
2232
2233 static void bnx2x__link_reset(struct bnx2x *bp)
2234 {
2235         if (!BP_NOMCP(bp)) {
2236                 bnx2x_acquire_phy_lock(bp);
2237                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2238                 bnx2x_release_phy_lock(bp);
2239         } else
2240                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2241 }
2242
2243 static u8 bnx2x_link_test(struct bnx2x *bp)
2244 {
2245         u8 rc;
2246
2247         bnx2x_acquire_phy_lock(bp);
2248         rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2249         bnx2x_release_phy_lock(bp);
2250
2251         return rc;
2252 }
2253
2254 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2255 {
2256         u32 r_param = bp->link_vars.line_speed / 8;
2257         u32 fair_periodic_timeout_usec;
2258         u32 t_fair;
2259
2260         memset(&(bp->cmng.rs_vars), 0,
2261                sizeof(struct rate_shaping_vars_per_port));
2262         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2263
2264         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2265         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2266
2267         /* this is the threshold below which no timer arming will occur;
2268            the 1.25 coefficient makes the threshold a little bigger than
2269            the real time, to compensate for timer inaccuracy */
2270         bp->cmng.rs_vars.rs_threshold =
2271                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2272
2273         /* resolution of fairness timer */
2274         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2275         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2276         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2277
2278         /* this is the threshold below which we won't arm the timer anymore */
2279         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2280
2281         /* we multiply by 1e3/8 to get bytes/msec.
2282            We don't want the credits to exceed t_fair*FAIR_MEM
2283            (the algorithm resolution) */
2284         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2285         /* since each tick is 4 usec */
2286         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2287 }
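/* Worked example (editor's illustration, using only values implied by
 * the comments above: RS_PERIODIC_TIMEOUT_USEC = 100, T_FAIR_COEF =
 * 10^7) for a 10 Gbps link:
 *
 *	r_param             = 10000 / 8        = 1250 bytes/usec
 *	rs_periodic_timeout = 100 / 4          = 25 SDM ticks
 *	rs_threshold        = 100 * 1250 * 5/4 = 156250 bytes
 *	t_fair              = 10^7 / 10000     = 1000 usec
 *
 * The remaining fields depend on QM_ARB_BYTES and FAIR_MEM, which are
 * defined elsewhere.
 */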
2288
2289 /* Calculates the sum of vn_min_rates.
2290    It's needed for further normalizing of the min_rates.
2291    Returns:
2292      sum of vn_min_rates.
2293        or
2294      0 - if all the min_rates are 0.
2295      In the latter case the fairness algorithm should be deactivated.
2296      If not all min_rates are zero then those that are zeroes will be set to 1.
2297  */
2298 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2299 {
2300         int all_zero = 1;
2301         int port = BP_PORT(bp);
2302         int vn;
2303
2304         bp->vn_weight_sum = 0;
2305         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2306                 int func = 2*vn + port;
2307                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2308                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2309                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2310
2311                 /* Skip hidden vns */
2312                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2313                         continue;
2314
2315                 /* If min rate is zero - set it to 1 */
2316                 if (!vn_min_rate)
2317                         vn_min_rate = DEF_MIN_RATE;
2318                 else
2319                         all_zero = 0;
2320
2321                 bp->vn_weight_sum += vn_min_rate;
2322         }
2323
2324         /* ... only if all min rates are zeros - disable fairness */
2325         if (all_zero) {
2326                 bp->cmng.flags.cmng_enables &=
2327                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
2329                    " fairness will be disabled\n");
2330         } else
2331                 bp->cmng.flags.cmng_enables |=
2332                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2333 }
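/* Example (editor's illustration): with two visible VNs whose min BW
 * fields are 20 and 30, vn_min_rate is 2000 and 3000 Mbps and
 * vn_weight_sum = 5000; a visible VN configured with 0 would instead
 * contribute DEF_MIN_RATE, keeping every weight non-zero.
 */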
2334
2335 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2336 {
2337         struct rate_shaping_vars_per_vn m_rs_vn;
2338         struct fairness_vars_per_vn m_fair_vn;
2339         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2340         u16 vn_min_rate, vn_max_rate;
2341         int i;
2342
2343         /* If function is hidden - set min and max to zeroes */
2344         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2345                 vn_min_rate = 0;
2346                 vn_max_rate = 0;
2347
2348         } else {
2349                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2350                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2351                 /* If min rate is zero - set it to 1 */
2352                 if (!vn_min_rate)
2353                         vn_min_rate = DEF_MIN_RATE;
2354                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2355                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2356         }
2357         DP(NETIF_MSG_IFUP,
2358            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2359            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2360
2361         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2362         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2363
2364         /* global vn counter - maximal Mbps for this vn */
2365         m_rs_vn.vn_counter.rate = vn_max_rate;
2366
2367         /* quota - number of bytes transmitted in this period */
2368         m_rs_vn.vn_counter.quota =
2369                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2370
2371         if (bp->vn_weight_sum) {
2372                 /* credit for each period of the fairness algorithm:
2373                    number of bytes in T_FAIR (the VNs share the port rate).
2374                    vn_weight_sum should not be larger than 10000, thus
2375                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2376                    than zero */
2377                 m_fair_vn.vn_credit_delta =
2378                         max((u32)(vn_min_rate * (T_FAIR_COEF /
2379                                                  (8 * bp->vn_weight_sum))),
2380                             (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2381                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2382                    m_fair_vn.vn_credit_delta);
2383         }
2384
2385         /* Store it to internal memory */
2386         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2387                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2388                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2389                        ((u32 *)(&m_rs_vn))[i]);
2390
2391         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2392                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2393                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2394                        ((u32 *)(&m_fair_vn))[i]);
2395 }
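/* Worked example (editor's illustration): with vn_max_rate = 10000
 * Mbps and RS_PERIODIC_TIMEOUT_USEC = 100 (as implied earlier), the
 * rate shaping quota is 10000 * 100 / 8 = 125000 bytes per period.
 */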
2396
2397
2398 /* This function is called upon link interrupt */
2399 static void bnx2x_link_attn(struct bnx2x *bp)
2400 {
2401         /* Make sure that we are synced with the current statistics */
2402         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2403
2404         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2405
2406         if (bp->link_vars.link_up) {
2407
2408                 /* dropless flow control */
2409                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2410                         int port = BP_PORT(bp);
2411                         u32 pause_enabled = 0;
2412
2413                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2414                                 pause_enabled = 1;
2415
2416                         REG_WR(bp, BAR_USTRORM_INTMEM +
2417                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2418                                pause_enabled);
2419                 }
2420
2421                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2422                         struct host_port_stats *pstats;
2423
2424                         pstats = bnx2x_sp(bp, port_stats);
2425                         /* reset old bmac stats */
2426                         memset(&(pstats->mac_stx[0]), 0,
2427                                sizeof(struct mac_stx));
2428                 }
2429                 if (bp->state == BNX2X_STATE_OPEN)
2430                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2431         }
2432
2433         /* indicate link status */
2434         bnx2x_link_report(bp);
2435
2436         if (IS_E1HMF(bp)) {
2437                 int port = BP_PORT(bp);
2438                 int func;
2439                 int vn;
2440
2441                 /* Set the attention towards other drivers on the same port */
2442                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2443                         if (vn == BP_E1HVN(bp))
2444                                 continue;
2445
2446                         func = ((vn << 1) | port);
2447                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2448                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2449                 }
2450
2451                 if (bp->link_vars.link_up) {
2452                         int i;
2453
2454                         /* Init rate shaping and fairness contexts */
2455                         bnx2x_init_port_minmax(bp);
2456
2457                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2458                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2459
2460                         /* Store it to internal memory */
2461                         for (i = 0;
2462                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2463                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2464                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2465                                        ((u32 *)(&bp->cmng))[i]);
2466                 }
2467         }
2468 }
2469
2470 static void bnx2x__link_status_update(struct bnx2x *bp)
2471 {
2472         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2473                 return;
2474
2475         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2476
2477         if (bp->link_vars.link_up)
2478                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2479         else
2480                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2481
2482         bnx2x_calc_vn_weight_sum(bp);
2483
2484         /* indicate link status */
2485         bnx2x_link_report(bp);
2486 }
2487
2488 static void bnx2x_pmf_update(struct bnx2x *bp)
2489 {
2490         int port = BP_PORT(bp);
2491         u32 val;
2492
2493         bp->port.pmf = 1;
2494         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2495
2496         /* enable nig attention */
2497         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2498         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2499         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2500
2501         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2502 }
2503
2504 /* end of Link */
2505
2506 /* slow path */
2507
2508 /*
2509  * General service functions
2510  */
2511
2512 /* send the MCP a request, block until there is a reply */
2513 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2514 {
2515         int func = BP_FUNC(bp);
2516         u32 seq = ++bp->fw_seq;
2517         u32 rc = 0;
2518         u32 cnt = 1;
2519         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2520
2521         mutex_lock(&bp->fw_mb_mutex);
2522         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2523         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2524
2525         do {
2526                 /* let the FW do its magic ... */
2527                 msleep(delay);
2528
2529                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2530
2531                 /* Give the FW up to 5 seconds (500*10ms) */
2532         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2533
2534         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2535            cnt*delay, rc, seq);
2536
2537         /* is this a reply to our command? */
2538         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2539                 rc &= FW_MSG_CODE_MASK;
2540         else {
2541                 /* FW BUG! */
2542                 BNX2X_ERR("FW failed to respond!\n");
2543                 bnx2x_fw_dump(bp);
2544                 rc = 0;
2545         }
2546         mutex_unlock(&bp->fw_mb_mutex);
2547
2548         return rc;
2549 }
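/* Usage sketch (illustrative): callers write a DRV_MSG_CODE_* request
 * and get back the masked FW_MSG_CODE_* reply, or 0 if the FW timed
 * out, e.g.
 *
 *	bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
 *
 * as done by bnx2x_dcc_event() below.
 */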
2550
2551 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2552 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2553 static void bnx2x_set_rx_mode(struct net_device *dev);
2554
2555 static void bnx2x_e1h_disable(struct bnx2x *bp)
2556 {
2557         int port = BP_PORT(bp);
2558
2559         netif_tx_disable(bp->dev);
2560         bp->dev->trans_start = jiffies; /* prevent tx timeout */
2561
2562         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2563
2564         netif_carrier_off(bp->dev);
2565 }
2566
2567 static void bnx2x_e1h_enable(struct bnx2x *bp)
2568 {
2569         int port = BP_PORT(bp);
2570
2571         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2572
2573         /* Tx queues should only be re-enabled */
2574         netif_tx_wake_all_queues(bp->dev);
2575
2576         /*
2577          * Do not call netif_carrier_on() here - it will be called from the
2578          * link state check if the link is up
2579          */
2580 }
2581
2582 static void bnx2x_update_min_max(struct bnx2x *bp)
2583 {
2584         int port = BP_PORT(bp);
2585         int vn, i;
2586
2587         /* Init rate shaping and fairness contexts */
2588         bnx2x_init_port_minmax(bp);
2589
2590         bnx2x_calc_vn_weight_sum(bp);
2591
2592         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2593                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2594
2595         if (bp->port.pmf) {
2596                 int func;
2597
2598                 /* Set the attention towards other drivers on the same port */
2599                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2600                         if (vn == BP_E1HVN(bp))
2601                                 continue;
2602
2603                         func = ((vn << 1) | port);
2604                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2605                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2606                 }
2607
2608                 /* Store it to internal memory */
2609                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2610                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2611                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2612                                ((u32 *)(&bp->cmng))[i]);
2613         }
2614 }
2615
2616 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2617 {
2618         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2619
2620         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2621
2622                 /*
2623                  * This is the only place besides the function initialization
2624                  * where the bp->flags can change so it is done without any
2625                  * locks
2626                  */
2627                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2628                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2629                         bp->flags |= MF_FUNC_DIS;
2630
2631                         bnx2x_e1h_disable(bp);
2632                 } else {
2633                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2634                         bp->flags &= ~MF_FUNC_DIS;
2635
2636                         bnx2x_e1h_enable(bp);
2637                 }
2638                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2639         }
2640         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2641
2642                 bnx2x_update_min_max(bp);
2643                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2644         }
2645
2646         /* Report results to MCP */
2647         if (dcc_event)
2648                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2649         else
2650                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2651 }
2652
2653 /* must be called under the spq lock */
2654 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2655 {
2656         struct eth_spe *next_spe = bp->spq_prod_bd;
2657
2658         if (bp->spq_prod_bd == bp->spq_last_bd) {
2659                 bp->spq_prod_bd = bp->spq;
2660                 bp->spq_prod_idx = 0;
2661                 DP(NETIF_MSG_TIMER, "end of spq\n");
2662         } else {
2663                 bp->spq_prod_bd++;
2664                 bp->spq_prod_idx++;
2665         }
2666         return next_spe;
2667 }
2668
2669 /* must be called under the spq lock */
2670 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2671 {
2672         int func = BP_FUNC(bp);
2673
2674         /* Make sure that BD data is updated before writing the producer */
2675         wmb();
2676
2677         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2678                bp->spq_prod_idx);
2679         mmiowb();
2680 }
2681
2682 /* the slow path queue is odd since completions arrive on the fastpath ring */
2683 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2684                          u32 data_hi, u32 data_lo, int common)
2685 {
2686         struct eth_spe *spe;
2687
2688         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2689            "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2690            (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2691            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2692            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2693
2694 #ifdef BNX2X_STOP_ON_ERROR
2695         if (unlikely(bp->panic))
2696                 return -EIO;
2697 #endif
2698
2699         spin_lock_bh(&bp->spq_lock);
2700
2701         if (!bp->spq_left) {
2702                 BNX2X_ERR("BUG! SPQ ring full!\n");
2703                 spin_unlock_bh(&bp->spq_lock);
2704                 bnx2x_panic();
2705                 return -EBUSY;
2706         }
2707
2708         spe = bnx2x_sp_get_next(bp);
2709
2710         /* The CID needs the port number to be encoded in it */
2711         spe->hdr.conn_and_cmd_data =
2712                         cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2713                                      HW_CID(bp, cid)));
2714         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2715         if (common)
2716                 spe->hdr.type |=
2717                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2718
2719         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2720         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2721
2722         bp->spq_left--;
2723
2724         bnx2x_sp_prod_update(bp);
2725         spin_unlock_bh(&bp->spq_lock);
2726         return 0;
2727 }
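/* Illustrative sketch (editor's example): posting a MAC configuration
 * ramrod would look roughly like
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 *		      U64_HI(config_addr), U64_LO(config_addr), 0);
 *
 * where the command id and config_addr stand in for real values; the
 * completion arrives on the fastpath ring and reaches bnx2x_sp_event().
 */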
2728
2729 /* acquire split MCP access lock register */
2730 static int bnx2x_acquire_alr(struct bnx2x *bp)
2731 {
2732         u32 i, j, val;
2733         int rc = 0;
2734
2735         might_sleep();
2736         i = 100;
2737         for (j = 0; j < i*10; j++) {
2738                 val = (1UL << 31);
2739                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2740                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2741                 if (val & (1L << 31))
2742                         break;
2743
2744                 msleep(5);
2745         }
2746         if (!(val & (1L << 31))) {
2747                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2748                 rc = -EBUSY;
2749         }
2750
2751         return rc;
2752 }
2753
2754 /* release split MCP access lock register */
2755 static void bnx2x_release_alr(struct bnx2x *bp)
2756 {
2757         u32 val = 0;
2758
2759         REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2760 }
2761
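/* Refresh the cached default status block indices.  The returned
 * bitmask tells which index changed: bit 0 - attention bits, bit 1 -
 * CSTORM, bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.
 */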
2762 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2763 {
2764         struct host_def_status_block *def_sb = bp->def_status_blk;
2765         u16 rc = 0;
2766
2767         barrier(); /* status block is written to by the chip */
2768         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2769                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2770                 rc |= 1;
2771         }
2772         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2773                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2774                 rc |= 2;
2775         }
2776         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2777                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2778                 rc |= 4;
2779         }
2780         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2781                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2782                 rc |= 8;
2783         }
2784         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2785                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2786                 rc |= 16;
2787         }
2788         return rc;
2789 }
2790
2791 /*
2792  * slow path service functions
2793  */
2794
2795 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2796 {
2797         int port = BP_PORT(bp);
2798         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2799                        COMMAND_REG_ATTN_BITS_SET);
2800         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2801                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2802         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2803                                        NIG_REG_MASK_INTERRUPT_PORT0;
2804         u32 aeu_mask;
2805         u32 nig_mask = 0;
2806
2807         if (bp->attn_state & asserted)
2808                 BNX2X_ERR("IGU ERROR\n");
2809
2810         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2811         aeu_mask = REG_RD(bp, aeu_addr);
2812
2813         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2814            aeu_mask, asserted);
2815         aeu_mask &= ~(asserted & 0xff);
2816         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2817
2818         REG_WR(bp, aeu_addr, aeu_mask);
2819         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2820
2821         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2822         bp->attn_state |= asserted;
2823         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2824
2825         if (asserted & ATTN_HARD_WIRED_MASK) {
2826                 if (asserted & ATTN_NIG_FOR_FUNC) {
2827
2828                         bnx2x_acquire_phy_lock(bp);
2829
2830                         /* save nig interrupt mask */
2831                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2832                         REG_WR(bp, nig_int_mask_addr, 0);
2833
2834                         bnx2x_link_attn(bp);
2835
2836                         /* handle unicore attn? */
2837                 }
2838                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2839                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2840
2841                 if (asserted & GPIO_2_FUNC)
2842                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2843
2844                 if (asserted & GPIO_3_FUNC)
2845                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2846
2847                 if (asserted & GPIO_4_FUNC)
2848                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2849
2850                 if (port == 0) {
2851                         if (asserted & ATTN_GENERAL_ATTN_1) {
2852                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2853                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2854                         }
2855                         if (asserted & ATTN_GENERAL_ATTN_2) {
2856                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2857                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2858                         }
2859                         if (asserted & ATTN_GENERAL_ATTN_3) {
2860                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2861                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2862                         }
2863                 } else {
2864                         if (asserted & ATTN_GENERAL_ATTN_4) {
2865                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2866                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2867                         }
2868                         if (asserted & ATTN_GENERAL_ATTN_5) {
2869                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2870                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2871                         }
2872                         if (asserted & ATTN_GENERAL_ATTN_6) {
2873                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2874                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2875                         }
2876                 }
2877
2878         } /* if hardwired */
2879
2880         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2881            asserted, hc_addr);
2882         REG_WR(bp, hc_addr, asserted);
2883
2884         /* now set back the mask */
2885         if (asserted & ATTN_NIG_FOR_FUNC) {
2886                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2887                 bnx2x_release_phy_lock(bp);
2888         }
2889 }
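/* Worked example of the mask sequence above (hypothetical values): with
 * aeu_mask == 0xff and asserted == 0x05, the newly asserted lines are
 * first masked in the AEU (aeu_mask &= ~(asserted & 0xff) -> 0xfa) so
 * they cannot re-fire, then recorded in bp->attn_state, and finally
 * written to the HC ATTN_BITS_SET register to acknowledge them towards
 * the IGU.  bnx2x_attn_int_deasserted() runs the mirror image of this
 * sequence.
 */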
2890
2891 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2892 {
2893         int port = BP_PORT(bp);
2894
2895         /* mark the failure */
2896         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2897         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2898         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2899                  bp->link_params.ext_phy_config);
2900
2901         /* log the failure */
2902         printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2903                " the driver to shut down the card to prevent permanent"
2904                " damage.  Please contact Dell Support for assistance\n",
2905                bp->dev->name);
2906 }
2907
2908 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2909 {
2910         int port = BP_PORT(bp);
2911         int reg_offset;
2912         u32 val, swap_val, swap_override;
2913
2914         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2915                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2916
2917         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2918
2919                 val = REG_RD(bp, reg_offset);
2920                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2921                 REG_WR(bp, reg_offset, val);
2922
2923                 BNX2X_ERR("SPIO5 hw attention\n");
2924
2925                 /* Fan failure attention */
2926                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2927                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2928                         /* Low power mode is controlled by GPIO 2 */
2929                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2930                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2931                         /* The PHY reset is controlled by GPIO 1 */
2932                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2933                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2934                         break;
2935
2936                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2937                         /* The PHY reset is controlled by GPIO 1 */
2938                         /* fake the port number to cancel the swap done in
2939                            set_gpio() */
2940                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2941                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2942                         port = (swap_val && swap_override) ^ 1;
2943                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2944                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2945                         break;
2946
2947                 default:
2948                         break;
2949                 }
2950                 bnx2x_fan_failure(bp);
2951         }
2952
2953         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2954                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2955                 bnx2x_acquire_phy_lock(bp);
2956                 bnx2x_handle_module_detect_int(&bp->link_params);
2957                 bnx2x_release_phy_lock(bp);
2958         }
2959
2960         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2961
2962                 val = REG_RD(bp, reg_offset);
2963                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2964                 REG_WR(bp, reg_offset, val);
2965
2966                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2967                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2968                 bnx2x_panic();
2969         }
2970 }
2971
2972 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2973 {
2974         u32 val;
2975
2976         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2977
2978                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2979                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2980                 /* DORQ discard attention */
2981                 if (val & 0x2)
2982                         BNX2X_ERR("FATAL error from DORQ\n");
2983         }
2984
2985         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2986
2987                 int port = BP_PORT(bp);
2988                 int reg_offset;
2989
2990                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2991                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2992
2993                 val = REG_RD(bp, reg_offset);
2994                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2995                 REG_WR(bp, reg_offset, val);
2996
2997                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2998                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2999                 bnx2x_panic();
3000         }
3001 }
3002
3003 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3004 {
3005         u32 val;
3006
3007         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3008
3009                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3010                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3011                 /* CFC error attention */
3012                 if (val & 0x2)
3013                         BNX2X_ERR("FATAL error from CFC\n");
3014         }
3015
3016         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3017
3018                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3019                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3020                 /* RQ_USDMDP_FIFO_OVERFLOW */
3021                 if (val & 0x18000)
3022                         BNX2X_ERR("FATAL error from PXP\n");
3023         }
3024
3025         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3026
3027                 int port = BP_PORT(bp);
3028                 int reg_offset;
3029
3030                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3031                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3032
3033                 val = REG_RD(bp, reg_offset);
3034                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3035                 REG_WR(bp, reg_offset, val);
3036
3037                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3038                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3039                 bnx2x_panic();
3040         }
3041 }
3042
3043 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3044 {
3045         u32 val;
3046
3047         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3048
3049                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3050                         int func = BP_FUNC(bp);
3051
3052                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3053                         bp->mf_config = SHMEM_RD(bp,
3054                                            mf_cfg.func_mf_config[func].config);
3055                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3056                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3057                                 bnx2x_dcc_event(bp,
3058                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3059                         bnx2x__link_status_update(bp);
3060                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3061                                 bnx2x_pmf_update(bp);
3062
3063                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3064
3065                         BNX2X_ERR("MC assert!\n");
3066                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3067                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3068                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3069                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3070                         bnx2x_panic();
3071
3072                 } else if (attn & BNX2X_MCP_ASSERT) {
3073
3074                         BNX2X_ERR("MCP assert!\n");
3075                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3076                         bnx2x_fw_dump(bp);
3077
3078                 } else
3079                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3080         }
3081
3082         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3083                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3084                 if (attn & BNX2X_GRC_TIMEOUT) {
3085                         val = CHIP_IS_E1H(bp) ?
3086                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3087                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3088                 }
3089                 if (attn & BNX2X_GRC_RSV) {
3090                         val = CHIP_IS_E1H(bp) ?
3091                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3092                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3093                 }
3094                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3095         }
3096 }
3097
3098 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3099 {
3100         struct attn_route attn;
3101         struct attn_route group_mask;
3102         int port = BP_PORT(bp);
3103         int index;
3104         u32 reg_addr;
3105         u32 val;
3106         u32 aeu_mask;
3107
3108         /* need to take the HW lock because the MCP or the other port
3109            might also try to handle this event */
3110         bnx2x_acquire_alr(bp);
3111
3112         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3113         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3114         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3115         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3116         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3117            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3118
3119         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3120                 if (deasserted & (1 << index)) {
3121                         group_mask = bp->attn_group[index];
3122
3123                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3124                            index, group_mask.sig[0], group_mask.sig[1],
3125                            group_mask.sig[2], group_mask.sig[3]);
3126
3127                         bnx2x_attn_int_deasserted3(bp,
3128                                         attn.sig[3] & group_mask.sig[3]);
3129                         bnx2x_attn_int_deasserted1(bp,
3130                                         attn.sig[1] & group_mask.sig[1]);
3131                         bnx2x_attn_int_deasserted2(bp,
3132                                         attn.sig[2] & group_mask.sig[2]);
3133                         bnx2x_attn_int_deasserted0(bp,
3134                                         attn.sig[0] & group_mask.sig[0]);
3135
3136                         if ((attn.sig[0] & group_mask.sig[0] &
3137                                                 HW_PRTY_ASSERT_SET_0) ||
3138                             (attn.sig[1] & group_mask.sig[1] &
3139                                                 HW_PRTY_ASSERT_SET_1) ||
3140                             (attn.sig[2] & group_mask.sig[2] &
3141                                                 HW_PRTY_ASSERT_SET_2))
3142                                 BNX2X_ERR("FATAL HW block parity attention\n");
3143                 }
3144         }
3145
3146         bnx2x_release_alr(bp);
3147
3148         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3149
3150         val = ~deasserted;
3151         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3152            val, reg_addr);
3153         REG_WR(bp, reg_addr, val);
3154
3155         if (~bp->attn_state & deasserted)
3156                 BNX2X_ERR("IGU ERROR\n");
3157
3158         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3159                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3160
3161         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3162         aeu_mask = REG_RD(bp, reg_addr);
3163
3164         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3165            aeu_mask, deasserted);
3166         aeu_mask |= (deasserted & 0xff);
3167         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3168
3169         REG_WR(bp, reg_addr, aeu_mask);
3170         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3171
3172         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3173         bp->attn_state &= ~deasserted;
3174         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3175 }
3176
3177 static void bnx2x_attn_int(struct bnx2x *bp)
3178 {
3179         /* read local copy of bits */
3180         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3181                                                                 attn_bits);
3182         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3183                                                                 attn_bits_ack);
3184         u32 attn_state = bp->attn_state;
3185
3186         /* look for changed bits */
3187         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3188         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3189
3190         DP(NETIF_MSG_HW,
3191            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3192            attn_bits, attn_ack, asserted, deasserted);
3193
3194         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3195                 BNX2X_ERR("BAD attention state\n");
3196
3197         /* handle bits that were raised */
3198         if (asserted)
3199                 bnx2x_attn_int_asserted(bp, asserted);
3200
3201         if (deasserted)
3202                 bnx2x_attn_int_deasserted(bp, deasserted);
3203 }
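/* Per-line classification used above, as a truth table:
 *
 *   attn_bits  attn_ack  attn_state | meaning
 *       1          0          0     | newly asserted   -> handle
 *       0          1          1     | newly deasserted -> handle
 *       1          1          1     | asserted, already handled
 *       0          0          0     | idle
 *
 * A line where attn_bits == attn_ack but attn_state disagrees means the
 * driver's bookkeeping drifted from the hardware - hence the "BAD
 * attention state" check.  Example: attn_bits = 0x5, attn_ack = 0x1,
 * attn_state = 0x1 gives asserted = 0x4 and deasserted = 0x0.
 */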
3204
3205 static void bnx2x_sp_task(struct work_struct *work)
3206 {
3207         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3208         u16 status;
3209
3210
3211         /* Return here if interrupt is disabled */
3212         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3213                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3214                 return;
3215         }
3216
3217         status = bnx2x_update_dsb_idx(bp);
3218 /*      if (status == 0)                                     */
3219 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3220
3221         DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3222
3223         /* HW attentions */
3224         if (status & 0x1)
3225                 bnx2x_attn_int(bp);
3226
3227         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3228                      IGU_INT_NOP, 1);
3229         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3230                      IGU_INT_NOP, 1);
3231         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3232                      IGU_INT_NOP, 1);
3233         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3234                      IGU_INT_NOP, 1);
3235         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3236                      IGU_INT_ENABLE, 1);
3237
3238 }
3239
3240 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3241 {
3242         struct net_device *dev = dev_instance;
3243         struct bnx2x *bp = netdev_priv(dev);
3244
3245         /* Return here if interrupt is disabled */
3246         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3247                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3248                 return IRQ_HANDLED;
3249         }
3250
3251         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3252
3253 #ifdef BNX2X_STOP_ON_ERROR
3254         if (unlikely(bp->panic))
3255                 return IRQ_HANDLED;
3256 #endif
3257
3258 #ifdef BCM_CNIC
3259         {
3260                 struct cnic_ops *c_ops;
3261
3262                 rcu_read_lock();
3263                 c_ops = rcu_dereference(bp->cnic_ops);
3264                 if (c_ops)
3265                         c_ops->cnic_handler(bp->cnic_data, NULL);
3266                 rcu_read_unlock();
3267         }
3268 #endif
3269         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3270
3271         return IRQ_HANDLED;
3272 }
3273
3274 /* end of slow path */
3275
3276 /* Statistics */
3277
3278 /****************************************************************************
3279 * Macros
3280 ****************************************************************************/
3281
3282 /* sum[hi:lo] += add[hi:lo] */
3283 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3284         do { \
3285                 s_lo += a_lo; \
3286                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3287         } while (0)
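/* Carry example: with sum = 0x00000000:0xffffffff and addend =
 * 0x00000000:0x00000002, s_lo wraps to 0x00000001; since s_lo < a_lo a
 * carry is added to s_hi, giving 0x00000001:0x00000001 - i.e. a full
 * 64-bit add split across two u32 halves.
 */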
3288
3289 /* difference = minuend - subtrahend */
3290 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3291         do { \
3292                 if (m_lo < s_lo) { \
3293                         /* underflow */ \
3294                         d_hi = m_hi - s_hi; \
3295                         if (d_hi > 0) { \
3296                                 /* we can borrow 1 */ \
3297                                 d_hi--; \
3298                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3299                         } else { \
3300                                 /* m_hi <= s_hi */ \
3301                                 d_hi = 0; \
3302                                 d_lo = 0; \
3303                         } \
3304                 } else { \
3305                         /* m_lo >= s_lo */ \
3306                         if (m_hi < s_hi) { \
3307                                 d_hi = 0; \
3308                                 d_lo = 0; \
3309                         } else { \
3310                                 /* m_hi >= s_hi */ \
3311                                 d_hi = m_hi - s_hi; \
3312                                 d_lo = m_lo - s_lo; \
3313                         } \
3314                 } \
3315         } while (0)
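/* Borrow example: minuend 0x00000002:0x00000000 minus subtrahend
 * 0x00000000:0x00000001 takes the underflow branch (m_lo < s_lo), borrows
 * 1 from d_hi and computes d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff,
 * giving 0x00000001:0xffffffff.  A negative difference (minuend smaller
 * than subtrahend) is deliberately clamped to 0:0 rather than wrapped,
 * which suits monotonically increasing hardware counters.
 */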
3316
3317 #define UPDATE_STAT64(s, t) \
3318         do { \
3319                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3320                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3321                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3322                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3323                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3324                        pstats->mac_stx[1].t##_lo, diff.lo); \
3325         } while (0)
3326
3327 #define UPDATE_STAT64_NIG(s, t) \
3328         do { \
3329                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3330                         diff.lo, new->s##_lo, old->s##_lo); \
3331                 ADD_64(estats->t##_hi, diff.hi, \
3332                        estats->t##_lo, diff.lo); \
3333         } while (0)
3334
3335 /* sum[hi:lo] += add */
3336 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3337         do { \
3338                 s_lo += a; \
3339                 s_hi += (s_lo < a) ? 1 : 0; \
3340         } while (0)
3341
3342 #define UPDATE_EXTEND_STAT(s) \
3343         do { \
3344                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3345                               pstats->mac_stx[1].s##_lo, \
3346                               new->s); \
3347         } while (0)
3348
3349 #define UPDATE_EXTEND_TSTAT(s, t) \
3350         do { \
3351                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3352                 old_tclient->s = tclient->s; \
3353                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3354         } while (0)
3355
3356 #define UPDATE_EXTEND_USTAT(s, t) \
3357         do { \
3358                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3359                 old_uclient->s = uclient->s; \
3360                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3361         } while (0)
3362
3363 #define UPDATE_EXTEND_XSTAT(s, t) \
3364         do { \
3365                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3366                 old_xclient->s = xclient->s; \
3367                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3368         } while (0)
3369
3370 /* minuend -= subtrahend */
3371 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3372         do { \
3373                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3374         } while (0)
3375
3376 /* minuend[hi:lo] -= subtrahend */
3377 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3378         do { \
3379                 SUB_64(m_hi, 0, m_lo, s); \
3380         } while (0)
3381
3382 #define SUB_EXTEND_USTAT(s, t) \
3383         do { \
3384                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3385                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3386         } while (0)
3387
3388 /*
3389  * General service functions
3390  */
3391
3392 static inline long bnx2x_hilo(u32 *hiref)
3393 {
3394         u32 lo = *(hiref + 1);
3395 #if (BITS_PER_LONG == 64)
3396         u32 hi = *hiref;
3397
3398         return HILO_U64(hi, lo);
3399 #else
3400         return lo;
3401 #endif
3402 }
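/* Note: on 32-bit kernels (BITS_PER_LONG != 64) only the low dword is
 * returned, so values read through bnx2x_hilo() wrap at 2^32 there; the
 * full 64-bit counter is only visible on 64-bit builds via HILO_U64().
 */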
3403
3404 /*
3405  * Statistics post and completion service functions
3406  */
3407
3408 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3409 {
3410         if (!bp->stats_pending) {
3411                 struct eth_query_ramrod_data ramrod_data = {0};
3412                 int i, rc;
3413
3414                 ramrod_data.drv_counter = bp->stats_counter++;
3415                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3416                 for_each_queue(bp, i)
3417                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3418
3419                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3420                                    ((u32 *)&ramrod_data)[1],
3421                                    ((u32 *)&ramrod_data)[0], 0);
3422                 if (rc == 0) {
3423                         /* stats ramrod has its own slot on the spq */
3424                         bp->spq_left++;
3425                         bp->stats_pending = 1;
3426                 }
3427         }
3428 }
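/* The ramrod payload travels by value through the SPQ: the two dwords of
 * eth_query_ramrod_data ride in the data_hi/data_lo arguments of
 * bnx2x_sp_post(), hence the ((u32 *)&ramrod_data)[1]/[0] split.  Since
 * the statistics ramrod completes in its own slot, the SPQ credit that
 * bnx2x_sp_post() consumed is returned via bp->spq_left++.
 */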
3429
3430 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3431 {
3432         struct dmae_command *dmae = &bp->stats_dmae;
3433         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3434
3435         *stats_comp = DMAE_COMP_VAL;
3436         if (CHIP_REV_IS_SLOW(bp))
3437                 return;
3438
3439         /* loader */
3440         if (bp->executer_idx) {
3441                 int loader_idx = PMF_DMAE_C(bp);
3442
3443                 memset(dmae, 0, sizeof(struct dmae_command));
3444
3445                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3446                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3447                                 DMAE_CMD_DST_RESET |
3448 #ifdef __BIG_ENDIAN
3449                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3450 #else
3451                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3452 #endif
3453                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3454                                                DMAE_CMD_PORT_0) |
3455                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3456                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3457                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3458                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3459                                      sizeof(struct dmae_command) *
3460                                      (loader_idx + 1)) >> 2;
3461                 dmae->dst_addr_hi = 0;
3462                 dmae->len = sizeof(struct dmae_command) >> 2;
3463                 if (CHIP_IS_E1(bp))
3464                         dmae->len--;
3465                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3466                 dmae->comp_addr_hi = 0;
3467                 dmae->comp_val = 1;
3468
3469                 *stats_comp = 0;
3470                 bnx2x_post_dmae(bp, dmae, loader_idx);
3471
3472         } else if (bp->func_stx) {
3473                 *stats_comp = 0;
3474                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3475         }
3476 }
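/* Two posting modes are used above.  When a chain of DMAE commands was
 * queued in the slowpath area (bp->executer_idx != 0), a "loader" command
 * is posted instead of the chain itself: it DMAEs a queued command into
 * the adjacent DMAE command-memory slot (loader_idx + 1) and its
 * completion value kicks that slot's GO register to execute it.  A
 * function that only has per-function statistics (bp->func_stx) posts its
 * single prebuilt command directly.
 */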
3477
3478 static int bnx2x_stats_comp(struct bnx2x *bp)
3479 {
3480         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3481         int cnt = 10;
3482
3483         might_sleep();
3484         while (*stats_comp != DMAE_COMP_VAL) {
3485                 if (!cnt) {
3486                         BNX2X_ERR("timeout waiting for stats to finish\n");
3487                         break;
3488                 }
3489                 cnt--;
3490                 msleep(1);
3491         }
3492         return 1;
3493 }
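/* Completion convention: the last DMAE command in every statistics chain
 * has comp_addr pointing at the stats_comp word and comp_val set to
 * DMAE_COMP_VAL, so bnx2x_stats_comp() just polls that word for up to
 * roughly 10 ms.  Note that it returns 1 whether or not the completion
 * actually arrived; a timeout is only reported through BNX2X_ERR().
 */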
3494
3495 /*
3496  * Statistics service functions
3497  */
3498
3499 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3500 {
3501         struct dmae_command *dmae;
3502         u32 opcode;
3503         int loader_idx = PMF_DMAE_C(bp);
3504         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3505
3506         /* sanity */
3507         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3508                 BNX2X_ERR("BUG!\n");
3509                 return;
3510         }
3511
3512         bp->executer_idx = 0;
3513
3514         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3515                   DMAE_CMD_C_ENABLE |
3516                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3517 #ifdef __BIG_ENDIAN
3518                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3519 #else
3520                   DMAE_CMD_ENDIANITY_DW_SWAP |
3521 #endif
3522                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3523                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3524
3525         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3526         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3527         dmae->src_addr_lo = bp->port.port_stx >> 2;
3528         dmae->src_addr_hi = 0;
3529         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3530         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3531         dmae->len = DMAE_LEN32_RD_MAX;
3532         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3533         dmae->comp_addr_hi = 0;
3534         dmae->comp_val = 1;
3535
3536         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3537         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3538         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3539         dmae->src_addr_hi = 0;
3540         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3541                                    DMAE_LEN32_RD_MAX * 4);
3542         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3543                                    DMAE_LEN32_RD_MAX * 4);
3544         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3545         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3546         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3547         dmae->comp_val = DMAE_COMP_VAL;
3548
3549         *stats_comp = 0;
3550         bnx2x_hw_stats_post(bp);
3551         bnx2x_stats_comp(bp);
3552 }
3553
3554 static void bnx2x_port_stats_init(struct bnx2x *bp)
3555 {
3556         struct dmae_command *dmae;
3557         int port = BP_PORT(bp);
3558         int vn = BP_E1HVN(bp);
3559         u32 opcode;
3560         int loader_idx = PMF_DMAE_C(bp);
3561         u32 mac_addr;
3562         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3563
3564         /* sanity */
3565         if (!bp->link_vars.link_up || !bp->port.pmf) {
3566                 BNX2X_ERR("BUG!\n");
3567                 return;
3568         }
3569
3570         bp->executer_idx = 0;
3571
3572         /* MCP */
3573         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3574                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3575                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3576 #ifdef __BIG_ENDIAN
3577                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3578 #else
3579                   DMAE_CMD_ENDIANITY_DW_SWAP |
3580 #endif
3581                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3582                   (vn << DMAE_CMD_E1HVN_SHIFT));
3583
3584         if (bp->port.port_stx) {
3585
3586                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3587                 dmae->opcode = opcode;
3588                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3589                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3590                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3591                 dmae->dst_addr_hi = 0;
3592                 dmae->len = sizeof(struct host_port_stats) >> 2;
3593                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3594                 dmae->comp_addr_hi = 0;
3595                 dmae->comp_val = 1;
3596         }
3597
3598         if (bp->func_stx) {
3599
3600                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3601                 dmae->opcode = opcode;
3602                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3603                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3604                 dmae->dst_addr_lo = bp->func_stx >> 2;
3605                 dmae->dst_addr_hi = 0;
3606                 dmae->len = sizeof(struct host_func_stats) >> 2;
3607                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3608                 dmae->comp_addr_hi = 0;
3609                 dmae->comp_val = 1;
3610         }
3611
3612         /* MAC */
3613         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3614                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3615                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3616 #ifdef __BIG_ENDIAN
3617                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3618 #else
3619                   DMAE_CMD_ENDIANITY_DW_SWAP |
3620 #endif
3621                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3622                   (vn << DMAE_CMD_E1HVN_SHIFT));
3623
3624         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3625
3626                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3627                                    NIG_REG_INGRESS_BMAC0_MEM);
3628
3629                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3630                    BIGMAC_REGISTER_TX_STAT_GTBYT */
3631                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3632                 dmae->opcode = opcode;
3633                 dmae->src_addr_lo = (mac_addr +
3634                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3635                 dmae->src_addr_hi = 0;
3636                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3637                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3638                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3639                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3640                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3641                 dmae->comp_addr_hi = 0;
3642                 dmae->comp_val = 1;
3643
3644                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3645                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
3646                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3647                 dmae->opcode = opcode;
3648                 dmae->src_addr_lo = (mac_addr +
3649                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3650                 dmae->src_addr_hi = 0;
3651                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3652                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3653                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3654                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3655                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3656                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3657                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3658                 dmae->comp_addr_hi = 0;
3659                 dmae->comp_val = 1;
3660
3661         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3662
3663                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3664
3665                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3666                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3667                 dmae->opcode = opcode;
3668                 dmae->src_addr_lo = (mac_addr +
3669                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3670                 dmae->src_addr_hi = 0;
3671                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3672                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3673                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3674                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3675                 dmae->comp_addr_hi = 0;
3676                 dmae->comp_val = 1;
3677
3678                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3679                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3680                 dmae->opcode = opcode;
3681                 dmae->src_addr_lo = (mac_addr +
3682                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3683                 dmae->src_addr_hi = 0;
3684                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3685                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3686                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3687                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3688                 dmae->len = 1;
3689                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3690                 dmae->comp_addr_hi = 0;
3691                 dmae->comp_val = 1;
3692
3693                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3694                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3695                 dmae->opcode = opcode;
3696                 dmae->src_addr_lo = (mac_addr +
3697                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3698                 dmae->src_addr_hi = 0;
3699                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3700                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3701                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3702                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3703                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3704                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3705                 dmae->comp_addr_hi = 0;
3706                 dmae->comp_val = 1;
3707         }
3708
3709         /* NIG */
3710         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3711         dmae->opcode = opcode;
3712         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3713                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
3714         dmae->src_addr_hi = 0;
3715         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3716         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3717         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3718         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3719         dmae->comp_addr_hi = 0;
3720         dmae->comp_val = 1;
3721
3722         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3723         dmae->opcode = opcode;
3724         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3725                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3726         dmae->src_addr_hi = 0;
3727         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3728                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3729         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3730                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
3731         dmae->len = (2*sizeof(u32)) >> 2;
3732         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3733         dmae->comp_addr_hi = 0;
3734         dmae->comp_val = 1;
3735
3736         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3737         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3738                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3739                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3740 #ifdef __BIG_ENDIAN
3741                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3742 #else
3743                         DMAE_CMD_ENDIANITY_DW_SWAP |
3744 #endif
3745                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3746                         (vn << DMAE_CMD_E1HVN_SHIFT));
3747         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3748                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3749         dmae->src_addr_hi = 0;
3750         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3751                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3752         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3753                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
3754         dmae->len = (2*sizeof(u32)) >> 2;
3755         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3756         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3757         dmae->comp_val = DMAE_COMP_VAL;
3758
3759         *stats_comp = 0;
3760 }
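/* Summary of the chain built above (stages are queued only when they
 * apply): host port stats -> MCP, host function stats -> MCP, the active
 * MAC's counters (two BMAC windows or three EMAC windows) -> host, and
 * three NIG reads (BRB discard counters, egress_mac_pkt0, egress_mac_pkt1).
 * All commands but the last complete back into a DMAE GO register, feeding
 * the loader mechanism of bnx2x_hw_stats_post(); only the final NIG
 * command completes into stats_comp with DMAE_COMP_VAL so that
 * bnx2x_stats_comp() can observe the end of the whole chain.
 */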
3761
3762 static void bnx2x_func_stats_init(struct bnx2x *bp)
3763 {
3764         struct dmae_command *dmae = &bp->stats_dmae;
3765         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3766
3767         /* sanity */
3768         if (!bp->func_stx) {
3769                 BNX2X_ERR("BUG!\n");
3770                 return;
3771         }
3772
3773         bp->executer_idx = 0;
3774         memset(dmae, 0, sizeof(struct dmae_command));
3775
3776         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3777                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3778                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3779 #ifdef __BIG_ENDIAN
3780                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
3781 #else
3782                         DMAE_CMD_ENDIANITY_DW_SWAP |
3783 #endif
3784                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3785                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3786         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3787         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3788         dmae->dst_addr_lo = bp->func_stx >> 2;
3789         dmae->dst_addr_hi = 0;
3790         dmae->len = sizeof(struct host_func_stats) >> 2;
3791         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3792         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3793         dmae->comp_val = DMAE_COMP_VAL;
3794
3795         *stats_comp = 0;
3796 }
3797
3798 static void bnx2x_stats_start(struct bnx2x *bp)
3799 {
3800         if (bp->port.pmf)
3801                 bnx2x_port_stats_init(bp);
3802
3803         else if (bp->func_stx)
3804                 bnx2x_func_stats_init(bp);
3805
3806         bnx2x_hw_stats_post(bp);
3807         bnx2x_storm_stats_post(bp);
3808 }
3809
3810 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3811 {
3812         bnx2x_stats_comp(bp);
3813         bnx2x_stats_pmf_update(bp);
3814         bnx2x_stats_start(bp);
3815 }
3816
3817 static void bnx2x_stats_restart(struct bnx2x *bp)
3818 {
3819         bnx2x_stats_comp(bp);
3820         bnx2x_stats_start(bp);
3821 }
3822
3823 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3824 {
3825         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3826         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3827         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3828         struct {
3829                 u32 lo;
3830                 u32 hi;
3831         } diff;
3832
3833         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3834         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3835         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3836         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3837         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3838         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3839         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3840         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3841         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3842         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3843         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3844         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3845         UPDATE_STAT64(tx_stat_gt127,
3846                                 tx_stat_etherstatspkts65octetsto127octets);
3847         UPDATE_STAT64(tx_stat_gt255,
3848                                 tx_stat_etherstatspkts128octetsto255octets);
3849         UPDATE_STAT64(tx_stat_gt511,
3850                                 tx_stat_etherstatspkts256octetsto511octets);
3851         UPDATE_STAT64(tx_stat_gt1023,
3852                                 tx_stat_etherstatspkts512octetsto1023octets);
3853         UPDATE_STAT64(tx_stat_gt1518,
3854                                 tx_stat_etherstatspkts1024octetsto1522octets);
3855         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3856         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3857         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3858         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3859         UPDATE_STAT64(tx_stat_gterr,
3860                                 tx_stat_dot3statsinternalmactransmiterrors);
3861         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3862
3863         estats->pause_frames_received_hi =
3864                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3865         estats->pause_frames_received_lo =
3866                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3867
3868         estats->pause_frames_sent_hi =
3869                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3870         estats->pause_frames_sent_lo =
3871                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3872 }
3873
3874 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3875 {
3876         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3877         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3878         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3879
3880         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3881         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3882         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3883         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3884         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3885         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3886         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3887         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3888         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3889         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3890         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3891         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3892         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3893         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3894         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3895         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3896         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3897         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3898         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3899         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3900         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3901         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3902         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3903         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3904         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3905         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3906         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3907         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3908         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3909         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3910         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3911
3912         estats->pause_frames_received_hi =
3913                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3914         estats->pause_frames_received_lo =
3915                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3916         ADD_64(estats->pause_frames_received_hi,
3917                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3918                estats->pause_frames_received_lo,
3919                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3920
3921         estats->pause_frames_sent_hi =
3922                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
3923         estats->pause_frames_sent_lo =
3924                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
3925         ADD_64(estats->pause_frames_sent_hi,
3926                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3927                estats->pause_frames_sent_lo,
3928                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3929 }
3930
3931 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3932 {
3933         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3934         struct nig_stats *old = &(bp->port.old_nig_stats);
3935         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3936         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3937         struct {
3938                 u32 lo;
3939                 u32 hi;
3940         } diff;
3941         u32 nig_timer_max;
3942
3943         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3944                 bnx2x_bmac_stats_update(bp);
3945
3946         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3947                 bnx2x_emac_stats_update(bp);
3948
3949         else { /* not reached in normal operation */
3950                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3951                 return -1;
3952         }
3953
3954         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3955                       new->brb_discard - old->brb_discard);
3956         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3957                       new->brb_truncate - old->brb_truncate);
3958
3959         UPDATE_STAT64_NIG(egress_mac_pkt0,
3960                                         etherstatspkts1024octetsto1522octets);
3961         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3962
3963         memcpy(old, new, sizeof(struct nig_stats));
3964
3965         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3966                sizeof(struct mac_stx));
3967         estats->brb_drop_hi = pstats->brb_drop_hi;
3968         estats->brb_drop_lo = pstats->brb_drop_lo;
3969
3970         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3971
3972         nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3973         if (nig_timer_max != estats->nig_timer_max) {
3974                 estats->nig_timer_max = nig_timer_max;
3975                 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3976         }
3977
3978         return 0;
3979 }
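/* The host_port_stats_start/end pair acts as a simple torn-read detector:
 * end is incremented and start is set equal to it in one statement, so a
 * consumer of the DMAE'd block (e.g. the MCP) that sees start != end
 * presumably caught the structure mid-update.
 */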
3980
3981 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3982 {
3983         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3984         struct tstorm_per_port_stats *tport =
3985                                         &stats->tstorm_common.port_statistics;
3986         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3987         struct bnx2x_eth_stats *estats = &bp->eth_stats;
3988         int i;
3989
3990         memcpy(&(fstats->total_bytes_received_hi),
3991                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3992                sizeof(struct host_func_stats) - 2*sizeof(u32));
3993         estats->error_bytes_received_hi = 0;
3994         estats->error_bytes_received_lo = 0;
3995         estats->etherstatsoverrsizepkts_hi = 0;
3996         estats->etherstatsoverrsizepkts_lo = 0;
3997         estats->no_buff_discard_hi = 0;
3998         estats->no_buff_discard_lo = 0;
3999
4000         for_each_queue(bp, i) {
4001                 struct bnx2x_fastpath *fp = &bp->fp[i];
4002                 int cl_id = fp->cl_id;
4003                 struct tstorm_per_client_stats *tclient =
4004                                 &stats->tstorm_common.client_statistics[cl_id];
4005                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4006                 struct ustorm_per_client_stats *uclient =
4007                                 &stats->ustorm_common.client_statistics[cl_id];
4008                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4009                 struct xstorm_per_client_stats *xclient =
4010                                 &stats->xstorm_common.client_statistics[cl_id];
4011                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4012                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4013                 u32 diff;
4014
4015                 /* are storm stats valid? */
4016                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4017                                                         bp->stats_counter) {
4018                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4019                            "  xstorm counter (%d) != stats_counter (%d)\n",
4020                            i, xclient->stats_counter, bp->stats_counter);
4021                         return -1;
4022                 }
4023                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4024                                                         bp->stats_counter) {
4025                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4026                            "  tstorm counter (%d) != stats_counter (%d)\n",
4027                            i, tclient->stats_counter, bp->stats_counter);
4028                         return -2;
4029                 }
4030                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4031                                                         bp->stats_counter) {
4032                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4033                            "  ustorm counter (%d) != stats_counter (%d)\n",
4034                            i, uclient->stats_counter, bp->stats_counter);
4035                         return -4;
4036                 }
4037
4038                 qstats->total_bytes_received_hi =
4039                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4040                 qstats->total_bytes_received_lo =
4041                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4042
4043                 ADD_64(qstats->total_bytes_received_hi,
4044                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4045                        qstats->total_bytes_received_lo,
4046                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4047
4048                 ADD_64(qstats->total_bytes_received_hi,
4049                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4050                        qstats->total_bytes_received_lo,
4051                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4052
4053                 qstats->valid_bytes_received_hi =
4054                                         qstats->total_bytes_received_hi;
4055                 qstats->valid_bytes_received_lo =
4056                                         qstats->total_bytes_received_lo;
4057
4058                 qstats->error_bytes_received_hi =
4059                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4060                 qstats->error_bytes_received_lo =
4061                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4062
4063                 ADD_64(qstats->total_bytes_received_hi,
4064                        qstats->error_bytes_received_hi,
4065                        qstats->total_bytes_received_lo,
4066                        qstats->error_bytes_received_lo);
4067
4068                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4069                                         total_unicast_packets_received);
4070                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4071                                         total_multicast_packets_received);
4072                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4073                                         total_broadcast_packets_received);
4074                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4075                                         etherstatsoverrsizepkts);
4076                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4077
4078                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4079                                         total_unicast_packets_received);
4080                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4081                                         total_multicast_packets_received);
4082                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4083                                         total_broadcast_packets_received);
4084                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4085                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4086                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4087
4088                 qstats->total_bytes_transmitted_hi =
4089                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4090                 qstats->total_bytes_transmitted_lo =
4091                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4092
4093                 ADD_64(qstats->total_bytes_transmitted_hi,
4094                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4095                        qstats->total_bytes_transmitted_lo,
4096                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4097
4098                 ADD_64(qstats->total_bytes_transmitted_hi,
4099                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4100                        qstats->total_bytes_transmitted_lo,
4101                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4102
4103                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4104                                         total_unicast_packets_transmitted);
4105                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4106                                         total_multicast_packets_transmitted);
4107                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4108                                         total_broadcast_packets_transmitted);
4109
4110                 old_tclient->checksum_discard = tclient->checksum_discard;
4111                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4112
4113                 ADD_64(fstats->total_bytes_received_hi,
4114                        qstats->total_bytes_received_hi,
4115                        fstats->total_bytes_received_lo,
4116                        qstats->total_bytes_received_lo);
4117                 ADD_64(fstats->total_bytes_transmitted_hi,
4118                        qstats->total_bytes_transmitted_hi,
4119                        fstats->total_bytes_transmitted_lo,
4120                        qstats->total_bytes_transmitted_lo);
4121                 ADD_64(fstats->total_unicast_packets_received_hi,
4122                        qstats->total_unicast_packets_received_hi,
4123                        fstats->total_unicast_packets_received_lo,
4124                        qstats->total_unicast_packets_received_lo);
4125                 ADD_64(fstats->total_multicast_packets_received_hi,
4126                        qstats->total_multicast_packets_received_hi,
4127                        fstats->total_multicast_packets_received_lo,
4128                        qstats->total_multicast_packets_received_lo);
4129                 ADD_64(fstats->total_broadcast_packets_received_hi,
4130                        qstats->total_broadcast_packets_received_hi,
4131                        fstats->total_broadcast_packets_received_lo,
4132                        qstats->total_broadcast_packets_received_lo);
4133                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4134                        qstats->total_unicast_packets_transmitted_hi,
4135                        fstats->total_unicast_packets_transmitted_lo,
4136                        qstats->total_unicast_packets_transmitted_lo);
4137                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4138                        qstats->total_multicast_packets_transmitted_hi,
4139                        fstats->total_multicast_packets_transmitted_lo,
4140                        qstats->total_multicast_packets_transmitted_lo);
4141                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4142                        qstats->total_broadcast_packets_transmitted_hi,
4143                        fstats->total_broadcast_packets_transmitted_lo,
4144                        qstats->total_broadcast_packets_transmitted_lo);
4145                 ADD_64(fstats->valid_bytes_received_hi,
4146                        qstats->valid_bytes_received_hi,
4147                        fstats->valid_bytes_received_lo,
4148                        qstats->valid_bytes_received_lo);
4149
4150                 ADD_64(estats->error_bytes_received_hi,
4151                        qstats->error_bytes_received_hi,
4152                        estats->error_bytes_received_lo,
4153                        qstats->error_bytes_received_lo);
4154                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4155                        qstats->etherstatsoverrsizepkts_hi,
4156                        estats->etherstatsoverrsizepkts_lo,
4157                        qstats->etherstatsoverrsizepkts_lo);
4158                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4159                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4160         }
4161
4162         ADD_64(fstats->total_bytes_received_hi,
4163                estats->rx_stat_ifhcinbadoctets_hi,
4164                fstats->total_bytes_received_lo,
4165                estats->rx_stat_ifhcinbadoctets_lo);
4166
4167         memcpy(estats, &(fstats->total_bytes_received_hi),
4168                sizeof(struct host_func_stats) - 2*sizeof(u32));
4169
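	/*
	 * Illustration only: the memcpy above copies every counter of
	 * host_func_stats into the matching leading fields of bnx2x_eth_stats,
	 * skipping the two u32 marker words (host_func_stats_start/_end) that
	 * bracket the counters -- assuming, as the size arithmetic suggests,
	 * that the start marker is the first member:
	 */
#if 0
	BUILD_BUG_ON(offsetof(struct host_func_stats,
			      total_bytes_received_hi) != sizeof(u32));
#endif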
4170         ADD_64(estats->etherstatsoverrsizepkts_hi,
4171                estats->rx_stat_dot3statsframestoolong_hi,
4172                estats->etherstatsoverrsizepkts_lo,
4173                estats->rx_stat_dot3statsframestoolong_lo);
4174         ADD_64(estats->error_bytes_received_hi,
4175                estats->rx_stat_ifhcinbadoctets_hi,
4176                estats->error_bytes_received_lo,
4177                estats->rx_stat_ifhcinbadoctets_lo);
4178
4179         if (bp->port.pmf) {
4180                 estats->mac_filter_discard =
4181                                 le32_to_cpu(tport->mac_filter_discard);
4182                 estats->xxoverflow_discard =
4183                                 le32_to_cpu(tport->xxoverflow_discard);
4184                 estats->brb_truncate_discard =
4185                                 le32_to_cpu(tport->brb_truncate_discard);
4186                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4187         }
4188
4189         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4190
4191         bp->stats_pending = 0;
4192
4193         return 0;
4194 }
4195
4196 static void bnx2x_net_stats_update(struct bnx2x *bp)
4197 {
4198         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4199         struct net_device_stats *nstats = &bp->dev->stats;
4200         int i;
4201
4202         nstats->rx_packets =
4203                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4204                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4205                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4206
4207         nstats->tx_packets =
4208                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4209                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4210                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4211
4212         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4213
4214         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4215
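	/*
	 * Illustration only: bnx2x_hilo() collapses a _hi/_lo pair of u32s
	 * into one value; the _lo word is assumed to immediately follow the
	 * _hi word, as in the stats structures used throughout this file.
	 * Sketch of the assumed helper:
	 */
#if 0
	static inline long example_hilo(u32 *hiref)
	{
		u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
		return HILO_U64(*hiref, lo);
#else
		return lo;	/* 32-bit hosts truncate to the low word */
#endif
	}
#endif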
4216         nstats->rx_dropped = estats->mac_discard;
4217         for_each_queue(bp, i)
4218                 nstats->rx_dropped +=
4219                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4220
4221         nstats->tx_dropped = 0;
4222
4223         nstats->multicast =
4224                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4225
4226         nstats->collisions =
4227                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4228
4229         nstats->rx_length_errors =
4230                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4231                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4232         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4233                                  bnx2x_hilo(&estats->brb_truncate_hi);
4234         nstats->rx_crc_errors =
4235                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4236         nstats->rx_frame_errors =
4237                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4238         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4239         nstats->rx_missed_errors = estats->xxoverflow_discard;
4240
4241         nstats->rx_errors = nstats->rx_length_errors +
4242                             nstats->rx_over_errors +
4243                             nstats->rx_crc_errors +
4244                             nstats->rx_frame_errors +
4245                             nstats->rx_fifo_errors +
4246                             nstats->rx_missed_errors;
4247
4248         nstats->tx_aborted_errors =
4249                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4250                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4251         nstats->tx_carrier_errors =
4252                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4253         nstats->tx_fifo_errors = 0;
4254         nstats->tx_heartbeat_errors = 0;
4255         nstats->tx_window_errors = 0;
4256
4257         nstats->tx_errors = nstats->tx_aborted_errors +
4258                             nstats->tx_carrier_errors +
4259             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4260 }
4261
4262 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4263 {
4264         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4265         int i;
4266
4267         estats->driver_xoff = 0;
4268         estats->rx_err_discard_pkt = 0;
4269         estats->rx_skb_alloc_failed = 0;
4270         estats->hw_csum_err = 0;
4271         for_each_queue(bp, i) {
4272                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4273
4274                 estats->driver_xoff += qstats->driver_xoff;
4275                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4276                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4277                 estats->hw_csum_err += qstats->hw_csum_err;
4278         }
4279 }
4280
4281 static void bnx2x_stats_update(struct bnx2x *bp)
4282 {
4283         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4284
4285         if (*stats_comp != DMAE_COMP_VAL)
4286                 return;
4287
4288         if (bp->port.pmf)
4289                 bnx2x_hw_stats_update(bp);
4290
4291         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4292                 BNX2X_ERR("storm stats not updated 3 times in a row\n");
4293                 bnx2x_panic();
4294                 return;
4295         }
4296
4297         bnx2x_net_stats_update(bp);
4298         bnx2x_drv_stats_update(bp);
4299
4300         if (bp->msglevel & NETIF_MSG_TIMER) {
4301                 struct bnx2x_fastpath *fp0_rx = bp->fp;
4302                 struct bnx2x_fastpath *fp0_tx = bp->fp;
4303                 struct tstorm_per_client_stats *old_tclient =
4304                                                         &bp->fp->old_tclient;
4305                 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4306                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4307                 struct net_device_stats *nstats = &bp->dev->stats;
4308                 int i;
4309
4310                 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4311                 printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
4312                                   "  tx pkt (%lx)\n",
4313                        bnx2x_tx_avail(fp0_tx),
4314                        le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4315                 printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
4316                                   "  rx pkt (%lx)\n",
4317                        (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4318                              fp0_rx->rx_comp_cons),
4319                        le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4320                 printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
4321                                   "brb truncate %u\n",
4322                        (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4323                        qstats->driver_xoff,
4324                        estats->brb_drop_lo, estats->brb_truncate_lo);
4325                 printk(KERN_DEBUG "tstats: checksum_discard %u  "
4326                         "packets_too_big_discard %lu  no_buff_discard %lu  "
4327                         "mac_discard %u  mac_filter_discard %u  "
4328                         "xxoverflow_discard %u  brb_truncate_discard %u  "
4329                         "ttl0_discard %u\n",
4330                        le32_to_cpu(old_tclient->checksum_discard),
4331                        bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4332                        bnx2x_hilo(&qstats->no_buff_discard_hi),
4333                        estats->mac_discard, estats->mac_filter_discard,
4334                        estats->xxoverflow_discard, estats->brb_truncate_discard,
4335                        le32_to_cpu(old_tclient->ttl0_discard));
4336
4337                 for_each_queue(bp, i) {
4338                         printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4339                                bnx2x_fp(bp, i, tx_pkt),
4340                                bnx2x_fp(bp, i, rx_pkt),
4341                                bnx2x_fp(bp, i, rx_calls));
4342                 }
4343         }
4344
4345         bnx2x_hw_stats_post(bp);
4346         bnx2x_storm_stats_post(bp);
4347 }
4348
4349 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4350 {
4351         struct dmae_command *dmae;
4352         u32 opcode;
4353         int loader_idx = PMF_DMAE_C(bp);
4354         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4355
4356         bp->executer_idx = 0;
4357
4358         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4359                   DMAE_CMD_C_ENABLE |
4360                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4361 #ifdef __BIG_ENDIAN
4362                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4363 #else
4364                   DMAE_CMD_ENDIANITY_DW_SWAP |
4365 #endif
4366                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4367                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4368
4369         if (bp->port.port_stx) {
4370
4371                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4372                 if (bp->func_stx)
4373                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4374                 else
4375                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4376                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4377                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4378                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4379                 dmae->dst_addr_hi = 0;
4380                 dmae->len = sizeof(struct host_port_stats) >> 2;
4381                 if (bp->func_stx) {
4382                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4383                         dmae->comp_addr_hi = 0;
4384                         dmae->comp_val = 1;
4385                 } else {
4386                         dmae->comp_addr_lo =
4387                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4388                         dmae->comp_addr_hi =
4389                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4390                         dmae->comp_val = DMAE_COMP_VAL;
4391
4392                         *stats_comp = 0;
4393                 }
4394         }
4395
4396         if (bp->func_stx) {
4397
4398                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4399                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4400                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4401                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4402                 dmae->dst_addr_lo = bp->func_stx >> 2;
4403                 dmae->dst_addr_hi = 0;
4404                 dmae->len = sizeof(struct host_func_stats) >> 2;
4405                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4406                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4407                 dmae->comp_val = DMAE_COMP_VAL;
4408
4409                 *stats_comp = 0;
4410         }
4411 }
4412
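/*
 * Illustration only: the DMAE completion handshake used above.  The driver
 * points comp_addr at the stats_comp scratch word, zeroes it, fires the
 * command, and waits for the engine to write DMAE_COMP_VAL back.  A minimal
 * polling sketch (bnx2x_stats_comp() is the real waiter):
 */
#if 0
static int example_wait_dmae_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt--)
			return -ETIMEDOUT;	/* engine never answered */
		msleep(1);
	}
	return 0;
}
#endif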
4413 static void bnx2x_stats_stop(struct bnx2x *bp)
4414 {
4415         int update = 0;
4416
4417         bnx2x_stats_comp(bp);
4418
4419         if (bp->port.pmf)
4420                 update = (bnx2x_hw_stats_update(bp) == 0);
4421
4422         update |= (bnx2x_storm_stats_update(bp) == 0);
4423
4424         if (update) {
4425                 bnx2x_net_stats_update(bp);
4426
4427                 if (bp->port.pmf)
4428                         bnx2x_port_stats_stop(bp);
4429
4430                 bnx2x_hw_stats_post(bp);
4431                 bnx2x_stats_comp(bp);
4432         }
4433 }
4434
4435 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4436 {
4437 }
4438
4439 static const struct {
4440         void (*action)(struct bnx2x *bp);
4441         enum bnx2x_stats_state next_state;
4442 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4443 /* state        event   */
4444 {
4445 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4446 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4447 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4448 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4449 },
4450 {
4451 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4452 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4453 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4454 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4455 }
4456 };
4457
4458 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4459 {
4460         enum bnx2x_stats_state state = bp->stats_state;
4461
4462         bnx2x_stats_stm[state][event].action(bp);
4463         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4464
4465         /* Make sure the state change is visible to other CPUs */
4466         smp_wmb();
4467
4468         if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4469                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4470                    state, event, bp->stats_state);
4471 }
4472
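/*
 * Illustration only: how the state machine above is driven.  Callers never
 * touch bp->stats_state directly; they feed events and the table selects
 * both the action and the next state, e.g.:
 */
#if 0
	/* DISABLED + LINK_UP -> bnx2x_stats_start(), state -> ENABLED */
	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);

	/* ENABLED + UPDATE -> bnx2x_stats_update(), state stays ENABLED */
	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
#endif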
4473 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4474 {
4475         struct dmae_command *dmae;
4476         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4477
4478         /* sanity */
4479         if (!bp->port.pmf || !bp->port.port_stx) {
4480                 BNX2X_ERR("BUG!\n");
4481                 return;
4482         }
4483
4484         bp->executer_idx = 0;
4485
4486         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4487         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4488                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4489                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4490 #ifdef __BIG_ENDIAN
4491                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4492 #else
4493                         DMAE_CMD_ENDIANITY_DW_SWAP |
4494 #endif
4495                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4496                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4497         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4498         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4499         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4500         dmae->dst_addr_hi = 0;
4501         dmae->len = sizeof(struct host_port_stats) >> 2;
4502         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4503         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4504         dmae->comp_val = DMAE_COMP_VAL;
4505
4506         *stats_comp = 0;
4507         bnx2x_hw_stats_post(bp);
4508         bnx2x_stats_comp(bp);
4509 }
4510
4511 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4512 {
4513         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4514         int port = BP_PORT(bp);
4515         int func;
4516         u32 func_stx;
4517
4518         /* sanity */
4519         if (!bp->port.pmf || !bp->func_stx) {
4520                 BNX2X_ERR("BUG!\n");
4521                 return;
4522         }
4523
4524         /* save our func_stx */
4525         func_stx = bp->func_stx;
4526
4527         for (vn = VN_0; vn < vn_max; vn++) {
4528                 func = 2*vn + port;
4529
4530                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4531                 bnx2x_func_stats_init(bp);
4532                 bnx2x_hw_stats_post(bp);
4533                 bnx2x_stats_comp(bp);
4534         }
4535
4536         /* restore our func_stx */
4537         bp->func_stx = func_stx;
4538 }
4539
4540 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4541 {
4542         struct dmae_command *dmae = &bp->stats_dmae;
4543         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4544
4545         /* sanity */
4546         if (!bp->func_stx) {
4547                 BNX2X_ERR("BUG!\n");
4548                 return;
4549         }
4550
4551         bp->executer_idx = 0;
4552         memset(dmae, 0, sizeof(struct dmae_command));
4553
4554         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4555                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4556                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4557 #ifdef __BIG_ENDIAN
4558                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4559 #else
4560                         DMAE_CMD_ENDIANITY_DW_SWAP |
4561 #endif
4562                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4563                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4564         dmae->src_addr_lo = bp->func_stx >> 2;
4565         dmae->src_addr_hi = 0;
4566         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4567         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4568         dmae->len = sizeof(struct host_func_stats) >> 2;
4569         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4570         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4571         dmae->comp_val = DMAE_COMP_VAL;
4572
4573         *stats_comp = 0;
4574         bnx2x_hw_stats_post(bp);
4575         bnx2x_stats_comp(bp);
4576 }
4577
4578 static void bnx2x_stats_init(struct bnx2x *bp)
4579 {
4580         int port = BP_PORT(bp);
4581         int func = BP_FUNC(bp);
4582         int i;
4583
4584         bp->stats_pending = 0;
4585         bp->executer_idx = 0;
4586         bp->stats_counter = 0;
4587
4588         /* port and func stats for management */
4589         if (!BP_NOMCP(bp)) {
4590                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4591                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4592
4593         } else {
4594                 bp->port.port_stx = 0;
4595                 bp->func_stx = 0;
4596         }
4597         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4598            bp->port.port_stx, bp->func_stx);
4599
4600         /* port stats */
4601         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4602         bp->port.old_nig_stats.brb_discard =
4603                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4604         bp->port.old_nig_stats.brb_truncate =
4605                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4606         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4607                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4608         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4609                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4610
4611         /* function stats */
4612         for_each_queue(bp, i) {
4613                 struct bnx2x_fastpath *fp = &bp->fp[i];
4614
4615                 memset(&fp->old_tclient, 0,
4616                        sizeof(struct tstorm_per_client_stats));
4617                 memset(&fp->old_uclient, 0,
4618                        sizeof(struct ustorm_per_client_stats));
4619                 memset(&fp->old_xclient, 0,
4620                        sizeof(struct xstorm_per_client_stats));
4621                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4622         }
4623
4624         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4625         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4626
4627         bp->stats_state = STATS_STATE_DISABLED;
4628
4629         if (bp->port.pmf) {
4630                 if (bp->port.port_stx)
4631                         bnx2x_port_stats_base_init(bp);
4632
4633                 if (bp->func_stx)
4634                         bnx2x_func_stats_base_init(bp);
4635
4636         } else if (bp->func_stx)
4637                 bnx2x_func_stats_base_update(bp);
4638 }
4639
4640 static void bnx2x_timer(unsigned long data)
4641 {
4642         struct bnx2x *bp = (struct bnx2x *) data;
4643
4644         if (!netif_running(bp->dev))
4645                 return;
4646
4647         if (atomic_read(&bp->intr_sem) != 0)
4648                 goto timer_restart;
4649
4650         if (poll) {
4651                 struct bnx2x_fastpath *fp = &bp->fp[0];
4652                 bnx2x_tx_int(fp);
4653
4654                 /* drain up to 1000 Rx completions; return value unused */
4655                 bnx2x_rx_int(fp, 1000);
4656         }
4657
4658         if (!BP_NOMCP(bp)) {
4659                 int func = BP_FUNC(bp);
4660                 u32 drv_pulse;
4661                 u32 mcp_pulse;
4662
4663                 ++bp->fw_drv_pulse_wr_seq;
4664                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4665                 /* TBD - add SYSTEM_TIME */
4666                 drv_pulse = bp->fw_drv_pulse_wr_seq;
4667                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4668
4669                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4670                              MCP_PULSE_SEQ_MASK);
4671                 /* The delta between driver pulse and mcp response
4672                  * should be 1 (before mcp response) or 0 (after mcp response)
4673                  */
4674                 if ((drv_pulse != mcp_pulse) &&
4675                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4676                         /* someone lost a heartbeat... */
4677                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4678                                   drv_pulse, mcp_pulse);
4679                 }
4680         }
4681
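	/*
	 * Illustration only: the pulse check above in numbers.  Sequences
	 * compare modulo MCP_PULSE_SEQ_MASK + 1, so a driver pulse of 0
	 * against an MCP pulse of MCP_PULSE_SEQ_MASK is still "in sync"
	 * (delta of 1 across the wrap):
	 */
#if 0
	u32 drv = 0, mcp = MCP_PULSE_SEQ_MASK;	/* wrapped case */

	WARN_ON((drv != mcp) &&
		(drv != ((mcp + 1) & MCP_PULSE_SEQ_MASK)));	/* no warn */
#endif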
4682         if (bp->state == BNX2X_STATE_OPEN)
4683                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4684
4685 timer_restart:
4686         mod_timer(&bp->timer, jiffies + bp->current_interval);
4687 }
4688
4689 /* end of Statistics */
4690
4691 /* nic init */
4692
4693 /*
4694  * nic init service functions
4695  */
4696
4697 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4698 {
4699         int port = BP_PORT(bp);
4700
4701         /* "CSTORM" */
4702         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4703                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4704                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4705         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4706                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4707                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4708 }
4709
4710 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4711                           dma_addr_t mapping, int sb_id)
4712 {
4713         int port = BP_PORT(bp);
4714         int func = BP_FUNC(bp);
4715         int index;
4716         u64 section;
4717
4718         /* USTORM */
4719         section = ((u64)mapping) + offsetof(struct host_status_block,
4720                                             u_status_block);
4721         sb->u_status_block.status_block_id = sb_id;
4722
4723         REG_WR(bp, BAR_CSTRORM_INTMEM +
4724                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4725         REG_WR(bp, BAR_CSTRORM_INTMEM +
4726                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4727                U64_HI(section));
4728         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4729                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4730
4731         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4732                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4733                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4734
4735         /* CSTORM */
4736         section = ((u64)mapping) + offsetof(struct host_status_block,
4737                                             c_status_block);
4738         sb->c_status_block.status_block_id = sb_id;
4739
4740         REG_WR(bp, BAR_CSTRORM_INTMEM +
4741                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4742         REG_WR(bp, BAR_CSTRORM_INTMEM +
4743                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4744                U64_HI(section));
4745         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4746                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4747
4748         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4749                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4750                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4751
4752         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4753 }
4754
4755 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4756 {
4757         int func = BP_FUNC(bp);
4758
4759         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4760                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4761                         sizeof(struct tstorm_def_status_block)/4);
4762         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4763                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4764                         sizeof(struct cstorm_def_status_block_u)/4);
4765         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4766                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4767                         sizeof(struct cstorm_def_status_block_c)/4);
4768         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4769                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4770                         sizeof(struct xstorm_def_status_block)/4);
4771 }
4772
4773 static void bnx2x_init_def_sb(struct bnx2x *bp,
4774                               struct host_def_status_block *def_sb,
4775                               dma_addr_t mapping, int sb_id)
4776 {
4777         int port = BP_PORT(bp);
4778         int func = BP_FUNC(bp);
4779         int index, val, reg_offset;
4780         u64 section;
4781
4782         /* ATTN */
4783         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4784                                             atten_status_block);
4785         def_sb->atten_status_block.status_block_id = sb_id;
4786
4787         bp->attn_state = 0;
4788
4789         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4790                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4791
4792         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4793                 bp->attn_group[index].sig[0] = REG_RD(bp,
4794                                                      reg_offset + 0x10*index);
4795                 bp->attn_group[index].sig[1] = REG_RD(bp,
4796                                                reg_offset + 0x4 + 0x10*index);
4797                 bp->attn_group[index].sig[2] = REG_RD(bp,
4798                                                reg_offset + 0x8 + 0x10*index);
4799                 bp->attn_group[index].sig[3] = REG_RD(bp,
4800                                                reg_offset + 0xc + 0x10*index);
4801         }
4802
4803         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4804                              HC_REG_ATTN_MSG0_ADDR_L);
4805
4806         REG_WR(bp, reg_offset, U64_LO(section));
4807         REG_WR(bp, reg_offset + 4, U64_HI(section));
4808
4809         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4810
4811         val = REG_RD(bp, reg_offset);
4812         val |= sb_id;
4813         REG_WR(bp, reg_offset, val);
4814
4815         /* USTORM */
4816         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4817                                             u_def_status_block);
4818         def_sb->u_def_status_block.status_block_id = sb_id;
4819
4820         REG_WR(bp, BAR_CSTRORM_INTMEM +
4821                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4822         REG_WR(bp, BAR_CSTRORM_INTMEM +
4823                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4824                U64_HI(section));
4825         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4826                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4827
4828         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4829                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4830                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4831
4832         /* CSTORM */
4833         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4834                                             c_def_status_block);
4835         def_sb->c_def_status_block.status_block_id = sb_id;
4836
4837         REG_WR(bp, BAR_CSTRORM_INTMEM +
4838                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4839         REG_WR(bp, BAR_CSTRORM_INTMEM +
4840                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4841                U64_HI(section));
4842         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4843                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4844
4845         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4846                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4847                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4848
4849         /* TSTORM */
4850         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4851                                             t_def_status_block);
4852         def_sb->t_def_status_block.status_block_id = sb_id;
4853
4854         REG_WR(bp, BAR_TSTRORM_INTMEM +
4855                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4856         REG_WR(bp, BAR_TSTRORM_INTMEM +
4857                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4858                U64_HI(section));
4859         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4860                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4861
4862         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4863                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4864                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4865
4866         /* XSTORM */
4867         section = ((u64)mapping) + offsetof(struct host_def_status_block,
4868                                             x_def_status_block);
4869         def_sb->x_def_status_block.status_block_id = sb_id;
4870
4871         REG_WR(bp, BAR_XSTRORM_INTMEM +
4872                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4873         REG_WR(bp, BAR_XSTRORM_INTMEM +
4874                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4875                U64_HI(section));
4876         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4877                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4878
4879         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4880                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4881                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4882
4883         bp->stats_pending = 0;
4884         bp->set_mac_pending = 0;
4885
4886         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4887 }
4888
4889 static void bnx2x_update_coalesce(struct bnx2x *bp)
4890 {
4891         int port = BP_PORT(bp);
4892         int i;
4893
4894         for_each_queue(bp, i) {
4895                 int sb_id = bp->fp[i].sb_id;
4896
4897                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4898                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4899                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4900                                                       U_SB_ETH_RX_CQ_INDEX),
4901                         bp->rx_ticks/(4 * BNX2X_BTR));
4902                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4903                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4904                                                        U_SB_ETH_RX_CQ_INDEX),
4905                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4906
4907                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4908                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4909                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4910                                                       C_SB_ETH_TX_CQ_INDEX),
4911                         bp->tx_ticks/(4 * BNX2X_BTR));
4912                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4913                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4914                                                        C_SB_ETH_TX_CQ_INDEX),
4915                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4916         }
4917 }
4918
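/*
 * Illustration only: the HC timeout fields above are programmed in units of
 * 4 * BNX2X_BTR ticks, and a quotient of 0 disables the index -- which is
 * exactly when the paired CSTORM_SB_HC_DISABLE_ write stores 1.  Sketch of
 * the conversion (hypothetical helper, assuming the driver default BTR):
 */
#if 0
static u8 example_hc_timeout(u16 coal_ticks)
{
	/* e.g. with BNX2X_BTR == 1, coal_ticks == 12 yields timeout 3 */
	return coal_ticks / (4 * BNX2X_BTR);
}
#endif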
4919 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4920                                        struct bnx2x_fastpath *fp, int last)
4921 {
4922         int i;
4923
4924         for (i = 0; i < last; i++) {
4925                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4926                 struct sk_buff *skb = rx_buf->skb;
4927
4928                 if (skb == NULL) {
4929                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4930                         continue;
4931                 }
4932
4933                 if (fp->tpa_state[i] == BNX2X_TPA_START)
4934                         pci_unmap_single(bp->pdev,
4935                                          pci_unmap_addr(rx_buf, mapping),
4936                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4937
4938                 dev_kfree_skb(skb);
4939                 rx_buf->skb = NULL;
4940         }
4941 }
4942
4943 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4944 {
4945         int func = BP_FUNC(bp);
4946         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4947                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
4948         u16 ring_prod, cqe_ring_prod;
4949         int i, j;
4950
4951         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4952         DP(NETIF_MSG_IFUP,
4953            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4954
4955         if (bp->flags & TPA_ENABLE_FLAG) {
4956
4957                 for_each_queue(bp, j) {
4958                         struct bnx2x_fastpath *fp = &bp->fp[j];
4959
4960                         for (i = 0; i < max_agg_queues; i++) {
4961                                 fp->tpa_pool[i].skb =
4962                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4963                                 if (!fp->tpa_pool[i].skb) {
4964                                         BNX2X_ERR("Failed to allocate TPA "
4965                                                   "skb pool for queue[%d] - "
4966                                                   "disabling TPA on this "
4967                                                   "queue!\n", j);
4968                                         bnx2x_free_tpa_pool(bp, fp, i);
4969                                         fp->disable_tpa = 1;
4970                                         break;
4971                                 }
4972                                 pci_unmap_addr_set(&fp->tpa_pool[i],
4973                                                    mapping,
4974                                                    0);
4975                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
4976                         }
4977                 }
4978         }
4979
4980         for_each_queue(bp, j) {
4981                 struct bnx2x_fastpath *fp = &bp->fp[j];
4982
4983                 fp->rx_bd_cons = 0;
4984                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4985                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4986
4987                 /* "next page" elements initialization */
4988                 /* SGE ring */
4989                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4990                         struct eth_rx_sge *sge;
4991
4992                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4993                         sge->addr_hi =
4994                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4995                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4996                         sge->addr_lo =
4997                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4998                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4999                 }
5000
5001                 bnx2x_init_sge_ring_bit_mask(fp);
5002
5003                 /* RX BD ring */
5004                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5005                         struct eth_rx_bd *rx_bd;
5006
5007                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5008                         rx_bd->addr_hi =
5009                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5010                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5011                         rx_bd->addr_lo =
5012                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5013                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5014                 }
5015
5016                 /* CQ ring */
5017                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5018                         struct eth_rx_cqe_next_page *nextpg;
5019
5020                         nextpg = (struct eth_rx_cqe_next_page *)
5021                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5022                         nextpg->addr_hi =
5023                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5024                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5025                         nextpg->addr_lo =
5026                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5027                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5028                 }
5029
5030                 /* Allocate SGEs and initialize the ring elements */
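		/*
		 * Illustration only: in each of the three loops above, the
		 * reserved tail entry of page (i - 1) is pointed at page
		 * (i % npages), so the last page wraps back to page 0 and
		 * closes the ring.  Hypothetical helper for the target:
		 */
#if 0
		dma_addr_t example_next_page(dma_addr_t base, int i,
					     int npages)
		{
			/* i runs 1..npages, matching the loops above */
			return base + BCM_PAGE_SIZE * (i % npages);
		}
#endif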
5031                 for (i = 0, ring_prod = 0;
5032                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5033
5034                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5035                                 BNX2X_ERR("was only able to allocate "
5036                                           "%d rx sges\n", i);
5037                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5038                                 /* Cleanup already allocated elements */
5039                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5040                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5041                                 fp->disable_tpa = 1;
5042                                 ring_prod = 0;
5043                                 break;
5044                         }
5045                         ring_prod = NEXT_SGE_IDX(ring_prod);
5046                 }
5047                 fp->rx_sge_prod = ring_prod;
5048
5049                 /* Allocate BDs and initialize BD ring */
5050                 fp->rx_comp_cons = 0;
5051                 cqe_ring_prod = ring_prod = 0;
5052                 for (i = 0; i < bp->rx_ring_size; i++) {
5053                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5054                                 BNX2X_ERR("was only able to allocate "
5055                                           "%d rx skbs on queue[%d]\n", i, j);
5056                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5057                                 break;
5058                         }
5059                         ring_prod = NEXT_RX_IDX(ring_prod);
5060                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5061                         WARN_ON(ring_prod <= i);
5062                 }
5063
5064                 fp->rx_bd_prod = ring_prod;
5065                 /* must not have more available CQEs than BDs */
5066                 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5067                                        cqe_ring_prod);
5068                 fp->rx_pkt = fp->rx_calls = 0;
5069
5070                 /* Warning!
5071                  * This will generate an interrupt (to the TSTORM),
5072                  * so it must only be done after the chip is initialized.
5073                  */
5074                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5075                                      fp->rx_sge_prod);
5076                 if (j != 0)
5077                         continue;
5078
5079                 REG_WR(bp, BAR_USTRORM_INTMEM +
5080                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5081                        U64_LO(fp->rx_comp_mapping));
5082                 REG_WR(bp, BAR_USTRORM_INTMEM +
5083                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5084                        U64_HI(fp->rx_comp_mapping));
5085         }
5086 }
5087
5088 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5089 {
5090         int i, j;
5091
5092         for_each_queue(bp, j) {
5093                 struct bnx2x_fastpath *fp = &bp->fp[j];
5094
5095                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5096                         struct eth_tx_next_bd *tx_next_bd =
5097                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5098
5099                         tx_next_bd->addr_hi =
5100                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5101                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5102                         tx_next_bd->addr_lo =
5103                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5104                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5105                 }
5106
5107                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5108                 fp->tx_db.data.zero_fill1 = 0;
5109                 fp->tx_db.data.prod = 0;
5110
5111                 fp->tx_pkt_prod = 0;
5112                 fp->tx_pkt_cons = 0;
5113                 fp->tx_bd_prod = 0;
5114                 fp->tx_bd_cons = 0;
5115                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5116                 fp->tx_pkt = 0;
5117         }
5118 }
5119
5120 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5121 {
5122         int func = BP_FUNC(bp);
5123
5124         spin_lock_init(&bp->spq_lock);
5125
5126         bp->spq_left = MAX_SPQ_PENDING;
5127         bp->spq_prod_idx = 0;
5128         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5129         bp->spq_prod_bd = bp->spq;
5130         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5131
5132         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5133                U64_LO(bp->spq_mapping));
5134         REG_WR(bp,
5135                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5136                U64_HI(bp->spq_mapping));
5137
5138         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5139                bp->spq_prod_idx);
5140 }
5141
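/*
 * Illustration only: the slowpath queue (SPQ) set up above is a
 * producer-only ring from the driver's side; bp->spq_prod_bd walks from
 * bp->spq up to bp->spq_last_bd and then wraps.  A sketch of the advance
 * step (the real logic lives in bnx2x_sp_post()):
 */
#if 0
	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;	/* wrap to the first BD */
		bp->spq_prod_idx = 0;
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
#endif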
5142 static void bnx2x_init_context(struct bnx2x *bp)
5143 {
5144         int i;
5145
5146         /* Rx */
5147         for_each_queue(bp, i) {
5148                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5149                 struct bnx2x_fastpath *fp = &bp->fp[i];
5150                 u8 cl_id = fp->cl_id;
5151
5152                 context->ustorm_st_context.common.sb_index_numbers =
5153                                                 BNX2X_RX_SB_INDEX_NUM;
5154                 context->ustorm_st_context.common.clientId = cl_id;
5155                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5156                 context->ustorm_st_context.common.flags =
5157                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5158                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5159                 context->ustorm_st_context.common.statistics_counter_id =
5160                                                 cl_id;
5161                 context->ustorm_st_context.common.mc_alignment_log_size =
5162                                                 BNX2X_RX_ALIGN_SHIFT;
5163                 context->ustorm_st_context.common.bd_buff_size =
5164                                                 bp->rx_buf_size;
5165                 context->ustorm_st_context.common.bd_page_base_hi =
5166                                                 U64_HI(fp->rx_desc_mapping);
5167                 context->ustorm_st_context.common.bd_page_base_lo =
5168                                                 U64_LO(fp->rx_desc_mapping);
5169                 if (!fp->disable_tpa) {
5170                         context->ustorm_st_context.common.flags |=
5171                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5172                         context->ustorm_st_context.common.sge_buff_size =
5173                                 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5174                                          (u32)0xffff);
5175                         context->ustorm_st_context.common.sge_page_base_hi =
5176                                                 U64_HI(fp->rx_sge_mapping);
5177                         context->ustorm_st_context.common.sge_page_base_lo =
5178                                                 U64_LO(fp->rx_sge_mapping);
5179
5180                         context->ustorm_st_context.common.max_sges_for_packet =
5181                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5182                         context->ustorm_st_context.common.max_sges_for_packet =
5183                                 ((context->ustorm_st_context.common.
5184                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5185                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5186                 }
5187
5188                 context->ustorm_ag_context.cdu_usage =
5189                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5190                                                CDU_REGION_NUMBER_UCM_AG,
5191                                                ETH_CONNECTION_TYPE);
5192
5193                 context->xstorm_ag_context.cdu_reserved =
5194                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5195                                                CDU_REGION_NUMBER_XCM_AG,
5196                                                ETH_CONNECTION_TYPE);
5197         }
5198
5199         /* Tx */
5200         for_each_queue(bp, i) {
5201                 struct bnx2x_fastpath *fp = &bp->fp[i];
5202                 struct eth_context *context =
5203                         bnx2x_sp(bp, context[i].eth);
5204
5205                 context->cstorm_st_context.sb_index_number =
5206                                                 C_SB_ETH_TX_CQ_INDEX;
5207                 context->cstorm_st_context.status_block_id = fp->sb_id;
5208
5209                 context->xstorm_st_context.tx_bd_page_base_hi =
5210                                                 U64_HI(fp->tx_desc_mapping);
5211                 context->xstorm_st_context.tx_bd_page_base_lo =
5212                                                 U64_LO(fp->tx_desc_mapping);
5213                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5214                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5215         }
5216 }
5217
5218 static void bnx2x_init_ind_table(struct bnx2x *bp)
5219 {
5220         int func = BP_FUNC(bp);
5221         int i;
5222
5223         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5224                 return;
5225
5226         DP(NETIF_MSG_IFUP,
5227            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5228         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5229                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5230                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5231                         bp->fp->cl_id + (i % bp->num_queues));
5232 }
5233
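/*
 * Illustration only: with, say, 4 Rx queues and a leading client id of 0,
 * the loop above fills the TSTORM_INDIRECTION_TABLE_SIZE-entry table with
 * the repeating pattern 0,1,2,3,0,1,2,3,... so the RSS hash spreads flows
 * evenly across clients.  Flattened view of the same computation:
 */
#if 0
	u8 table[TSTORM_INDIRECTION_TABLE_SIZE];

	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		table[i] = bp->fp->cl_id + (i % bp->num_queues);
#endif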
5234 static void bnx2x_set_client_config(struct bnx2x *bp)
5235 {
5236         struct tstorm_eth_client_config tstorm_client = {0};
5237         int port = BP_PORT(bp);
5238         int i;
5239
5240         tstorm_client.mtu = bp->dev->mtu;
5241         tstorm_client.config_flags =
5242                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5243                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5244 #ifdef BCM_VLAN
5245         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5246                 tstorm_client.config_flags |=
5247                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5248                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5249         }
5250 #endif
5251
5252         for_each_queue(bp, i) {
5253                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5254
5255                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5256                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5257                        ((u32 *)&tstorm_client)[0]);
5258                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5259                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5260                        ((u32 *)&tstorm_client)[1]);
5261         }
5262
5263         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5264            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5265 }
5266
5267 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5268 {
5269         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5270         int mode = bp->rx_mode;
5271         int mask = bp->rx_mode_cl_mask;
5272         int func = BP_FUNC(bp);
5273         int port = BP_PORT(bp);
5274         int i;
5275         /* All but management unicast packets should pass to the host as well */
5276         u32 llh_mask =
5277                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5278                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5279                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5280                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5281
5282         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5283
5284         switch (mode) {
5285         case BNX2X_RX_MODE_NONE: /* no Rx */
5286                 tstorm_mac_filter.ucast_drop_all = mask;
5287                 tstorm_mac_filter.mcast_drop_all = mask;
5288                 tstorm_mac_filter.bcast_drop_all = mask;
5289                 break;
5290
5291         case BNX2X_RX_MODE_NORMAL:
5292                 tstorm_mac_filter.bcast_accept_all = mask;
5293                 break;
5294
5295         case BNX2X_RX_MODE_ALLMULTI:
5296                 tstorm_mac_filter.mcast_accept_all = mask;
5297                 tstorm_mac_filter.bcast_accept_all = mask;
5298                 break;
5299
5300         case BNX2X_RX_MODE_PROMISC:
5301                 tstorm_mac_filter.ucast_accept_all = mask;
5302                 tstorm_mac_filter.mcast_accept_all = mask;
5303                 tstorm_mac_filter.bcast_accept_all = mask;
5304                 /* pass management unicast packets as well */
5305                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5306                 break;
5307
5308         default:
5309                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5310                 break;
5311         }
5312
5313         REG_WR(bp,
5314                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5315                llh_mask);
5316
5317         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5318                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5319                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5320                        ((u32 *)&tstorm_mac_filter)[i]);
5321
5322 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5323                    ((u32 *)&tstorm_mac_filter)[i]); */
5324         }
5325
5326         if (mode != BNX2X_RX_MODE_NONE)
5327                 bnx2x_set_client_config(bp);
5328 }
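
/* Editorial summary (not in the original file) of the filter settings
 * programmed above, per the client mask:
 *
 *      mode            ucast           mcast           bcast
 *      NONE            drop_all        drop_all        drop_all
 *      NORMAL          match only      match only      accept_all
 *      ALLMULTI        match only      accept_all      accept_all
 *      PROMISC         accept_all      accept_all      accept_all
 *
 * "match only" means neither drop_all nor accept_all is set, leaving the
 * firmware's exact-match filtering in effect.  PROMISC additionally opens
 * the NIG LLH mask so management unicast frames also reach the host.
 */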
5329
5330 static void bnx2x_init_internal_common(struct bnx2x *bp)
5331 {
5332         int i;
5333
5334         /* Zero this manually as its initialization is
5335            currently missing in the initTool */
5336         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5337                 REG_WR(bp, BAR_USTRORM_INTMEM +
5338                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5339 }
5340
5341 static void bnx2x_init_internal_port(struct bnx2x *bp)
5342 {
5343         int port = BP_PORT(bp);
5344
5345         REG_WR(bp,
5346                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5347         REG_WR(bp,
5348                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5349         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5350         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5351 }
5352
5353 static void bnx2x_init_internal_func(struct bnx2x *bp)
5354 {
5355         struct tstorm_eth_function_common_config tstorm_config = {0};
5356         struct stats_indication_flags stats_flags = {0};
5357         int port = BP_PORT(bp);
5358         int func = BP_FUNC(bp);
5359         int i, j;
5360         u32 offset;
5361         u16 max_agg_size;
5362
5363         if (is_multi(bp)) {
5364                 tstorm_config.config_flags = MULTI_FLAGS(bp);
5365                 tstorm_config.rss_result_mask = MULTI_MASK;
5366         }
5367
5368         /* Enable TPA if needed */
5369         if (bp->flags & TPA_ENABLE_FLAG)
5370                 tstorm_config.config_flags |=
5371                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5372
5373         if (IS_E1HMF(bp))
5374                 tstorm_config.config_flags |=
5375                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5376
5377         tstorm_config.leading_client_id = BP_L_ID(bp);
5378
5379         REG_WR(bp, BAR_TSTRORM_INTMEM +
5380                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5381                (*(u32 *)&tstorm_config));
5382
5383         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5384         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5385         bnx2x_set_storm_rx_mode(bp);
5386
5387         for_each_queue(bp, i) {
5388                 u8 cl_id = bp->fp[i].cl_id;
5389
5390                 /* reset xstorm per client statistics */
5391                 offset = BAR_XSTRORM_INTMEM +
5392                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5393                 for (j = 0;
5394                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5395                         REG_WR(bp, offset + j*4, 0);
5396
5397                 /* reset tstorm per client statistics */
5398                 offset = BAR_TSTRORM_INTMEM +
5399                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5400                 for (j = 0;
5401                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5402                         REG_WR(bp, offset + j*4, 0);
5403
5404                 /* reset ustorm per client statistics */
5405                 offset = BAR_USTRORM_INTMEM +
5406                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5407                 for (j = 0;
5408                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5409                         REG_WR(bp, offset + j*4, 0);
5410         }
5411
5412         /* Init statistics related context */
5413         stats_flags.collect_eth = 1;
5414
5415         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5416                ((u32 *)&stats_flags)[0]);
5417         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5418                ((u32 *)&stats_flags)[1]);
5419
5420         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5421                ((u32 *)&stats_flags)[0]);
5422         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5423                ((u32 *)&stats_flags)[1]);
5424
5425         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5426                ((u32 *)&stats_flags)[0]);
5427         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5428                ((u32 *)&stats_flags)[1]);
5429
5430         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5431                ((u32 *)&stats_flags)[0]);
5432         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5433                ((u32 *)&stats_flags)[1]);
5434
5435         REG_WR(bp, BAR_XSTRORM_INTMEM +
5436                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5437                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5438         REG_WR(bp, BAR_XSTRORM_INTMEM +
5439                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5440                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5441
5442         REG_WR(bp, BAR_TSTRORM_INTMEM +
5443                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5444                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5445         REG_WR(bp, BAR_TSTRORM_INTMEM +
5446                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5447                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5448
5449         REG_WR(bp, BAR_USTRORM_INTMEM +
5450                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5451                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5452         REG_WR(bp, BAR_USTRORM_INTMEM +
5453                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5454                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5455
5456         if (CHIP_IS_E1H(bp)) {
5457                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5458                         IS_E1HMF(bp));
5459                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5460                         IS_E1HMF(bp));
5461                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5462                         IS_E1HMF(bp));
5463                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5464                         IS_E1HMF(bp));
5465
5466                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5467                          bp->e1hov);
5468         }
5469
5470         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5471         max_agg_size =
5472                 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5473                           SGE_PAGE_SIZE * PAGES_PER_SGE),
5474                     (u32)0xffff);
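        /* Worked example (editorial): if SGE_PAGE_SIZE * PAGES_PER_SGE
         * were 8 KiB, then min(8, MAX_SKB_FRAGS) * 8192 = 65536 would be
         * clamped by the outer min() to 0xffff, keeping the value within
         * the u16 register written below.
         */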
5475         for_each_queue(bp, i) {
5476                 struct bnx2x_fastpath *fp = &bp->fp[i];
5477
5478                 REG_WR(bp, BAR_USTRORM_INTMEM +
5479                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5480                        U64_LO(fp->rx_comp_mapping));
5481                 REG_WR(bp, BAR_USTRORM_INTMEM +
5482                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5483                        U64_HI(fp->rx_comp_mapping));
5484
5485                 /* Next page */
5486                 REG_WR(bp, BAR_USTRORM_INTMEM +
5487                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5488                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5489                 REG_WR(bp, BAR_USTRORM_INTMEM +
5490                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5491                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5492
5493                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5494                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5495                          max_agg_size);
5496         }
5497
5498         /* dropless flow control */
5499         if (CHIP_IS_E1H(bp)) {
5500                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5501
5502                 rx_pause.bd_thr_low = 250;
5503                 rx_pause.cqe_thr_low = 250;
5504                 rx_pause.cos = 1;
5505                 rx_pause.sge_thr_low = 0;
5506                 rx_pause.bd_thr_high = 350;
5507                 rx_pause.cqe_thr_high = 350;
5508                 rx_pause.sge_thr_high = 0;
5509
5510                 for_each_queue(bp, i) {
5511                         struct bnx2x_fastpath *fp = &bp->fp[i];
5512
5513                         if (!fp->disable_tpa) {
5514                                 rx_pause.sge_thr_low = 150;
5515                                 rx_pause.sge_thr_high = 250;
5516                         }
5517
5518
5519                         offset = BAR_USTRORM_INTMEM +
5520                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5521                                                                    fp->cl_id);
5522                         for (j = 0;
5523                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5524                              j++)
5525                                 REG_WR(bp, offset + j*4,
5526                                        ((u32 *)&rx_pause)[j]);
5527                 }
5528         }
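
        /* Editorial note: the low/high pairs above appear to form a
         * hysteresis in units of ring entries -- the firmware signals
         * XOFF (pause) when free BDs/CQEs drop below the *_thr_low marks
         * and XON again once they climb back above *_thr_high, with the
         * SGE thresholds only armed when TPA is active on the queue.
         */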
5529
5530         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5531
5532         /* Init rate shaping and fairness contexts */
5533         if (IS_E1HMF(bp)) {
5534                 int vn;
5535
5536                 /* During init there is no active link;
5537                    until link is up, set the link rate to 10Gbps */
5538                 bp->link_vars.line_speed = SPEED_10000;
5539                 bnx2x_init_port_minmax(bp);
5540
5541                 if (!BP_NOMCP(bp))
5542                         bp->mf_config =
5543                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5544                 bnx2x_calc_vn_weight_sum(bp);
5545
5546                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5547                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5548
5549                 /* Enable rate shaping and fairness */
5550                 bp->cmng.flags.cmng_enables |=
5551                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5552
5553         } else {
5554                 /* rate shaping and fairness are disabled */
5555                 DP(NETIF_MSG_IFUP,
5556                    "single function mode  minmax will be disabled\n");
5557         }
5558
5559
5560         /* Store it to internal memory */
5561         if (bp->port.pmf)
5562                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5563                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5564                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5565                                ((u32 *)(&bp->cmng))[i]);
5566 }
5567
5568 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5569 {
5570         switch (load_code) {
5571         case FW_MSG_CODE_DRV_LOAD_COMMON:
5572                 bnx2x_init_internal_common(bp);
5573                 /* no break */
5574
5575         case FW_MSG_CODE_DRV_LOAD_PORT:
5576                 bnx2x_init_internal_port(bp);
5577                 /* no break */
5578
5579         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5580                 bnx2x_init_internal_func(bp);
5581                 break;
5582
5583         default:
5584                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5585                 break;
5586         }
5587 }
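
/* Editorial note: the switch above cascades deliberately (hence the two
 * "no break" comments): a COMMON load runs common, port and function init;
 * a PORT load runs port and function init; a FUNCTION load runs only the
 * per-function init.
 */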
5588
5589 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5590 {
5591         int i;
5592
5593         for_each_queue(bp, i) {
5594                 struct bnx2x_fastpath *fp = &bp->fp[i];
5595
5596                 fp->bp = bp;
5597                 fp->state = BNX2X_FP_STATE_CLOSED;
5598                 fp->index = i;
5599                 fp->cl_id = BP_L_ID(bp) + i;
5600 #ifdef BCM_CNIC
5601                 fp->sb_id = fp->cl_id + 1;
5602 #else
5603                 fp->sb_id = fp->cl_id;
5604 #endif
5605                 DP(NETIF_MSG_IFUP,
5606                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
5607                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5608                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5609                               fp->sb_id);
5610                 bnx2x_update_fpsb_idx(fp);
5611         }
5612
5613         /* ensure status block indices were read */
5614         rmb();
5615
5616
5617         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5618                           DEF_SB_ID);
5619         bnx2x_update_dsb_idx(bp);
5620         bnx2x_update_coalesce(bp);
5621         bnx2x_init_rx_rings(bp);
5622         bnx2x_init_tx_ring(bp);
5623         bnx2x_init_sp_ring(bp);
5624         bnx2x_init_context(bp);
5625         bnx2x_init_internal(bp, load_code);
5626         bnx2x_init_ind_table(bp);
5627         bnx2x_stats_init(bp);
5628
5629         /* At this point, we are ready for interrupts */
5630         atomic_set(&bp->intr_sem, 0);
5631
5632         /* flush all before enabling interrupts */
5633         mb();
5634         mmiowb();
5635
5636         bnx2x_int_enable(bp);
5637
5638         /* Check for SPIO5 */
5639         bnx2x_attn_int_deasserted0(bp,
5640                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5641                                    AEU_INPUTS_ATTN_BITS_SPIO5);
5642 }
5643
5644 /* end of nic init */
5645
5646 /*
5647  * gzip service functions
5648  */
5649
5650 static int bnx2x_gunzip_init(struct bnx2x *bp)
5651 {
5652         bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5653                                               &bp->gunzip_mapping);
5654         if (bp->gunzip_buf == NULL)
5655                 goto gunzip_nomem1;
5656
5657         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5658         if (bp->strm == NULL)
5659                 goto gunzip_nomem2;
5660
5661         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5662                                       GFP_KERNEL);
5663         if (bp->strm->workspace == NULL)
5664                 goto gunzip_nomem3;
5665
5666         return 0;
5667
5668 gunzip_nomem3:
5669         kfree(bp->strm);
5670         bp->strm = NULL;
5671
5672 gunzip_nomem2:
5673         pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5674                             bp->gunzip_mapping);
5675         bp->gunzip_buf = NULL;
5676
5677 gunzip_nomem1:
5678         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5679                " decompression\n", bp->dev->name);
5680         return -ENOMEM;
5681 }
5682
5683 static void bnx2x_gunzip_end(struct bnx2x *bp)
5684 {
5685         kfree(bp->strm->workspace);
5686
5687         kfree(bp->strm);
5688         bp->strm = NULL;
5689
5690         if (bp->gunzip_buf) {
5691                 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5692                                     bp->gunzip_mapping);
5693                 bp->gunzip_buf = NULL;
5694         }
5695 }
5696
5697 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5698 {
5699         int n, rc;
5700
5701         /* check gzip header */
5702         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5703                 BNX2X_ERR("Bad gzip header\n");
5704                 return -EINVAL;
5705         }
5706
5707         n = 10;
5708
5709 #define FNAME                           0x8
5710
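        /* Editorial note, per RFC 1952: a gzip member starts with a
         * 10-byte fixed header -- magic bytes 0x1f 0x8b, a compression
         * method byte (8 == deflate), a flag byte (bit 3, FNAME, marks an
         * optional NUL-terminated file name that follows the fixed
         * header) and six more bytes (mtime, XFL, OS).  The loop below
         * skips the name if present; inflateInit2() is then called with
         * -MAX_WBITS to consume the raw deflate stream without a zlib
         * wrapper.
         */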
5711         if (zbuf[3] & FNAME)
5712                 while ((zbuf[n++] != 0) && (n < len));
5713
5714         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5715         bp->strm->avail_in = len - n;
5716         bp->strm->next_out = bp->gunzip_buf;
5717         bp->strm->avail_out = FW_BUF_SIZE;
5718
5719         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5720         if (rc != Z_OK)
5721                 return rc;
5722
5723         rc = zlib_inflate(bp->strm, Z_FINISH);
5724         if ((rc != Z_OK) && (rc != Z_STREAM_END))
5725                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5726                        bp->dev->name, bp->strm->msg);
5727
5728         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5729         if (bp->gunzip_outlen & 0x3)
5730                 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5731                                     " gunzip_outlen (%d) not aligned\n",
5732                        bp->dev->name, bp->gunzip_outlen);
5733         bp->gunzip_outlen >>= 2;
5734
5735         zlib_inflateEnd(bp->strm);
5736
5737         if (rc == Z_STREAM_END)
5738                 return 0;
5739
5740         return rc;
5741 }
5742
5743 /* nic load/unload */
5744
5745 /*
5746  * General service functions
5747  */
5748
5749 /* send a NIG loopback debug packet */
5750 static void bnx2x_lb_pckt(struct bnx2x *bp)
5751 {
5752         u32 wb_write[3];
5753
5754         /* Ethernet source and destination addresses */
5755         wb_write[0] = 0x55555555;
5756         wb_write[1] = 0x55555555;
5757         wb_write[2] = 0x20;             /* SOP */
5758         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5759
5760         /* NON-IP protocol */
5761         wb_write[0] = 0x09000000;
5762         wb_write[1] = 0x55555555;
5763         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
5764         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5765 }
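
/* Editorial note: each REG_WR_DMAE above pushes 8 bytes of frame data plus
 * one control word (0x20 start-of-packet, 0x10 end-of-packet), so the two
 * writes emit a single minimal 0x10-byte frame -- which is why the memory
 * test below expects the NIG byte counter to advance in multiples of 0x10.
 */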
5766
5767 /* Some of the internal memories
5768  * are not directly readable from the driver;
5769  * to test them we send debug packets.
5770  */
5771 static int bnx2x_int_mem_test(struct bnx2x *bp)
5772 {
5773         int factor;
5774         int count, i;
5775         u32 val = 0;
5776
5777         if (CHIP_REV_IS_FPGA(bp))
5778                 factor = 120;
5779         else if (CHIP_REV_IS_EMUL(bp))
5780                 factor = 200;
5781         else
5782                 factor = 1;
5783
5784         DP(NETIF_MSG_HW, "start part1\n");
5785
5786         /* Disable inputs of parser neighbor blocks */
5787         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5788         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5789         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5790         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5791
5792         /*  Write 0 to parser credits for CFC search request */
5793         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5794
5795         /* send Ethernet packet */
5796         bnx2x_lb_pckt(bp);
5797
5798         /* TODO: should the NIG statistics be reset here? */
5799         /* Wait until NIG register shows 1 packet of size 0x10 */
5800         count = 1000 * factor;
5801         while (count) {
5802
5803                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5804                 val = *bnx2x_sp(bp, wb_data[0]);
5805                 if (val == 0x10)
5806                         break;
5807
5808                 msleep(10);
5809                 count--;
5810         }
5811         if (val != 0x10) {
5812                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5813                 return -1;
5814         }
5815
5816         /* Wait until PRS register shows 1 packet */
5817         count = 1000 * factor;
5818         while (count) {
5819                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5820                 if (val == 1)
5821                         break;
5822
5823                 msleep(10);
5824                 count--;
5825         }
5826         if (val != 0x1) {
5827                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5828                 return -2;
5829         }
5830
5831         /* Reset and init BRB, PRS */
5832         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5833         msleep(50);
5834         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5835         msleep(50);
5836         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5837         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5838
5839         DP(NETIF_MSG_HW, "part2\n");
5840
5841         /* Disable inputs of parser neighbor blocks */
5842         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5843         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5844         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5845         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5846
5847         /* Write 0 to parser credits for CFC search request */
5848         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5849
5850         /* send 10 Ethernet packets */
5851         for (i = 0; i < 10; i++)
5852                 bnx2x_lb_pckt(bp);
5853
5854         /* Wait until the NIG byte counter shows 10 + 1 packets,
5855            i.e. 11 * 0x10 = 0xb0 bytes */
5856         count = 1000 * factor;
5857         while (count) {
5858
5859                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5860                 val = *bnx2x_sp(bp, wb_data[0]);
5861                 if (val == 0xb0)
5862                         break;
5863
5864                 msleep(10);
5865                 count--;
5866         }
5867         if (val != 0xb0) {
5868                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
5869                 return -3;
5870         }
5871
5872         /* Wait until PRS register shows 2 packets */
5873         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5874         if (val != 2)
5875                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5876
5877         /* Write 1 to parser credits for CFC search request */
5878         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5879
5880         /* Wait until PRS register shows 3 packets */
5881         msleep(10 * factor);
5882         /* the restored credit should let the 3rd packet through to the PRS */
5883         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5884         if (val != 3)
5885                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
5886
5887         /* clear NIG EOP FIFO */
5888         for (i = 0; i < 11; i++)
5889                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5890         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5891         if (val != 1) {
5892                 BNX2X_ERR("clear of NIG failed\n");
5893                 return -4;
5894         }
5895
5896         /* Reset and init BRB, PRS, NIG */
5897         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5898         msleep(50);
5899         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5900         msleep(50);
5901         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5902         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5903 #ifndef BCM_CNIC
5904         /* set NIC mode */
5905         REG_WR(bp, PRS_REG_NIC_MODE, 1);
5906 #endif
5907
5908         /* Enable inputs of parser neighbor blocks */
5909         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5910         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5911         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5912         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5913
5914         DP(NETIF_MSG_HW, "done\n");
5915
5916         return 0; /* OK */
5917 }
5918
5919 static void enable_blocks_attention(struct bnx2x *bp)
5920 {
5921         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5922         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5923         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5924         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5925         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5926         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5927         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5928         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5929         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5930 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5931 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5932         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5933         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5934         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5935 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5936 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5937         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5938         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5939         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5940         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5941 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5942 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5943         if (CHIP_REV_IS_FPGA(bp))
5944                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5945         else
5946                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5947         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5948         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5949         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5950 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5951 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5952         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5953         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5954 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5955         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3 and 4 masked */
5956 }
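
/* Editorial note: writing 0 to a block's INT_MASK register unmasks
 * (enables) every attention bit of that block; the commented-out SEM and
 * MISC writes are deliberately left masked, and PBF keeps bits 3 and 4
 * masked via the 0x18 written above.
 */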
5957
5958
5959 static void bnx2x_reset_common(struct bnx2x *bp)
5960 {
5961         /* reset_common */
5962         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5963                0xd3ffff7f);
5964         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5965 }
5966
5967 static void bnx2x_init_pxp(struct bnx2x *bp)
5968 {
5969         u16 devctl;
5970         int r_order, w_order;
5971
5972         pci_read_config_word(bp->pdev,
5973                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5974         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5975         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5976         if (bp->mrrs == -1)
5977                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5978         else {
5979                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5980                 r_order = bp->mrrs;
5981         }
5982
5983         bnx2x_init_pxp_arb(bp, r_order, w_order);
5984 }
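
/* Editorial note: per the PCIe spec, DEVCTL bits 7:5 encode the max
 * payload size and bits 14:12 the max read request size, each as
 * 128 << k bytes -- e.g. an extracted value of 2 means 512-byte
 * transactions.  These encoded orders, not byte counts, are what
 * bnx2x_init_pxp_arb() receives above.
 */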
5985
5986 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5987 {
5988         u32 val;
5989         u8 port;
5990         u8 is_required = 0;
5991
5992         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5993               SHARED_HW_CFG_FAN_FAILURE_MASK;
5994
5995         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5996                 is_required = 1;
5997
5998         /*
5999          * The fan failure mechanism is usually related to the PHY type since
6000          * the power consumption of the board is affected by the PHY. Currently,
6001          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6002          */
6003         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6004                 for (port = PORT_0; port < PORT_MAX; port++) {
6005                         u32 phy_type =
6006                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6007                                          external_phy_config) &
6008                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6009                         is_required |=
6010                                 ((phy_type ==
6011                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6012                                  (phy_type ==
6013                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6014                                  (phy_type ==
6015                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6016                 }
6017
6018         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6019
6020         if (is_required == 0)
6021                 return;
6022
6023         /* Fan failure is indicated by SPIO 5 */
6024         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6025                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6026
6027         /* set to active low mode */
6028         val = REG_RD(bp, MISC_REG_SPIO_INT);
6029         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6030                                 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6031         REG_WR(bp, MISC_REG_SPIO_INT, val);
6032
6033         /* enable interrupt to signal the IGU */
6034         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6035         val |= (1 << MISC_REGISTERS_SPIO_5);
6036         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6037 }
6038
6039 static int bnx2x_init_common(struct bnx2x *bp)
6040 {
6041         u32 val, i;
6042 #ifdef BCM_CNIC
6043         u32 wb_write[2];
6044 #endif
6045
6046         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6047
6048         bnx2x_reset_common(bp);
6049         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6050         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6051
6052         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6053         if (CHIP_IS_E1H(bp))
6054                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6055
6056         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6057         msleep(30);
6058         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6059
6060         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6061         if (CHIP_IS_E1(bp)) {
6062                 /* enable HW interrupt from PXP on USDM overflow
6063                    bit 16 on INT_MASK_0 */
6064                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6065         }
6066
6067         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6068         bnx2x_init_pxp(bp);
6069
6070 #ifdef __BIG_ENDIAN
6071         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6072         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6073         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6074         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6075         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6076         /* make sure this value is 0 */
6077         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6078
6079 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6080         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6081         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6082         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6083         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6084 #endif
6085
6086         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6087 #ifdef BCM_CNIC
6088         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6089         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6090         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6091 #endif
6092
6093         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6094                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6095
6096         /* let the HW do its magic ... */
6097         msleep(100);
6098         /* finish PXP init */
6099         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6100         if (val != 1) {
6101                 BNX2X_ERR("PXP2 CFG failed\n");
6102                 return -EBUSY;
6103         }
6104         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6105         if (val != 1) {
6106                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6107                 return -EBUSY;
6108         }
6109
6110         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6111         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6112
6113         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6114
6115         /* clean the DMAE memory */
6116         bp->dmae_ready = 1;
6117         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6118
6119         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6120         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6121         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6122         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6123
6124         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6125         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6126         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6127         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6128
6129         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6130
6131 #ifdef BCM_CNIC
6132         wb_write[0] = 0;
6133         wb_write[1] = 0;
6134         for (i = 0; i < 64; i++) {
6135                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6136                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6137
6138                 if (CHIP_IS_E1H(bp)) {
6139                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6140                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6141                                           wb_write, 2);
6142                 }
6143         }
6144 #endif
6145         /* soft reset pulse */
6146         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6147         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6148
6149 #ifdef BCM_CNIC
6150         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6151 #endif
6152
6153         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6154         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6155         if (!CHIP_REV_IS_SLOW(bp)) {
6156                 /* enable hw interrupt from doorbell Q */
6157                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6158         }
6159
6160         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6161         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6162         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6163 #ifndef BCM_CNIC
6164         /* set NIC mode */
6165         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6166 #endif
6167         if (CHIP_IS_E1H(bp))
6168                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6169
6170         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6171         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6172         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6173         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6174
6175         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6176         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6177         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6178         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6179
6180         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6181         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6182         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6183         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6184
6185         /* sync semi rtc */
6186         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6187                0x80000000);
6188         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6189                0x80000000);
6190
6191         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6192         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6193         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6194
6195         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6196         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6197                 REG_WR(bp, i, 0xc0cac01a);
6198                 /* TODO: replace with something meaningful */
6199         }
6200         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6201 #ifdef BCM_CNIC
6202         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6203         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6204         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6205         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6206         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6207         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6208         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6209         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6210         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6211         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6212 #endif
6213         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6214
6215         if (sizeof(union cdu_context) != 1024)
6216                 /* we currently assume that a context is 1024 bytes */
6217                 printk(KERN_ALERT PFX "please adjust the size of"
6218                        " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6219
6220         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6221         val = (4 << 24) + (0 << 12) + 1024;
6222         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6223
6224         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6225         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6226         /* enable context validation interrupt from CFC */
6227         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6228
6229         /* set the thresholds to prevent CFC/CDU race */
6230         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6231
6232         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6233         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6234
6235         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6236         /* Reset PCIE errors for debug */
6237         REG_WR(bp, 0x2814, 0xffffffff);
6238         REG_WR(bp, 0x3820, 0xffffffff);
6239
6240         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6241         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6242         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6243         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6244
6245         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6246         if (CHIP_IS_E1H(bp)) {
6247                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6248                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6249         }
6250
6251         if (CHIP_REV_IS_SLOW(bp))
6252                 msleep(200);
6253
6254         /* finish CFC init */
6255         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6256         if (val != 1) {
6257                 BNX2X_ERR("CFC LL_INIT failed\n");
6258                 return -EBUSY;
6259         }
6260         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6261         if (val != 1) {
6262                 BNX2X_ERR("CFC AC_INIT failed\n");
6263                 return -EBUSY;
6264         }
6265         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6266         if (val != 1) {
6267                 BNX2X_ERR("CFC CAM_INIT failed\n");
6268                 return -EBUSY;
6269         }
6270         REG_WR(bp, CFC_REG_DEBUG0, 0);
6271
6272         /* read NIG statistic
6273            to see if this is our first up since powerup */
6274         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6275         val = *bnx2x_sp(bp, wb_data[0]);
6276
6277         /* do internal memory self test */
6278         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6279                 BNX2X_ERR("internal mem self test failed\n");
6280                 return -EBUSY;
6281         }
6282
6283         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6284         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6285         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6286         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6287         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6288                 bp->port.need_hw_lock = 1;
6289                 break;
6290
6291         default:
6292                 break;
6293         }
6294
6295         bnx2x_setup_fan_failure_detection(bp);
6296
6297         /* clear PXP2 attentions */
6298         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6299
6300         enable_blocks_attention(bp);
6301
6302         if (!BP_NOMCP(bp)) {
6303                 bnx2x_acquire_phy_lock(bp);
6304                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6305                 bnx2x_release_phy_lock(bp);
6306         } else
6307                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6308
6309         return 0;
6310 }
6311
6312 static int bnx2x_init_port(struct bnx2x *bp)
6313 {
6314         int port = BP_PORT(bp);
6315         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6316         u32 low, high;
6317         u32 val;
6318
6319         DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);
6320
6321         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6322
6323         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6324         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6325
6326         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6327         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6328         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6329         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6330
6331 #ifdef BCM_CNIC
6332         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6333
6334         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6335         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6336         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6337 #endif
6338         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6339
6340         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6341         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6342                 /* no pause for emulation and FPGA */
6343                 low = 0;
6344                 high = 513;
6345         } else {
6346                 if (IS_E1HMF(bp))
6347                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6348                 else if (bp->dev->mtu > 4096) {
6349                         if (bp->flags & ONE_PORT_FLAG)
6350                                 low = 160;
6351                         else {
6352                                 val = bp->dev->mtu;
6353                                 /* (24*1024 + val*4)/256 */
6354                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6355                         }
6356                 } else
6357                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6358                 high = low + 56;        /* 14*1024/256 */
6359         }
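        /* Worked example (editorial): for mtu 9000 on a two-port,
         * single-function board, low = 96 + 9000/64 + 1 = 237, i.e.
         * ceil((24*1024 + 9000*4)/256) blocks of 256 bytes each, and
         * high = 237 + 56 = 293.
         */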
6360         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6361         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6362
6363
6364         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6365
6366         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6367         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6368         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6369         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6370
6371         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6372         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6373         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6374         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6375
6376         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6377         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6378
6379         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6380
6381         /* configure PBF to work without PAUSE mtu 9000 */
6382         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6383
6384         /* update threshold */
6385         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6386         /* update init credit */
6387         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6388
6389         /* probe changes */
6390         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6391         msleep(5);
6392         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6393
6394 #ifdef BCM_CNIC
6395         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6396 #endif
6397         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6398         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6399
6400         if (CHIP_IS_E1(bp)) {
6401                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6402                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6403         }
6404         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6405
6406         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6407         /* init aeu_mask_attn_func_0/1:
6408          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6409          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6410          *             bits 4-7 are used for "per vn group attention" */
6411         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6412                (IS_E1HMF(bp) ? 0xF7 : 0x7));
6413
6414         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6415         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6416         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6417         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6418         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6419
6420         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6421
6422         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6423
6424         if (CHIP_IS_E1H(bp)) {
6425                 /* 0x2 disable e1hov, 0x1 enable */
6426                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6427                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6428
6429                 {
6430                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6431                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6432                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6433                 }
6434         }
6435
6436         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6437         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6438
6439         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6440         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6441                 {
6442                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6443
6444                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6445                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6446
6447                 /* The GPIO should be swapped if the swap register is
6448                    set and active */
6449                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6450                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6451
6452                 /* Select function upon port-swap configuration */
6453                 if (port == 0) {
6454                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6455                         aeu_gpio_mask = (swap_val && swap_override) ?
6456                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6457                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6458                 } else {
6459                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6460                         aeu_gpio_mask = (swap_val && swap_override) ?
6461                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6462                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6463                 }
6464                 val = REG_RD(bp, offset);
6465                 /* add GPIO3 to group */
6466                 val |= aeu_gpio_mask;
6467                 REG_WR(bp, offset, val);
6468                 }
6469                 break;
6470
6471         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6472         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6473                 /* add SPIO 5 to group 0 */
6474                 {
6475                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6476                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6477                 val = REG_RD(bp, reg_addr);
6478                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6479                 REG_WR(bp, reg_addr, val);
6480                 }
6481                 break;
6482
6483         default:
6484                 break;
6485         }
6486
6487         bnx2x__link_reset(bp);
6488
6489         return 0;
6490 }
6491
6492 #define ILT_PER_FUNC            (768/2)
6493 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6494 /* the phys address is shifted right by 12 bits and a valid bit
6495    (1) is set at bit 52, the 53rd bit;
6496    then, since this is a wide register,
6497    we split it into two 32-bit writes
6498  */
6499 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6500 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6501 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6502 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
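
/* Worked example (editorial): for a DMA address of 0x123456789000,
 * ONCHIP_ADDR1() yields 0x23456789 (bits 43:12, truncated to 32 bits) and
 * ONCHIP_ADDR2() yields 0x100001 (bits 63:44 plus the valid bit at word
 * position 20, i.e. bit 52 of the combined ILT entry).
 */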
6503
6504 #ifdef BCM_CNIC
6505 #define CNIC_ILT_LINES          127
6506 #define CNIC_CTX_PER_ILT        16
6507 #else
6508 #define CNIC_ILT_LINES          0
6509 #endif
6510
6511 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6512 {
6513         int reg;
6514
6515         if (CHIP_IS_E1H(bp))
6516                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6517         else /* E1 */
6518                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6519
6520         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6521 }
6522
6523 static int bnx2x_init_func(struct bnx2x *bp)
6524 {
6525         int port = BP_PORT(bp);
6526         int func = BP_FUNC(bp);
6527         u32 addr, val;
6528         int i;
6529
6530         DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);
6531
6532         /* set MSI reconfigure capability */
6533         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6534         val = REG_RD(bp, addr);
6535         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6536         REG_WR(bp, addr, val);
6537
6538         i = FUNC_ILT_BASE(func);
6539
6540         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6541         if (CHIP_IS_E1H(bp)) {
6542                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6543                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6544         } else /* E1 */
6545                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6546                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6547
6548 #ifdef BCM_CNIC
6549         i += 1 + CNIC_ILT_LINES;
6550         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6551         if (CHIP_IS_E1(bp))
6552                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6553         else {
6554                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6555                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6556         }
6557
6558         i++;
6559         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6560         if (CHIP_IS_E1(bp))
6561                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6562         else {
6563                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6564                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6565         }
6566
6567         i++;
6568         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6569         if (CHIP_IS_E1(bp))
6570                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6571         else {
6572                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6573                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6574         }
6575
6576         /* tell the searcher where the T2 table is */
6577         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6578
6579         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6580                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6581
6582         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6583                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6584                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6585
6586         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6587 #endif
6588
6589         if (CHIP_IS_E1H(bp)) {
6590                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6591                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6592                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6593                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6594                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6595                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6596                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6597                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6598                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
6599
6600                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6601                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6602         }
6603
6604         /* HC init per function */
6605         if (CHIP_IS_E1H(bp)) {
6606                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6607
6608                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6609                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6610         }
6611         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6612
6613         /* Reset PCIE errors for debug */
6614         REG_WR(bp, 0x2114, 0xffffffff);
6615         REG_WR(bp, 0x2120, 0xffffffff);
6616
6617         return 0;
6618 }
6619
6620 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6621 {
6622         int i, rc = 0;
6623
6624         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
6625            BP_FUNC(bp), load_code);
6626
6627         bp->dmae_ready = 0;
6628         mutex_init(&bp->dmae_mutex);
6629         rc = bnx2x_gunzip_init(bp);
6630         if (rc)
6631                 return rc;
6632
6633         switch (load_code) {
6634         case FW_MSG_CODE_DRV_LOAD_COMMON:
6635                 rc = bnx2x_init_common(bp);
6636                 if (rc)
6637                         goto init_hw_err;
6638                 /* no break */
6639
6640         case FW_MSG_CODE_DRV_LOAD_PORT:
6641                 bp->dmae_ready = 1;
6642                 rc = bnx2x_init_port(bp);
6643                 if (rc)
6644                         goto init_hw_err;
6645                 /* no break */
6646
6647         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6648                 bp->dmae_ready = 1;
6649                 rc = bnx2x_init_func(bp);
6650                 if (rc)
6651                         goto init_hw_err;
6652                 break;
6653
6654         default:
6655                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6656                 break;
6657         }
6658
6659         if (!BP_NOMCP(bp)) {
6660                 int func = BP_FUNC(bp);
6661
6662                 bp->fw_drv_pulse_wr_seq =
6663                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6664                                  DRV_PULSE_SEQ_MASK);
6665                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6666         }
6667
6668         /* this needs to be done before gunzip end */
6669         bnx2x_zero_def_sb(bp);
6670         for_each_queue(bp, i)
6671                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6672 #ifdef BCM_CNIC
6673         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6674 #endif
6675
6676 init_hw_err:
6677         bnx2x_gunzip_end(bp);
6678
6679         return rc;
6680 }
6681
6682 static void bnx2x_free_mem(struct bnx2x *bp)
6683 {
6684
6685 #define BNX2X_PCI_FREE(x, y, size) \
6686         do { \
6687                 if (x) { \
6688                         pci_free_consistent(bp->pdev, size, x, y); \
6689                         x = NULL; \
6690                         y = 0; \
6691                 } \
6692         } while (0)
6693
6694 #define BNX2X_FREE(x) \
6695         do { \
6696                 if (x) { \
6697                         vfree(x); \
6698                         x = NULL; \
6699                 } \
6700         } while (0)
6701
6702         int i;
6703
6704         /* fastpath */
6705         /* Common */
6706         for_each_queue(bp, i) {
6707
6708                 /* status blocks */
6709                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6710                                bnx2x_fp(bp, i, status_blk_mapping),
6711                                sizeof(struct host_status_block));
6712         }
6713         /* Rx */
6714         for_each_queue(bp, i) {
6715
6716                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6717                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6718                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6719                                bnx2x_fp(bp, i, rx_desc_mapping),
6720                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
6721
6722                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6723                                bnx2x_fp(bp, i, rx_comp_mapping),
6724                                sizeof(struct eth_fast_path_rx_cqe) *
6725                                NUM_RCQ_BD);
6726
6727                 /* SGE ring */
6728                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6729                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6730                                bnx2x_fp(bp, i, rx_sge_mapping),
6731                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6732         }
6733         /* Tx */
6734         for_each_queue(bp, i) {
6735
6736                 /* fastpath tx rings: tx_buf tx_desc */
6737                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6738                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6739                                bnx2x_fp(bp, i, tx_desc_mapping),
6740                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6741         }
6742         /* end of fastpath */
6743
6744         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6745                        sizeof(struct host_def_status_block));
6746
6747         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6748                        sizeof(struct bnx2x_slowpath));
6749
6750 #ifdef BCM_CNIC
6751         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6752         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6753         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6754         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6755         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6756                        sizeof(struct host_status_block));
6757 #endif
6758         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6759
6760 #undef BNX2X_PCI_FREE
6761 #undef BNX2X_FREE
6762 }
6763
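/*
 * Allocate all fixed driver memory: per-queue status blocks and
 * Rx/Tx/SGE rings on the fastpath, plus the default status block,
 * the slowpath buffer and the slowpath queue (SPQ) page.  Rings the
 * hardware accesses come from pci_alloc_consistent(); the software
 * shadow rings, touched only by the host, come from vmalloc().  Any
 * failure jumps to alloc_mem_err, which frees whatever has been
 * allocated so far and returns -ENOMEM.
 */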
6764 static int bnx2x_alloc_mem(struct bnx2x *bp)
6765 {
6766
6767 #define BNX2X_PCI_ALLOC(x, y, size) \
6768         do { \
6769                 x = pci_alloc_consistent(bp->pdev, size, y); \
6770                 if (x == NULL) \
6771                         goto alloc_mem_err; \
6772                 memset(x, 0, size); \
6773         } while (0)
6774
6775 #define BNX2X_ALLOC(x, size) \
6776         do { \
6777                 x = vmalloc(size); \
6778                 if (x == NULL) \
6779                         goto alloc_mem_err; \
6780                 memset(x, 0, size); \
6781         } while (0)
6782
6783         int i;
6784
6785         /* fastpath */
6786         /* Common */
6787         for_each_queue(bp, i) {
6788                 bnx2x_fp(bp, i, bp) = bp;
6789
6790                 /* status blocks */
6791                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6792                                 &bnx2x_fp(bp, i, status_blk_mapping),
6793                                 sizeof(struct host_status_block));
6794         }
6795         /* Rx */
6796         for_each_queue(bp, i) {
6797
6798                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6799                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6800                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6801                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6802                                 &bnx2x_fp(bp, i, rx_desc_mapping),
6803                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6804
6805                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6806                                 &bnx2x_fp(bp, i, rx_comp_mapping),
6807                                 sizeof(struct eth_fast_path_rx_cqe) *
6808                                 NUM_RCQ_BD);
6809
6810                 /* SGE ring */
6811                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6812                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6813                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6814                                 &bnx2x_fp(bp, i, rx_sge_mapping),
6815                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6816         }
6817         /* Tx */
6818         for_each_queue(bp, i) {
6819
6820                 /* fastpath tx rings: tx_buf tx_desc */
6821                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6822                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6823                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6824                                 &bnx2x_fp(bp, i, tx_desc_mapping),
6825                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6826         }
6827         /* end of fastpath */
6828
6829         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6830                         sizeof(struct host_def_status_block));
6831
6832         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6833                         sizeof(struct bnx2x_slowpath));
6834
6835 #ifdef BCM_CNIC
6836         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6837
6838         /* Allocate the searcher T2 table;
6839            we allocate 1/4 of the allocation size for T2
6840            (which is not entered into the ILT) */
6841         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6842
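        /* The loop below links the 64-byte T2 elements into a chain:
         * the last 8 bytes of each element (offset 56) receive the
         * physical address of the next element, which is the form the
         * searcher expects */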
6843         /* Initialize T2 (for 1024 connections) */
6844         for (i = 0; i < 16*1024; i += 64)
6845                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6846
6847         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6848         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6849
6850         /* QM queues (128*MAX_CONN) */
6851         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6852
6853         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6854                         sizeof(struct host_status_block));
6855 #endif
6856
6857         /* Slow path ring */
6858         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6859
6860         return 0;
6861
6862 alloc_mem_err:
6863         bnx2x_free_mem(bp);
6864         return -ENOMEM;
6865
6866 #undef BNX2X_PCI_ALLOC
6867 #undef BNX2X_ALLOC
6868 }
6869
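/*
 * Any Tx packets still sitting between the software consumer and
 * producer indices at unload time were never completed by the chip;
 * walk that window and release them via bnx2x_free_tx_pkt().
 */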
6870 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6871 {
6872         int i;
6873
6874         for_each_queue(bp, i) {
6875                 struct bnx2x_fastpath *fp = &bp->fp[i];
6876
6877                 u16 bd_cons = fp->tx_bd_cons;
6878                 u16 sw_prod = fp->tx_pkt_prod;
6879                 u16 sw_cons = fp->tx_pkt_cons;
6880
6881                 while (sw_cons != sw_prod) {
6882                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6883                         sw_cons++;
6884                 }
6885         }
6886 }
6887
6888 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6889 {
6890         int i, j;
6891
6892         for_each_queue(bp, j) {
6893                 struct bnx2x_fastpath *fp = &bp->fp[j];
6894
6895                 for (i = 0; i < NUM_RX_BD; i++) {
6896                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6897                         struct sk_buff *skb = rx_buf->skb;
6898
6899                         if (skb == NULL)
6900                                 continue;
6901
6902                         pci_unmap_single(bp->pdev,
6903                                          pci_unmap_addr(rx_buf, mapping),
6904                                          bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6905
6906                         rx_buf->skb = NULL;
6907                         dev_kfree_skb(skb);
6908                 }
6909                 if (!fp->disable_tpa)
6910                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6911                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
6912                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
6913         }
6914 }
6915
6916 static void bnx2x_free_skbs(struct bnx2x *bp)
6917 {
6918         bnx2x_free_tx_skbs(bp);
6919         bnx2x_free_rx_skbs(bp);
6920 }
6921
6922 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6923 {
6924         int i, offset = 1;
6925
6926         free_irq(bp->msix_table[0].vector, bp->dev);
6927         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6928            bp->msix_table[0].vector);
6929
6930 #ifdef BCM_CNIC
6931         offset++;
6932 #endif
6933         for_each_queue(bp, i) {
6934                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
6935                    "state %x\n", i, bp->msix_table[i + offset].vector,
6936                    bnx2x_fp(bp, i, state));
6937
6938                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6939         }
6940 }
6941
6942 static void bnx2x_free_irq(struct bnx2x *bp)
6943 {
6944         if (bp->flags & USING_MSIX_FLAG) {
6945                 bnx2x_free_msix_irqs(bp);
6946                 pci_disable_msix(bp->pdev);
6947                 bp->flags &= ~USING_MSIX_FLAG;
6948
6949         } else if (bp->flags & USING_MSI_FLAG) {
6950                 free_irq(bp->pdev->irq, bp->dev);
6951                 pci_disable_msi(bp->pdev);
6952                 bp->flags &= ~USING_MSI_FLAG;
6953
6954         } else
6955                 free_irq(bp->pdev->irq, bp->dev);
6956 }
6957
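/*
 * MSI-X table layout: entry 0 carries the slowpath (default status
 * block) interrupt, entry 1 is reserved for CNIC when BCM_CNIC is
 * configured, and the remaining entries map one-to-one onto the
 * fastpath queues.  The fastpath (and CNIC) IGU vector numbers are
 * derived from the function's base L_ID.
 */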
6958 static int bnx2x_enable_msix(struct bnx2x *bp)
6959 {
6960         int i, rc, offset = 1;
6961         int igu_vec = 0;
6962
6963         bp->msix_table[0].entry = igu_vec;
6964         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6965
6966 #ifdef BCM_CNIC
6967         igu_vec = BP_L_ID(bp) + offset;
6968         bp->msix_table[1].entry = igu_vec;
6969         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6970         offset++;
6971 #endif
6972         for_each_queue(bp, i) {
6973                 igu_vec = BP_L_ID(bp) + offset + i;
6974                 bp->msix_table[i + offset].entry = igu_vec;
6975                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6976                    "(fastpath #%u)\n", i + offset, igu_vec, i);
6977         }
6978
6979         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6980                              BNX2X_NUM_QUEUES(bp) + offset);
6981         if (rc) {
6982                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
6983                 return rc;
6984         }
6985
6986         bp->flags |= USING_MSIX_FLAG;
6987
6988         return 0;
6989 }
6990
6991 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6992 {
6993         int i, rc, offset = 1;
6994
6995         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6996                          bp->dev->name, bp->dev);
6997         if (rc) {
6998                 BNX2X_ERR("request sp irq failed\n");
6999                 return -EBUSY;
7000         }
7001
7002 #ifdef BCM_CNIC
7003         offset++;
7004 #endif
7005         for_each_queue(bp, i) {
7006                 struct bnx2x_fastpath *fp = &bp->fp[i];
7007                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7008                          bp->dev->name, i);
7009
7010                 rc = request_irq(bp->msix_table[i + offset].vector,
7011                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7012                 if (rc) {
7013                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7014                         bnx2x_free_msix_irqs(bp);
7015                         return -EBUSY;
7016                 }
7017
7018                 fp->state = BNX2X_FP_STATE_IRQ;
7019         }
7020
7021         i = BNX2X_NUM_QUEUES(bp);
7022         printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
7023                " ... fp[%d] %d\n",
7024                bp->dev->name, bp->msix_table[0].vector,
7025                0, bp->msix_table[offset].vector,
7026                i - 1, bp->msix_table[offset + i - 1].vector);
7027
7028         return 0;
7029 }
7030
7031 static int bnx2x_enable_msi(struct bnx2x *bp)
7032 {
7033         int rc;
7034
7035         rc = pci_enable_msi(bp->pdev);
7036         if (rc) {
7037                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7038                 return -1;
7039         }
7040         bp->flags |= USING_MSI_FLAG;
7041
7042         return 0;
7043 }
7044
7045 static int bnx2x_req_irq(struct bnx2x *bp)
7046 {
7047         unsigned long flags;
7048         int rc;
7049
7050         if (bp->flags & USING_MSI_FLAG)
7051                 flags = 0;
7052         else
7053                 flags = IRQF_SHARED;
7054
7055         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7056                          bp->dev->name, bp->dev);
7057         if (!rc)
7058                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7059
7060         return rc;
7061 }
7062
7063 static void bnx2x_napi_enable(struct bnx2x *bp)
7064 {
7065         int i;
7066
7067         for_each_queue(bp, i)
7068                 napi_enable(&bnx2x_fp(bp, i, napi));
7069 }
7070
7071 static void bnx2x_napi_disable(struct bnx2x *bp)
7072 {
7073         int i;
7074
7075         for_each_queue(bp, i)
7076                 napi_disable(&bnx2x_fp(bp, i, napi));
7077 }
7078
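/*
 * bp->intr_sem gates interrupt handling while the device is being
 * reconfigured.  bnx2x_netif_start() lowers the gate and, only when
 * it drops to zero, re-enables NAPI, HW interrupts and the Tx queues;
 * bnx2x_netif_stop() below is the symmetric teardown.
 */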
7079 static void bnx2x_netif_start(struct bnx2x *bp)
7080 {
7081         int intr_sem;
7082
7083         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7084         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7085
7086         if (intr_sem) {
7087                 if (netif_running(bp->dev)) {
7088                         bnx2x_napi_enable(bp);
7089                         bnx2x_int_enable(bp);
7090                         if (bp->state == BNX2X_STATE_OPEN)
7091                                 netif_tx_wake_all_queues(bp->dev);
7092                 }
7093         }
7094 }
7095
7096 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7097 {
7098         bnx2x_int_disable_sync(bp, disable_hw);
7099         bnx2x_napi_disable(bp);
7100         netif_tx_disable(bp->dev);
7101         bp->dev->trans_start = jiffies; /* prevent tx timeout */
7102 }
7103
7104 /*
7105  * Init service functions
7106  */
7107
7108 /**
7109  * Sets a MAC in the CAM for a few L2 clients for the E1 chip
7110  *
7111  * @param bp driver descriptor
7112  * @param set set or clear an entry (1 or 0)
7113  * @param mac pointer to a buffer containing a MAC
7114  * @param cl_bit_vec bit vector of clients to register a MAC for
7115  * @param cam_offset offset in a CAM to use
7116  * @param with_bcast set broadcast MAC as well
7117  */
7118 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7119                                       u32 cl_bit_vec, u8 cam_offset,
7120                                       u8 with_bcast)
7121 {
7122         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7123         int port = BP_PORT(bp);
7124
7125         /* CAM allocation
7126          * unicasts 0-31:port0 32-63:port1
7127          * multicast 64-127:port0 128-191:port1
7128          */
7129         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7130         config->hdr.offset = cam_offset;
7131         config->hdr.client_id = 0xff;
7132         config->hdr.reserved1 = 0;
7133
7134         /* primary MAC */
7135         config->config_table[0].cam_entry.msb_mac_addr =
7136                                         swab16(*(u16 *)&mac[0]);
7137         config->config_table[0].cam_entry.middle_mac_addr =
7138                                         swab16(*(u16 *)&mac[2]);
7139         config->config_table[0].cam_entry.lsb_mac_addr =
7140                                         swab16(*(u16 *)&mac[4]);
7141         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7142         if (set)
7143                 config->config_table[0].target_table_entry.flags = 0;
7144         else
7145                 CAM_INVALIDATE(config->config_table[0]);
7146         config->config_table[0].target_table_entry.clients_bit_vector =
7147                                                 cpu_to_le32(cl_bit_vec);
7148         config->config_table[0].target_table_entry.vlan_id = 0;
7149
7150         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7151            (set ? "setting" : "clearing"),
7152            config->config_table[0].cam_entry.msb_mac_addr,
7153            config->config_table[0].cam_entry.middle_mac_addr,
7154            config->config_table[0].cam_entry.lsb_mac_addr);
7155
7156         /* broadcast */
7157         if (with_bcast) {
7158                 config->config_table[1].cam_entry.msb_mac_addr =
7159                         cpu_to_le16(0xffff);
7160                 config->config_table[1].cam_entry.middle_mac_addr =
7161                         cpu_to_le16(0xffff);
7162                 config->config_table[1].cam_entry.lsb_mac_addr =
7163                         cpu_to_le16(0xffff);
7164                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7165                 if (set)
7166                         config->config_table[1].target_table_entry.flags =
7167                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7168                 else
7169                         CAM_INVALIDATE(config->config_table[1]);
7170                 config->config_table[1].target_table_entry.clients_bit_vector =
7171                                                         cpu_to_le32(cl_bit_vec);
7172                 config->config_table[1].target_table_entry.vlan_id = 0;
7173         }
7174
7175         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7176                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7177                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7178 }
7179
7180 /**
7181  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7182  * Sets a MAC in the CAM for a few L2 clients for the E1H chip
7183  * @param bp driver descriptor
7184  * @param set set or clear an entry (1 or 0)
7185  * @param mac pointer to a buffer containing a MAC
7186  * @param cl_bit_vec bit vector of clients to register a MAC for
7187  * @param cam_offset offset in a CAM to use
7188  */
7189 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7190                                        u32 cl_bit_vec, u8 cam_offset)
7191 {
7192         struct mac_configuration_cmd_e1h *config =
7193                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7194
7195         config->hdr.length = 1;
7196         config->hdr.offset = cam_offset;
7197         config->hdr.client_id = 0xff;
7198         config->hdr.reserved1 = 0;
7199
7200         /* primary MAC */
7201         config->config_table[0].msb_mac_addr =
7202                                         swab16(*(u16 *)&mac[0]);
7203         config->config_table[0].middle_mac_addr =
7204                                         swab16(*(u16 *)&mac[2]);
7205         config->config_table[0].lsb_mac_addr =
7206                                         swab16(*(u16 *)&mac[4]);
7207         config->config_table[0].clients_bit_vector =
7208                                         cpu_to_le32(cl_bit_vec);
7209         config->config_table[0].vlan_id = 0;
7210         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7211         if (set)
7212                 config->config_table[0].flags = BP_PORT(bp);
7213         else
7214                 config->config_table[0].flags =
7215                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7216
7217         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7218            (set ? "setting" : "clearing"),
7219            config->config_table[0].msb_mac_addr,
7220            config->config_table[0].middle_mac_addr,
7221            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7222
7223         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7224                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7225                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7226 }
7227
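/*
 * Ramrod completions are posted back by the firmware and recorded by
 * bnx2x_sp_event(), which updates the variable *state_p points at
 * (hence the mb() before each check).  This helper waits up to
 * roughly 5 seconds, in 1 ms steps, for the requested state.  In
 * poll mode, used when interrupts are not being serviced, it also
 * drains the Rx completion ring by hand so the completion can be
 * processed at all.
 */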
7228 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7229                              int *state_p, int poll)
7230 {
7231         /* can take a while if any port is running */
7232         int cnt = 5000;
7233
7234         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7235            poll ? "polling" : "waiting", state, idx);
7236
7237         might_sleep();
7238         while (cnt--) {
7239                 if (poll) {
7240                         bnx2x_rx_int(bp->fp, 10);
7241                         /* if the index is different from 0
7242                          * the reply for some commands will
7243                          * be on the non-default queue
7244                          */
7245                         if (idx)
7246                                 bnx2x_rx_int(&bp->fp[idx], 10);
7247                 }
7248
7249                 mb(); /* state is changed by bnx2x_sp_event() */
7250                 if (*state_p == state) {
7251 #ifdef BNX2X_STOP_ON_ERROR
7252                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7253 #endif
7254                         return 0;
7255                 }
7256
7257                 msleep(1);
7258
7259                 if (bp->panic)
7260                         return -EIO;
7261         }
7262
7263         /* timeout! */
7264         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7265                   poll ? "polling" : "waiting", state, idx);
7266 #ifdef BNX2X_STOP_ON_ERROR
7267         bnx2x_panic();
7268 #endif
7269
7270         return -EBUSY;
7271 }
7272
7273 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7274 {
7275         bp->set_mac_pending++;
7276         smp_wmb();
7277
7278         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7279                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7280
7281         /* Wait for a completion */
7282         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7283 }
7284
7285 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7286 {
7287         bp->set_mac_pending++;
7288         smp_wmb();
7289
7290         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7291                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7292                                   1);
7293
7294         /* Wait for a completion */
7295         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7296 }
7297
7298 #ifdef BCM_CNIC
7299 /**
7300  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7301  * MAC(s). This function will wait until the ramrod completion
7302  * returns.
7303  *
7304  * @param bp driver handle
7305  * @param set set or clear the CAM entry
7306  *
7307  * @return 0 if success, -ENODEV if ramrod doesn't return.
7308  */
7309 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7310 {
7311         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7312
7313         bp->set_mac_pending++;
7314         smp_wmb();
7315
7316         /* Send a SET_MAC ramrod */
7317         if (CHIP_IS_E1(bp))
7318                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7319                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7320                                   1);
7321         else
7322                 /* CAM allocation for E1H
7323                  * unicasts: by func number
7324                  * multicast: 20+FUNC*20, 20 each
7325                  */
7326                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7327                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7328
7329         /* Wait for a completion when setting */
7330         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7331
7332         return 0;
7333 }
7334 #endif
7335
7336 static int bnx2x_setup_leading(struct bnx2x *bp)
7337 {
7338         int rc;
7339
7340         /* reset IGU state */
7341         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7342
7343         /* SETUP ramrod */
7344         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7345
7346         /* Wait for completion */
7347         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7348
7349         return rc;
7350 }
7351
7352 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7353 {
7354         struct bnx2x_fastpath *fp = &bp->fp[index];
7355
7356         /* reset IGU state */
7357         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7358
7359         /* SETUP ramrod */
7360         fp->state = BNX2X_FP_STATE_OPENING;
7361         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7362                       fp->cl_id, 0);
7363
7364         /* Wait for completion */
7365         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7366                                  &(fp->state), 0);
7367 }
7368
7369 static int bnx2x_poll(struct napi_struct *napi, int budget);
7370
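/*
 * With RSS disabled a single queue is used.  In regular RSS mode the
 * queue count is the num_queues module parameter when set, otherwise
 * the number of online CPUs, clamped in both cases to
 * BNX2X_MAX_QUEUES().
 */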
7371 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7372 {
7373
7374         switch (bp->multi_mode) {
7375         case ETH_RSS_MODE_DISABLED:
7376                 bp->num_queues = 1;
7377                 break;
7378
7379         case ETH_RSS_MODE_REGULAR:
7380                 if (num_queues)
7381                         bp->num_queues = min_t(u32, num_queues,
7382                                                   BNX2X_MAX_QUEUES(bp));
7383                 else
7384                         bp->num_queues = min_t(u32, num_online_cpus(),
7385                                                   BNX2X_MAX_QUEUES(bp));
7386                 break;
7387
7389         default:
7390                 bp->num_queues = 1;
7391                 break;
7392         }
7393 }
7394
7395 static int bnx2x_set_num_queues(struct bnx2x *bp)
7396 {
7397         int rc = 0;
7398
7399         switch (int_mode) {
7400         case INT_MODE_INTx:
7401         case INT_MODE_MSI:
7402                 bp->num_queues = 1;
7403                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7404                 break;
7405
7406         case INT_MODE_MSIX:
7407         default:
7408                 /* Set number of queues according to bp->multi_mode value */
7409                 bnx2x_set_num_queues_msix(bp);
7410
7411                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7412                    bp->num_queues);
7413
7414                 /* if we can't use MSI-X we only need one fp,
7415                  * so try to enable MSI-X with the requested number of fp's
7416                  * and fall back to MSI or legacy INTx with one fp
7417                  */
7418                 rc = bnx2x_enable_msix(bp);
7419                 if (rc)
7420                         /* failed to enable MSI-X */
7421                         bp->num_queues = 1;
7422                 break;
7423         }
7424         bp->dev->real_num_tx_queues = bp->num_queues;
7425         return rc;
7426 }
7427
7428 #ifdef BCM_CNIC
7429 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7430 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7431 #endif
7432
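/*
 * Bring the NIC up in stages: pick the queue count and interrupt
 * mode, allocate memory, enable NAPI and request IRQs, negotiate a
 * load_code with the MCP (or derive one from load_count[] when no MCP
 * is present), initialize the HW and firmware internals, open the
 * leading and non-default connections, program the MAC(s) and start
 * the fast path according to load_mode.  The load_error labels unwind
 * these stages in reverse order.
 */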
7433 /* must be called with rtnl_lock */
7434 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7435 {
7436         u32 load_code;
7437         int i, rc;
7438
7439 #ifdef BNX2X_STOP_ON_ERROR
7440         if (unlikely(bp->panic))
7441                 return -EPERM;
7442 #endif
7443
7444         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7445
7446         rc = bnx2x_set_num_queues(bp);
7447
7448         if (bnx2x_alloc_mem(bp))
7449                 return -ENOMEM;
7450
7451         for_each_queue(bp, i)
7452                 bnx2x_fp(bp, i, disable_tpa) =
7453                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7454
7455         for_each_queue(bp, i)
7456                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7457                                bnx2x_poll, 128);
7458
7459         bnx2x_napi_enable(bp);
7460
7461         if (bp->flags & USING_MSIX_FLAG) {
7462                 rc = bnx2x_req_msix_irqs(bp);
7463                 if (rc) {
7464                         pci_disable_msix(bp->pdev);
7465                         goto load_error1;
7466                 }
7467         } else {
7468                 /* Fall back to INTx if we failed to enable MSI-X due to
7469                    lack of memory (in bnx2x_set_num_queues()) */
7470                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7471                         bnx2x_enable_msi(bp);
7472                 bnx2x_ack_int(bp);
7473                 rc = bnx2x_req_irq(bp);
7474                 if (rc) {
7475                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7476                         if (bp->flags & USING_MSI_FLAG)
7477                                 pci_disable_msi(bp->pdev);
7478                         goto load_error1;
7479                 }
7480                 if (bp->flags & USING_MSI_FLAG) {
7481                         bp->dev->irq = bp->pdev->irq;
7482                         printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
7483                                bp->dev->name, bp->pdev->irq);
7484                 }
7485         }
7486
7487         /* Send LOAD_REQUEST command to MCP.
7488            Returns the type of LOAD command:
7489            if this is the first port to be initialized,
7490            the common blocks should be initialized as well; otherwise not
7491         */
7492         if (!BP_NOMCP(bp)) {
7493                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7494                 if (!load_code) {
7495                         BNX2X_ERR("MCP response failure, aborting\n");
7496                         rc = -EBUSY;
7497                         goto load_error2;
7498                 }
7499                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7500                         rc = -EBUSY; /* other port in diagnostic mode */
7501                         goto load_error2;
7502                 }
7503
7504         } else {
7505                 int port = BP_PORT(bp);
7506
7507                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7508                    load_count[0], load_count[1], load_count[2]);
7509                 load_count[0]++;
7510                 load_count[1 + port]++;
7511                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7512                    load_count[0], load_count[1], load_count[2]);
7513                 if (load_count[0] == 1)
7514                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7515                 else if (load_count[1 + port] == 1)
7516                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7517                 else
7518                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7519         }
7520
7521         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7522             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7523                 bp->port.pmf = 1;
7524         else
7525                 bp->port.pmf = 0;
7526         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7527
7528         /* Initialize HW */
7529         rc = bnx2x_init_hw(bp, load_code);
7530         if (rc) {
7531                 BNX2X_ERR("HW init failed, aborting\n");
7532                 goto load_error2;
7533         }
7534
7535         /* Setup NIC internals and enable interrupts */
7536         bnx2x_nic_init(bp, load_code);
7537
7538         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7539             (bp->common.shmem2_base))
7540                 SHMEM2_WR(bp, dcc_support,
7541                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7542                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7543
7544         /* Send LOAD_DONE command to MCP */
7545         if (!BP_NOMCP(bp)) {
7546                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7547                 if (!load_code) {
7548                         BNX2X_ERR("MCP response failure, aborting\n");
7549                         rc = -EBUSY;
7550                         goto load_error3;
7551                 }
7552         }
7553
7554         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7555
7556         rc = bnx2x_setup_leading(bp);
7557         if (rc) {
7558                 BNX2X_ERR("Setup leading failed!\n");
7559 #ifndef BNX2X_STOP_ON_ERROR
7560                 goto load_error3;
7561 #else
7562                 bp->panic = 1;
7563                 return -EBUSY;
7564 #endif
7565         }
7566
7567         if (CHIP_IS_E1H(bp))
7568                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7569                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7570                         bp->flags |= MF_FUNC_DIS;
7571                 }
7572
7573         if (bp->state == BNX2X_STATE_OPEN) {
7574 #ifdef BCM_CNIC
7575                 /* Enable Timer scan */
7576                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7577 #endif
7578                 for_each_nondefault_queue(bp, i) {
7579                         rc = bnx2x_setup_multi(bp, i);
7580                         if (rc)
7581 #ifdef BCM_CNIC
7582                                 goto load_error4;
7583 #else
7584                                 goto load_error3;
7585 #endif
7586                 }
7587
7588                 if (CHIP_IS_E1(bp))
7589                         bnx2x_set_eth_mac_addr_e1(bp, 1);
7590                 else
7591                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
7592 #ifdef BCM_CNIC
7593                 /* Set iSCSI L2 MAC */
7594                 mutex_lock(&bp->cnic_mutex);
7595                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7596                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7597                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7598                 }
7599                 mutex_unlock(&bp->cnic_mutex);
7600 #endif
7601         }
7602
7603         if (bp->port.pmf)
7604                 bnx2x_initial_phy_init(bp, load_mode);
7605
7606         /* Start fast path */
7607         switch (load_mode) {
7608         case LOAD_NORMAL:
7609                 if (bp->state == BNX2X_STATE_OPEN) {
7610                         /* Tx queues should only be re-enabled */
7611                         netif_tx_wake_all_queues(bp->dev);
7612                 }
7613                 /* Initialize the receive filter. */
7614                 bnx2x_set_rx_mode(bp->dev);
7615                 break;
7616
7617         case LOAD_OPEN:
7618                 netif_tx_start_all_queues(bp->dev);
7619                 if (bp->state != BNX2X_STATE_OPEN)
7620                         netif_tx_disable(bp->dev);
7621                 /* Initialize the receive filter. */
7622                 bnx2x_set_rx_mode(bp->dev);
7623                 break;
7624
7625         case LOAD_DIAG:
7626                 /* Initialize the receive filter. */
7627                 bnx2x_set_rx_mode(bp->dev);
7628                 bp->state = BNX2X_STATE_DIAG;
7629                 break;
7630
7631         default:
7632                 break;
7633         }
7634
7635         if (!bp->port.pmf)
7636                 bnx2x__link_status_update(bp);
7637
7638         /* start the timer */
7639         mod_timer(&bp->timer, jiffies + bp->current_interval);
7640
7641 #ifdef BCM_CNIC
7642         bnx2x_setup_cnic_irq_info(bp);
7643         if (bp->state == BNX2X_STATE_OPEN)
7644                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7645 #endif
7646
7647         return 0;
7648
7649 #ifdef BCM_CNIC
7650 load_error4:
7651         /* Disable Timer scan */
7652         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7653 #endif
7654 load_error3:
7655         bnx2x_int_disable_sync(bp, 1);
7656         if (!BP_NOMCP(bp)) {
7657                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7658                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7659         }
7660         bp->port.pmf = 0;
7661         /* Free SKBs, SGEs, TPA pool and driver internals */
7662         bnx2x_free_skbs(bp);
7663         for_each_queue(bp, i)
7664                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7665 load_error2:
7666         /* Release IRQs */
7667         bnx2x_free_irq(bp);
7668 load_error1:
7669         bnx2x_napi_disable(bp);
7670         for_each_queue(bp, i)
7671                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7672         bnx2x_free_mem(bp);
7673
7674         return rc;
7675 }
7676
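/*
 * A non-default connection is closed in two ramrod steps: HALT stops
 * the client, then CFC_DEL releases its connection context.  Both are
 * waited for synchronously, in poll mode, since interrupts are
 * already disabled on the unload path.
 */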
7677 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7678 {
7679         struct bnx2x_fastpath *fp = &bp->fp[index];
7680         int rc;
7681
7682         /* halt the connection */
7683         fp->state = BNX2X_FP_STATE_HALTING;
7684         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7685
7686         /* Wait for completion */
7687         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7688                                &(fp->state), 1);
7689         if (rc) /* timeout */
7690                 return rc;
7691
7692         /* delete cfc entry */
7693         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7694
7695         /* Wait for completion */
7696         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7697                                &(fp->state), 1);
7698         return rc;
7699 }
7700
7701 static int bnx2x_stop_leading(struct bnx2x *bp)
7702 {
7703         __le16 dsb_sp_prod_idx;
7704         /* if the other port is handling traffic,
7705            this can take a lot of time */
7706         int cnt = 500;
7707         int rc;
7708
7709         might_sleep();
7710
7711         /* Send HALT ramrod */
7712         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7713         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7714
7715         /* Wait for completion */
7716         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7717                                &(bp->fp[0].state), 1);
7718         if (rc) /* timeout */
7719                 return rc;
7720
7721         dsb_sp_prod_idx = *bp->dsb_sp_prod;
7722
7723         /* Send PORT_DELETE ramrod */
7724         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7725
7726         /* Wait for the completion to arrive on the default status block;
7727            we are going to reset the chip anyway,
7728            so there is not much to do if this times out
7729          */
7730         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7731                 if (!cnt) {
7732                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7733                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7734                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
7735 #ifdef BNX2X_STOP_ON_ERROR
7736                         bnx2x_panic();
7737 #endif
7738                         rc = -EBUSY;
7739                         break;
7740                 }
7741                 cnt--;
7742                 msleep(1);
7743                 rmb(); /* Refresh the dsb_sp_prod */
7744         }
7745         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7746         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7747
7748         return rc;
7749 }
7750
7751 static void bnx2x_reset_func(struct bnx2x *bp)
7752 {
7753         int port = BP_PORT(bp);
7754         int func = BP_FUNC(bp);
7755         int base, i;
7756
7757         /* Configure IGU */
7758         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7759         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7760
7761 #ifdef BCM_CNIC
7762         /* Disable Timer scan */
7763         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7764         /*
7765          * Wait for at least 10ms and up to 2 seconds for the timers scan to
7766          * complete
7767          */
7768         for (i = 0; i < 200; i++) {
7769                 msleep(10);
7770                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7771                         break;
7772         }
7773 #endif
7774         /* Clear ILT */
7775         base = FUNC_ILT_BASE(func);
7776         for (i = base; i < base + ILT_PER_FUNC; i++)
7777                 bnx2x_ilt_wr(bp, i, 0);
7778 }
7779
7780 static void bnx2x_reset_port(struct bnx2x *bp)
7781 {
7782         int port = BP_PORT(bp);
7783         u32 val;
7784
7785         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7786
7787         /* Do not rcv packets to BRB */
7788         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7789         /* Do not direct rcv packets that are not for MCP to the BRB */
7790         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7791                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7792
7793         /* Configure AEU */
7794         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7795
7796         msleep(100);
7797         /* Check for BRB port occupancy */
7798         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7799         if (val)
7800                 DP(NETIF_MSG_IFDOWN,
7801                    "BRB1 is not empty  %d blocks are occupied\n", val);
7802
7803         /* TODO: Close Doorbell port? */
7804 }
7805
7806 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7807 {
7808         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
7809            BP_FUNC(bp), reset_code);
7810
7811         switch (reset_code) {
7812         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7813                 bnx2x_reset_port(bp);
7814                 bnx2x_reset_func(bp);
7815                 bnx2x_reset_common(bp);
7816                 break;
7817
7818         case FW_MSG_CODE_DRV_UNLOAD_PORT:
7819                 bnx2x_reset_port(bp);
7820                 bnx2x_reset_func(bp);
7821                 break;
7822
7823         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7824                 bnx2x_reset_func(bp);
7825                 break;
7826
7827         default:
7828                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7829                 break;
7830         }
7831 }
7832
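/*
 * Reverse of bnx2x_nic_load(): switch the Rx filter to drop-all, stop
 * NAPI/interrupts/Tx, drain the Tx rings, invalidate the CAM entries,
 * close the non-default and then the leading connection, let the MCP
 * (or the load_count[] emulation) pick the unload scope, reset the
 * chip accordingly and free all driver memory.
 */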
7833 /* must be called with rtnl_lock */
7834 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7835 {
7836         int port = BP_PORT(bp);
7837         u32 reset_code = 0;
7838         int i, cnt, rc;
7839
7840 #ifdef BCM_CNIC
7841         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7842 #endif
7843         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7844
7845         /* Set "drop all" */
7846         bp->rx_mode = BNX2X_RX_MODE_NONE;
7847         bnx2x_set_storm_rx_mode(bp);
7848
7849         /* Disable HW interrupts, NAPI and Tx */
7850         bnx2x_netif_stop(bp, 1);
7851
7852         del_timer_sync(&bp->timer);
7853         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7854                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7855         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7856
7857         /* Release IRQs */
7858         bnx2x_free_irq(bp);
7859
7860         /* Wait until tx fastpath tasks complete */
7861         for_each_queue(bp, i) {
7862                 struct bnx2x_fastpath *fp = &bp->fp[i];
7863
7864                 cnt = 1000;
7865                 while (bnx2x_has_tx_work_unload(fp)) {
7866
7867                         bnx2x_tx_int(fp);
7868                         if (!cnt) {
7869                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
7870                                           i);
7871 #ifdef BNX2X_STOP_ON_ERROR
7872                                 bnx2x_panic();
7873                                 return -EBUSY;
7874 #else
7875                                 break;
7876 #endif
7877                         }
7878                         cnt--;
7879                         msleep(1);
7880                 }
7881         }
7882         /* Give HW time to discard old tx messages */
7883         msleep(1);
7884
7885         if (CHIP_IS_E1(bp)) {
7886                 struct mac_configuration_cmd *config =
7887                                                 bnx2x_sp(bp, mcast_config);
7888
7889                 bnx2x_set_eth_mac_addr_e1(bp, 0);
7890
7891                 for (i = 0; i < config->hdr.length; i++)
7892                         CAM_INVALIDATE(config->config_table[i]);
7893
7894                 config->hdr.length = i;
7895                 if (CHIP_REV_IS_SLOW(bp))
7896                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7897                 else
7898                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7899                 config->hdr.client_id = bp->fp->cl_id;
7900                 config->hdr.reserved1 = 0;
7901
7902                 bp->set_mac_pending++;
7903                 smp_wmb();
7904
7905                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7906                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7907                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7908
7909         } else { /* E1H */
7910                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7911
7912                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7913
7914                 for (i = 0; i < MC_HASH_SIZE; i++)
7915                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7916
7917                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7918         }
7919 #ifdef BCM_CNIC
7920         /* Clear iSCSI L2 MAC */
7921         mutex_lock(&bp->cnic_mutex);
7922         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7923                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7924                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7925         }
7926         mutex_unlock(&bp->cnic_mutex);
7927 #endif
7928
7929         if (unload_mode == UNLOAD_NORMAL)
7930                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7931
7932         else if (bp->flags & NO_WOL_FLAG)
7933                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7934
7935         else if (bp->wol) {
7936                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7937                 u8 *mac_addr = bp->dev->dev_addr;
7938                 u32 val;
7939                 /* The MAC address is written to entries 1-4 to
7940                    preserve entry 0, which is used by the PMF */
7941                 u8 entry = (BP_E1HVN(bp) + 1)*8;
7942
7943                 val = (mac_addr[0] << 8) | mac_addr[1];
7944                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7945
7946                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7947                       (mac_addr[4] << 8) | mac_addr[5];
7948                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7949
7950                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7951
7952         } else
7953                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7954
7955         /* Close multi and leading connections.
7956            Completions for ramrods are collected synchronously */
7957         for_each_nondefault_queue(bp, i)
7958                 if (bnx2x_stop_multi(bp, i))
7959                         goto unload_error;
7960
7961         rc = bnx2x_stop_leading(bp);
7962         if (rc) {
7963                 BNX2X_ERR("Stop leading failed!\n");
7964 #ifdef BNX2X_STOP_ON_ERROR
7965                 return -EBUSY;
7966 #else
7967                 goto unload_error;
7968 #endif
7969         }
7970
7971 unload_error:
7972         if (!BP_NOMCP(bp))
7973                 reset_code = bnx2x_fw_command(bp, reset_code);
7974         else {
7975                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
7976                    load_count[0], load_count[1], load_count[2]);
7977                 load_count[0]--;
7978                 load_count[1 + port]--;
7979                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
7980                    load_count[0], load_count[1], load_count[2]);
7981                 if (load_count[0] == 0)
7982                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7983                 else if (load_count[1 + port] == 0)
7984                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7985                 else
7986                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7987         }
7988
7989         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7990             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7991                 bnx2x__link_reset(bp);
7992
7993         /* Reset the chip */
7994         bnx2x_reset_chip(bp, reset_code);
7995
7996         /* Report UNLOAD_DONE to MCP */
7997         if (!BP_NOMCP(bp))
7998                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7999
8000         bp->port.pmf = 0;
8001
8002         /* Free SKBs, SGEs, TPA pool and driver internals */
8003         bnx2x_free_skbs(bp);
8004         for_each_queue(bp, i)
8005                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8006         for_each_queue(bp, i)
8007                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8008         bnx2x_free_mem(bp);
8009
8010         bp->state = BNX2X_STATE_CLOSED;
8011
8012         netif_carrier_off(bp->dev);
8013
8014         return 0;
8015 }
8016
8017 static void bnx2x_reset_task(struct work_struct *work)
8018 {
8019         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8020
8021 #ifdef BNX2X_STOP_ON_ERROR
8022         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8023                   " so reset not done to allow debug dump,\n"
8024                   " you will need to reboot when done\n");
8025         return;
8026 #endif
8027
8028         rtnl_lock();
8029
8030         if (!netif_running(bp->dev))
8031                 goto reset_task_exit;
8032
8033         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8034         bnx2x_nic_load(bp, LOAD_NORMAL);
8035
8036 reset_task_exit:
8037         rtnl_unlock();
8038 }
8039
8040 /* end of nic load/unload */
8041
8042 /* ethtool_ops */
8043
8044 /*
8045  * Init service functions
8046  */
8047
8048 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
8049 {
8050         switch (func) {
8051         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
8052         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
8053         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
8054         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
8055         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
8056         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
8057         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
8058         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
8059         default:
8060                 BNX2X_ERR("Unsupported function index: %d\n", func);
8061                 return (u32)(-1);
8062         }
8063 }
8064
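/*
 * On E1H, disabling interrupts on behalf of another function uses the
 * PXP2 "pretend" mechanism: writing a function number into this
 * function's pretend register makes subsequent GRC accesses behave as
 * if issued by that function.  The read-back after each write checks
 * that the pretend value actually took effect.
 */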
8065 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
8066 {
8067         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
8068
8069         /* Flush all outstanding writes */
8070         mmiowb();
8071
8072         /* Pretend to be function 0 */
8073         REG_WR(bp, reg, 0);
8074         /* Flush the GRC transaction (in the chip) */
8075         new_val = REG_RD(bp, reg);
8076         if (new_val != 0) {
8077                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
8078                           new_val);
8079                 BUG();
8080         }
8081
8082         /* From now we are in the "like-E1" mode */
8083         bnx2x_int_disable(bp);
8084
8085         /* Flush all outstanding writes */
8086         mmiowb();
8087
8088         /* Restore the original function settings */
8089         REG_WR(bp, reg, orig_func);
8090         new_val = REG_RD(bp, reg);
8091         if (new_val != orig_func) {
8092                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
8093                           orig_func, new_val);
8094                 BUG();
8095         }
8096 }
8097
8098 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
8099 {
8100         if (CHIP_IS_E1H(bp))
8101                 bnx2x_undi_int_disable_e1h(bp, func);
8102         else
8103                 bnx2x_int_disable(bp);
8104 }
8105
8106 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
8107 {
8108         u32 val;
8109
8110         /* Check if there is any driver already loaded */
8111         val = REG_RD(bp, MISC_REG_UNPREPARED);
8112         if (val == 0x1) {
8113                 /* Check if it is the UNDI driver;
8114                  * UNDI initializes the CID offset for the normal doorbell to 0x7
8115                  */
8116                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8117                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
8118                 if (val == 0x7) {
8119                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8120                         /* save our func */
8121                         int func = BP_FUNC(bp);
8122                         u32 swap_en;
8123                         u32 swap_val;
8124
8125                         /* clear the UNDI indication */
8126                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
8127
8128                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
8129
8130                         /* try unload UNDI on port 0 */
8131                         bp->func = 0;
8132                         bp->fw_seq =
8133                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8134                                 DRV_MSG_SEQ_NUMBER_MASK);
8135                         reset_code = bnx2x_fw_command(bp, reset_code);
8136
8137                         /* if UNDI is loaded on the other port */
8138                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
8139
8140                                 /* send "DONE" for previous unload */
8141                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8142
8143                                 /* unload UNDI on port 1 */
8144                                 bp->func = 1;
8145                                 bp->fw_seq =
8146                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8147                                         DRV_MSG_SEQ_NUMBER_MASK);
8148                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8149
8150                                 bnx2x_fw_command(bp, reset_code);
8151                         }
8152
8153                         /* now it's safe to release the lock */
8154                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8155
8156                         bnx2x_undi_int_disable(bp, func);
8157
8158                         /* close input traffic and wait for it */
8159                         /* Do not rcv packets to BRB */
8160                         REG_WR(bp,
8161                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
8162                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
8163                         /* Do not direct rcv packets that are not for MCP to
8164                          * the BRB */
8165                         REG_WR(bp,
8166                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
8167                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8168                         /* clear AEU */
8169                         REG_WR(bp,
8170                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8171                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
8172                         msleep(10);
8173
8174                         /* save NIG port swap info */
8175                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
8176                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
8177                         /* reset device */
8178                         REG_WR(bp,
8179                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8180                                0xd3ffffff);
8181                         REG_WR(bp,
8182                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8183                                0x1403);
8184                         /* take the NIG out of reset and restore swap values */
8185                         REG_WR(bp,
8186                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
8187                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
8188                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
8189                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
8190
8191                         /* send unload done to the MCP */
8192                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8193
8194                         /* restore our func and fw_seq */
8195                         bp->func = func;
8196                         bp->fw_seq =
8197                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8198                                 DRV_MSG_SEQ_NUMBER_MASK);
8199
8200                 } else
8201                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8202         }
8203 }
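
/* The driver<->MCP mailbox keeps a rolling sequence number in the low
 * bits of drv_mb_header; the repeated SHMEM_RD()-and-mask above simply
 * resynchronizes bp->fw_seq with the firmware's view for the function
 * being addressed. A minimal sketch of that resync (the helper name is
 * illustrative, not part of the driver):
 */
static inline u32 bnx2x_read_fw_seq_sketch(struct bnx2x *bp, int func)
{
        return SHMEM_RD(bp, func_mb[func].drv_mb_header) &
               DRV_MSG_SEQ_NUMBER_MASK;
}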
8204
8205 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8206 {
8207         u32 val, val2, val3, val4, id;
8208         u16 pmc;
8209
8210         /* Get the chip revision id and number. */
8211         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8212         val = REG_RD(bp, MISC_REG_CHIP_NUM);
8213         id = ((val & 0xffff) << 16);
8214         val = REG_RD(bp, MISC_REG_CHIP_REV);
8215         id |= ((val & 0xf) << 12);
8216         val = REG_RD(bp, MISC_REG_CHIP_METAL);
8217         id |= ((val & 0xff) << 4);
8218         val = REG_RD(bp, MISC_REG_BOND_ID);
8219         id |= (val & 0xf);
8220         bp->common.chip_id = id;
8221         bp->link_params.chip_id = bp->common.chip_id;
8222         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8223
8224         val = (REG_RD(bp, 0x2874) & 0x55);
8225         if ((bp->common.chip_id & 0x1) ||
8226             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8227                 bp->flags |= ONE_PORT_FLAG;
8228                 BNX2X_DEV_INFO("single port device\n");
8229         }
8230
8231         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8232         bp->common.flash_size = (NVRAM_1MB_SIZE <<
8233                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
8234         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8235                        bp->common.flash_size, bp->common.flash_size);
8236
8237         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8238         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8239         bp->link_params.shmem_base = bp->common.shmem_base;
8240         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
8241                        bp->common.shmem_base, bp->common.shmem2_base);
8242
8243         if (!bp->common.shmem_base ||
8244             (bp->common.shmem_base < 0xA0000) ||
8245             (bp->common.shmem_base >= 0xC0000)) {
8246                 BNX2X_DEV_INFO("MCP not active\n");
8247                 bp->flags |= NO_MCP_FLAG;
8248                 return;
8249         }
8250
8251         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8252         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8253                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8254                 BNX2X_ERR("BAD MCP validity signature\n");
8255
8256         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8257         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8258
8259         bp->link_params.hw_led_mode = ((bp->common.hw_config &
8260                                         SHARED_HW_CFG_LED_MODE_MASK) >>
8261                                        SHARED_HW_CFG_LED_MODE_SHIFT);
8262
8263         bp->link_params.feature_config_flags = 0;
8264         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8265         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8266                 bp->link_params.feature_config_flags |=
8267                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8268         else
8269                 bp->link_params.feature_config_flags &=
8270                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8271
8272         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8273         bp->common.bc_ver = val;
8274         BNX2X_DEV_INFO("bc_ver %X\n", val);
8275         if (val < BNX2X_BC_VER) {
8276                 /* For now only warn;
8277                  * later we may need to enforce this */
8278                 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8279                           " please upgrade BC\n", BNX2X_BC_VER, val);
8280         }
8281         bp->link_params.feature_config_flags |=
8282                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8283                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8284
8285         if (BP_E1HVN(bp) == 0) {
8286                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8287                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8288         } else {
8289                 /* no WOL capability for E1HVN != 0 */
8290                 bp->flags |= NO_WOL_FLAG;
8291         }
8292         BNX2X_DEV_INFO("%sWoL capable\n",
8293                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
8294
8295         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8296         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8297         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8298         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8299
8300         printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8301                val, val2, val3, val4);
8302 }
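
/* A minimal sketch of how the chip_id packed above decomposes, per the
 * num:16-31, rev:12-15, metal:4-11, bond_id:0-3 layout noted in the
 * function; the helper is illustrative only:
 */
static inline void bnx2x_decode_chip_id_sketch(u32 id, u16 *num, u8 *rev,
                                               u8 *metal, u8 *bond_id)
{
        *num = (id >> 16) & 0xffff;     /* chip number, bits 16-31 */
        *rev = (id >> 12) & 0xf;        /* revision, bits 12-15 */
        *metal = (id >> 4) & 0xff;      /* metal layer, bits 4-11 */
        *bond_id = id & 0xf;            /* bond option, bits 0-3 */
}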
8303
8304 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8305                                                     u32 switch_cfg)
8306 {
8307         int port = BP_PORT(bp);
8308         u32 ext_phy_type;
8309
8310         switch (switch_cfg) {
8311         case SWITCH_CFG_1G:
8312                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8313
8314                 ext_phy_type =
8315                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8316                 switch (ext_phy_type) {
8317                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8318                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8319                                        ext_phy_type);
8320
8321                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8322                                                SUPPORTED_10baseT_Full |
8323                                                SUPPORTED_100baseT_Half |
8324                                                SUPPORTED_100baseT_Full |
8325                                                SUPPORTED_1000baseT_Full |
8326                                                SUPPORTED_2500baseX_Full |
8327                                                SUPPORTED_TP |
8328                                                SUPPORTED_FIBRE |
8329                                                SUPPORTED_Autoneg |
8330                                                SUPPORTED_Pause |
8331                                                SUPPORTED_Asym_Pause);
8332                         break;
8333
8334                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8335                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8336                                        ext_phy_type);
8337
8338                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8339                                                SUPPORTED_10baseT_Full |
8340                                                SUPPORTED_100baseT_Half |
8341                                                SUPPORTED_100baseT_Full |
8342                                                SUPPORTED_1000baseT_Full |
8343                                                SUPPORTED_TP |
8344                                                SUPPORTED_FIBRE |
8345                                                SUPPORTED_Autoneg |
8346                                                SUPPORTED_Pause |
8347                                                SUPPORTED_Asym_Pause);
8348                         break;
8349
8350                 default:
8351                         BNX2X_ERR("NVRAM config error. "
8352                                   "BAD SerDes ext_phy_config 0x%x\n",
8353                                   bp->link_params.ext_phy_config);
8354                         return;
8355                 }
8356
8357                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8358                                            port*0x10);
8359                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8360                 break;
8361
8362         case SWITCH_CFG_10G:
8363                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8364
8365                 ext_phy_type =
8366                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8367                 switch (ext_phy_type) {
8368                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8369                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8370                                        ext_phy_type);
8371
8372                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8373                                                SUPPORTED_10baseT_Full |
8374                                                SUPPORTED_100baseT_Half |
8375                                                SUPPORTED_100baseT_Full |
8376                                                SUPPORTED_1000baseT_Full |
8377                                                SUPPORTED_2500baseX_Full |
8378                                                SUPPORTED_10000baseT_Full |
8379                                                SUPPORTED_TP |
8380                                                SUPPORTED_FIBRE |
8381                                                SUPPORTED_Autoneg |
8382                                                SUPPORTED_Pause |
8383                                                SUPPORTED_Asym_Pause);
8384                         break;
8385
8386                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8387                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8388                                        ext_phy_type);
8389
8390                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8391                                                SUPPORTED_1000baseT_Full |
8392                                                SUPPORTED_FIBRE |
8393                                                SUPPORTED_Autoneg |
8394                                                SUPPORTED_Pause |
8395                                                SUPPORTED_Asym_Pause);
8396                         break;
8397
8398                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8399                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8400                                        ext_phy_type);
8401
8402                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8403                                                SUPPORTED_2500baseX_Full |
8404                                                SUPPORTED_1000baseT_Full |
8405                                                SUPPORTED_FIBRE |
8406                                                SUPPORTED_Autoneg |
8407                                                SUPPORTED_Pause |
8408                                                SUPPORTED_Asym_Pause);
8409                         break;
8410
8411                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8412                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8413                                        ext_phy_type);
8414
8415                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8416                                                SUPPORTED_FIBRE |
8417                                                SUPPORTED_Pause |
8418                                                SUPPORTED_Asym_Pause);
8419                         break;
8420
8421                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8422                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8423                                        ext_phy_type);
8424
8425                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8426                                                SUPPORTED_1000baseT_Full |
8427                                                SUPPORTED_FIBRE |
8428                                                SUPPORTED_Pause |
8429                                                SUPPORTED_Asym_Pause);
8430                         break;
8431
8432                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8433                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8434                                        ext_phy_type);
8435
8436                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8437                                                SUPPORTED_1000baseT_Full |
8438                                                SUPPORTED_Autoneg |
8439                                                SUPPORTED_FIBRE |
8440                                                SUPPORTED_Pause |
8441                                                SUPPORTED_Asym_Pause);
8442                         break;
8443
8444                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8445                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8446                                        ext_phy_type);
8447
8448                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8449                                                SUPPORTED_1000baseT_Full |
8450                                                SUPPORTED_Autoneg |
8451                                                SUPPORTED_FIBRE |
8452                                                SUPPORTED_Pause |
8453                                                SUPPORTED_Asym_Pause);
8454                         break;
8455
8456                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8457                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8458                                        ext_phy_type);
8459
8460                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
8461                                                SUPPORTED_TP |
8462                                                SUPPORTED_Autoneg |
8463                                                SUPPORTED_Pause |
8464                                                SUPPORTED_Asym_Pause);
8465                         break;
8466
8467                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8468                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8469                                        ext_phy_type);
8470
8471                         bp->port.supported |= (SUPPORTED_10baseT_Half |
8472                                                SUPPORTED_10baseT_Full |
8473                                                SUPPORTED_100baseT_Half |
8474                                                SUPPORTED_100baseT_Full |
8475                                                SUPPORTED_1000baseT_Full |
8476                                                SUPPORTED_10000baseT_Full |
8477                                                SUPPORTED_TP |
8478                                                SUPPORTED_Autoneg |
8479                                                SUPPORTED_Pause |
8480                                                SUPPORTED_Asym_Pause);
8481                         break;
8482
8483                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8484                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8485                                   bp->link_params.ext_phy_config);
8486                         break;
8487
8488                 default:
8489                         BNX2X_ERR("NVRAM config error. "
8490                                   "BAD XGXS ext_phy_config 0x%x\n",
8491                                   bp->link_params.ext_phy_config);
8492                         return;
8493                 }
8494
8495                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8496                                            port*0x18);
8497                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8498
8499                 break;
8500
8501         default:
8502                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8503                           bp->port.link_config);
8504                 return;
8505         }
8506         bp->link_params.phy_addr = bp->port.phy_addr;
8507
8508         /* mask what we support according to speed_cap_mask */
8509         if (!(bp->link_params.speed_cap_mask &
8510                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8511                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8512
8513         if (!(bp->link_params.speed_cap_mask &
8514                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8515                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8516
8517         if (!(bp->link_params.speed_cap_mask &
8518                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8519                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8520
8521         if (!(bp->link_params.speed_cap_mask &
8522                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8523                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8524
8525         if (!(bp->link_params.speed_cap_mask &
8526                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8527                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8528                                         SUPPORTED_1000baseT_Full);
8529
8530         if (!(bp->link_params.speed_cap_mask &
8531                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8532                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8533
8534         if (!(bp->link_params.speed_cap_mask &
8535                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8536                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
8537
8538         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8539 }
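
/* The chain of speed_cap_mask tests above is one long mask-and-clear
 * sequence; it could equivalently be written as a table walk. A minimal
 * table-driven sketch (cap_map[] and the helper name are illustrative,
 * not part of the driver):
 */
static void bnx2x_filter_supported_sketch(struct bnx2x *bp)
{
        static const struct {
                u32 speed_cap;  /* PORT_HW_CFG_SPEED_CAPABILITY_D0_* bit */
                u32 supported;  /* SUPPORTED_* bits to drop when absent */
        } cap_map[] = {
                { PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF,
                  SUPPORTED_10baseT_Half },
                { PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL,
                  SUPPORTED_10baseT_Full },
                { PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF,
                  SUPPORTED_100baseT_Half },
                { PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL,
                  SUPPORTED_100baseT_Full },
                { PORT_HW_CFG_SPEED_CAPABILITY_D0_1G,
                  SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full },
                { PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G,
                  SUPPORTED_2500baseX_Full },
                { PORT_HW_CFG_SPEED_CAPABILITY_D0_10G,
                  SUPPORTED_10000baseT_Full },
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(cap_map); i++)
                if (!(bp->link_params.speed_cap_mask & cap_map[i].speed_cap))
                        bp->port.supported &= ~cap_map[i].supported;
}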
8540
8541 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8542 {
8543         bp->link_params.req_duplex = DUPLEX_FULL;
8544
8545         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8546         case PORT_FEATURE_LINK_SPEED_AUTO:
8547                 if (bp->port.supported & SUPPORTED_Autoneg) {
8548                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8549                         bp->port.advertising = bp->port.supported;
8550                 } else {
8551                         u32 ext_phy_type =
8552                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8553
8554                         if ((ext_phy_type ==
8555                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8556                             (ext_phy_type ==
8557                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8558                                 /* force 10G, no AN */
8559                                 bp->link_params.req_line_speed = SPEED_10000;
8560                                 bp->port.advertising =
8561                                                 (ADVERTISED_10000baseT_Full |
8562                                                  ADVERTISED_FIBRE);
8563                                 break;
8564                         }
8565                         BNX2X_ERR("NVRAM config error. "
8566                                   "Invalid link_config 0x%x"
8567                                   "  Autoneg not supported\n",
8568                                   bp->port.link_config);
8569                         return;
8570                 }
8571                 break;
8572
8573         case PORT_FEATURE_LINK_SPEED_10M_FULL:
8574                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8575                         bp->link_params.req_line_speed = SPEED_10;
8576                         bp->port.advertising = (ADVERTISED_10baseT_Full |
8577                                                 ADVERTISED_TP);
8578                 } else {
8579                         BNX2X_ERR("NVRAM config error. "
8580                                   "Invalid link_config 0x%x"
8581                                   "  speed_cap_mask 0x%x\n",
8582                                   bp->port.link_config,
8583                                   bp->link_params.speed_cap_mask);
8584                         return;
8585                 }
8586                 break;
8587
8588         case PORT_FEATURE_LINK_SPEED_10M_HALF:
8589                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8590                         bp->link_params.req_line_speed = SPEED_10;
8591                         bp->link_params.req_duplex = DUPLEX_HALF;
8592                         bp->port.advertising = (ADVERTISED_10baseT_Half |
8593                                                 ADVERTISED_TP);
8594                 } else {
8595                         BNX2X_ERR("NVRAM config error. "
8596                                   "Invalid link_config 0x%x"
8597                                   "  speed_cap_mask 0x%x\n",
8598                                   bp->port.link_config,
8599                                   bp->link_params.speed_cap_mask);
8600                         return;
8601                 }
8602                 break;
8603
8604         case PORT_FEATURE_LINK_SPEED_100M_FULL:
8605                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8606                         bp->link_params.req_line_speed = SPEED_100;
8607                         bp->port.advertising = (ADVERTISED_100baseT_Full |
8608                                                 ADVERTISED_TP);
8609                 } else {
8610                         BNX2X_ERR("NVRAM config error. "
8611                                   "Invalid link_config 0x%x"
8612                                   "  speed_cap_mask 0x%x\n",
8613                                   bp->port.link_config,
8614                                   bp->link_params.speed_cap_mask);
8615                         return;
8616                 }
8617                 break;
8618
8619         case PORT_FEATURE_LINK_SPEED_100M_HALF:
8620                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8621                         bp->link_params.req_line_speed = SPEED_100;
8622                         bp->link_params.req_duplex = DUPLEX_HALF;
8623                         bp->port.advertising = (ADVERTISED_100baseT_Half |
8624                                                 ADVERTISED_TP);
8625                 } else {
8626                         BNX2X_ERR("NVRAM config error. "
8627                                   "Invalid link_config 0x%x"
8628                                   "  speed_cap_mask 0x%x\n",
8629                                   bp->port.link_config,
8630                                   bp->link_params.speed_cap_mask);
8631                         return;
8632                 }
8633                 break;
8634
8635         case PORT_FEATURE_LINK_SPEED_1G:
8636                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8637                         bp->link_params.req_line_speed = SPEED_1000;
8638                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
8639                                                 ADVERTISED_TP);
8640                 } else {
8641                         BNX2X_ERR("NVRAM config error. "
8642                                   "Invalid link_config 0x%x"
8643                                   "  speed_cap_mask 0x%x\n",
8644                                   bp->port.link_config,
8645                                   bp->link_params.speed_cap_mask);
8646                         return;
8647                 }
8648                 break;
8649
8650         case PORT_FEATURE_LINK_SPEED_2_5G:
8651                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8652                         bp->link_params.req_line_speed = SPEED_2500;
8653                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
8654                                                 ADVERTISED_TP);
8655                 } else {
8656                         BNX2X_ERR("NVRAM config error. "
8657                                   "Invalid link_config 0x%x"
8658                                   "  speed_cap_mask 0x%x\n",
8659                                   bp->port.link_config,
8660                                   bp->link_params.speed_cap_mask);
8661                         return;
8662                 }
8663                 break;
8664
8665         case PORT_FEATURE_LINK_SPEED_10G_CX4:
8666         case PORT_FEATURE_LINK_SPEED_10G_KX4:
8667         case PORT_FEATURE_LINK_SPEED_10G_KR:
8668                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8669                         bp->link_params.req_line_speed = SPEED_10000;
8670                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
8671                                                 ADVERTISED_FIBRE);
8672                 } else {
8673                         BNX2X_ERR("NVRAM config error. "
8674                                   "Invalid link_config 0x%x"
8675                                   "  speed_cap_mask 0x%x\n",
8676                                   bp->port.link_config,
8677                                   bp->link_params.speed_cap_mask);
8678                         return;
8679                 }
8680                 break;
8681
8682         default:
8683                 BNX2X_ERR("NVRAM config error. "
8684                           "BAD link speed link_config 0x%x\n",
8685                           bp->port.link_config);
8686                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8687                 bp->port.advertising = bp->port.supported;
8688                 break;
8689         }
8690
8691         bp->link_params.req_flow_ctrl = (bp->port.link_config &
8692                                          PORT_FEATURE_FLOW_CONTROL_MASK);
8693         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8694             !(bp->port.supported & SUPPORTED_Autoneg))
8695                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8696
8697         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
8698                        "  advertising 0x%x\n",
8699                        bp->link_params.req_line_speed,
8700                        bp->link_params.req_duplex,
8701                        bp->link_params.req_flow_ctrl, bp->port.advertising);
8702 }
8703
8704 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8705 {
8706         mac_hi = cpu_to_be16(mac_hi);
8707         mac_lo = cpu_to_be32(mac_lo);
8708         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8709         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8710 }
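
/* Worked example (illustrative values): with mac_hi = 0x001a and
 * mac_lo = 0x2b3c4d5e, the cpu_to_be16()/cpu_to_be32() conversions
 * store the most significant byte first, so mac_buf ends up holding
 * 00:1a:2b:3c:4d:5e regardless of host endianness.
 */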
8711
8712 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8713 {
8714         int port = BP_PORT(bp);
8715         u32 val, val2;
8716         u32 config;
8717         u16 i;
8718         u32 ext_phy_type;
8719
8720         bp->link_params.bp = bp;
8721         bp->link_params.port = port;
8722
8723         bp->link_params.lane_config =
8724                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8725         bp->link_params.ext_phy_config =
8726                 SHMEM_RD(bp,
8727                          dev_info.port_hw_config[port].external_phy_config);
8728         /* BCM8727_NOC => BCM8727 with no over-current detection */
8729         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8730             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8731                 bp->link_params.ext_phy_config &=
8732                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8733                 bp->link_params.ext_phy_config |=
8734                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8735                 bp->link_params.feature_config_flags |=
8736                         FEATURE_CONFIG_BCM8727_NOC;
8737         }
8738
8739         bp->link_params.speed_cap_mask =
8740                 SHMEM_RD(bp,
8741                          dev_info.port_hw_config[port].speed_capability_mask);
8742
8743         bp->port.link_config =
8744                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8745
8746         /* Get the per-lane XGXS Rx and Tx config for all 4 lanes */
8747         for (i = 0; i < 2; i++) {
8748                 val = SHMEM_RD(bp,
8749                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8750                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8751                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8752
8753                 val = SHMEM_RD(bp,
8754                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8755                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8756                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8757         }
8758
8759         /* If the device is capable of WoL, set the default state according
8760          * to the HW
8761          */
8762         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8763         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8764                    (config & PORT_FEATURE_WOL_ENABLED));
8765
8766         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
8767                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
8768                        bp->link_params.lane_config,
8769                        bp->link_params.ext_phy_config,
8770                        bp->link_params.speed_cap_mask, bp->port.link_config);
8771
8772         bp->link_params.switch_cfg |= (bp->port.link_config &
8773                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
8774         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8775
8776         bnx2x_link_settings_requested(bp);
8777
8778         /*
8779          * If connected directly, work with the internal PHY, otherwise, work
8780          * with the external PHY
8781          */
8782         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8783         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8784                 bp->mdio.prtad = bp->link_params.phy_addr;
8785
8786         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8787                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8788                 bp->mdio.prtad =
8789                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8790
8791         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8792         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8793         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8794         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8795         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8796
8797 #ifdef BCM_CNIC
8798         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8799         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8800         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8801 #endif
8802 }
8803
8804 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8805 {
8806         int func = BP_FUNC(bp);
8807         u32 val, val2;
8808         int rc = 0;
8809
8810         bnx2x_get_common_hwinfo(bp);
8811
8812         bp->e1hov = 0;
8813         bp->e1hmf = 0;
8814         if (CHIP_IS_E1H(bp)) {
8815                 bp->mf_config =
8816                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8817
8818                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
8819                        FUNC_MF_CFG_E1HOV_TAG_MASK);
8820                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8821                         bp->e1hmf = 1;
8822                 BNX2X_DEV_INFO("%s function mode\n",
8823                                IS_E1HMF(bp) ? "multi" : "single");
8824
8825                 if (IS_E1HMF(bp)) {
8826                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8827                                                                 e1hov_tag) &
8828                                FUNC_MF_CFG_E1HOV_TAG_MASK);
8829                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8830                                 bp->e1hov = val;
8831                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8832                                                "(0x%04x)\n",
8833                                                func, bp->e1hov, bp->e1hov);
8834                         } else {
8835                                 BNX2X_ERR("!!!  No valid E1HOV for func %d,"
8836                                           "  aborting\n", func);
8837                                 rc = -EPERM;
8838                         }
8839                 } else {
8840                         if (BP_E1HVN(bp)) {
8841                                 BNX2X_ERR("!!!  VN %d in single function mode,"
8842                                           "  aborting\n", BP_E1HVN(bp));
8843                                 rc = -EPERM;
8844                         }
8845                 }
8846         }
8847
8848         if (!BP_NOMCP(bp)) {
8849                 bnx2x_get_port_hwinfo(bp);
8850
8851                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8852                               DRV_MSG_SEQ_NUMBER_MASK);
8853                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8854         }
8855
8856         if (IS_E1HMF(bp)) {
8857                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8858                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
8859                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8860                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8861                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8862                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8863                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8864                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8865                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
8866                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
8867                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8868                                ETH_ALEN);
8869                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8870                                ETH_ALEN);
8871                 }
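                /* Worked example (illustrative values): mac_upper = 0x001a
                 * and mac_lower = 0x2b3c4d5e yield dev_addr
                 * 00:1a:2b:3c:4d:5e, the same layout bnx2x_set_mac_buf()
                 * produces from the port-level shmem fields.
                 */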
8872
8873                 return rc;
8874         }
8875
8876         if (BP_NOMCP(bp)) {
8877                 /* only supposed to happen on emulation/FPGA */
8878                 BNX2X_ERR("warning: random MAC workaround active\n");
8879                 random_ether_addr(bp->dev->dev_addr);
8880                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8881         }
8882
8883         return rc;
8884 }
8885
8886 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8887 {
8888         int func = BP_FUNC(bp);
8889         int timer_interval;
8890         int rc;
8891
8892         /* Disable interrupt handling until HW is initialized */
8893         atomic_set(&bp->intr_sem, 1);
8894         smp_wmb(); /* Make the bp->intr_sem update visible to other CPUs */
8895
8896         mutex_init(&bp->port.phy_mutex);
8897         mutex_init(&bp->fw_mb_mutex);
8898 #ifdef BCM_CNIC
8899         mutex_init(&bp->cnic_mutex);
8900 #endif
8901
8902         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8903         INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8904
8905         rc = bnx2x_get_hwinfo(bp);
8906
8907         /* Need to reset the chip if UNDI was active */
8908         if (!BP_NOMCP(bp))
8909                 bnx2x_undi_unload(bp);
8910
8911         if (CHIP_REV_IS_FPGA(bp))
8912                 printk(KERN_ERR PFX "FPGA detected\n");
8913
8914         if (BP_NOMCP(bp) && (func == 0))
8915                 printk(KERN_ERR PFX
8916                        "MCP disabled, must load devices in order!\n");
8917
8918         /* Set multi queue mode */
8919         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8920             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8921                 printk(KERN_ERR PFX
8922                       "Multi disabled since int_mode requested is not MSI-X\n");
8923                 multi_mode = ETH_RSS_MODE_DISABLED;
8924         }
8925         bp->multi_mode = multi_mode;
8926
8927
8928         /* Set TPA flags */
8929         if (disable_tpa) {
8930                 bp->flags &= ~TPA_ENABLE_FLAG;
8931                 bp->dev->features &= ~NETIF_F_LRO;
8932         } else {
8933                 bp->flags |= TPA_ENABLE_FLAG;
8934                 bp->dev->features |= NETIF_F_LRO;
8935         }
8936
8937         if (CHIP_IS_E1(bp))
8938                 bp->dropless_fc = 0;
8939         else
8940                 bp->dropless_fc = dropless_fc;
8941
8942         bp->mrrs = mrrs;
8943
8944         bp->tx_ring_size = MAX_TX_AVAIL;
8945         bp->rx_ring_size = MAX_RX_AVAIL;
8946
8947         bp->rx_csum = 1;
8948
8949         /* Round the coalescing timeouts down to the 4*BNX2X_BTR granularity */
8950         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8951         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
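        /* Worked example of the rounding (hypothetical: if BNX2X_BTR were 4,
         * the granularity would be 4 * 4 = 16):
         *   tx: (50 / 16) * 16 = 48 ticks,  rx: (25 / 16) * 16 = 16 ticks;
         * integer division rounds each value down to the granularity.
         */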
8952
8953         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8954         bp->current_interval = (poll ? poll : timer_interval);
8955
8956         init_timer(&bp->timer);
8957         bp->timer.expires = jiffies + bp->current_interval;
8958         bp->timer.data = (unsigned long) bp;
8959         bp->timer.function = bnx2x_timer;
8960
8961         return rc;
8962 }
8963
8964 /*
8965  * ethtool service functions
8966  */
8967
8968 /* All ethtool functions called with rtnl_lock */
8969
8970 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8971 {
8972         struct bnx2x *bp = netdev_priv(dev);
8973
8974         cmd->supported = bp->port.supported;
8975         cmd->advertising = bp->port.advertising;
8976
8977         if ((bp->state == BNX2X_STATE_OPEN) &&
8978             !(bp->flags & MF_FUNC_DIS) &&
8979             (bp->link_vars.link_up)) {
8980                 cmd->speed = bp->link_vars.line_speed;
8981                 cmd->duplex = bp->link_vars.duplex;
8982                 if (IS_E1HMF(bp)) {
8983                         u16 vn_max_rate;
8984
8985                         vn_max_rate =
8986                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8987                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8988                         if (vn_max_rate < cmd->speed)
8989                                 cmd->speed = vn_max_rate;
8990                 }
8991         } else {
8992                 cmd->speed = -1;
8993                 cmd->duplex = -1;
8994         }
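        /* Worked example (illustrative): a max-BW field of 25 in mf_config
         * gives vn_max_rate = 25 * 100 = 2500, i.e. the per-function
         * bandwidth limit is configured in units of 100 Mbps and caps the
         * speed reported above.
         */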
8995
8996         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8997                 u32 ext_phy_type =
8998                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8999
9000                 switch (ext_phy_type) {
9001                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9002                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9003                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9004                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9005                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9006                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9007                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9008                         cmd->port = PORT_FIBRE;
9009                         break;
9010
9011                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9012                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9013                         cmd->port = PORT_TP;
9014                         break;
9015
9016                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9017                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9018                                   bp->link_params.ext_phy_config);
9019                         break;
9020
9021                 default:
9022                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
9023                            bp->link_params.ext_phy_config);
9024                         break;
9025                 }
9026         } else
9027                 cmd->port = PORT_TP;
9028
9029         cmd->phy_address = bp->mdio.prtad;
9030         cmd->transceiver = XCVR_INTERNAL;
9031
9032         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9033                 cmd->autoneg = AUTONEG_ENABLE;
9034         else
9035                 cmd->autoneg = AUTONEG_DISABLE;
9036
9037         cmd->maxtxpkt = 0;
9038         cmd->maxrxpkt = 0;
9039
9040         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9041            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9042            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9043            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9044            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9045            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9046            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9047
9048         return 0;
9049 }
9050
9051 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9052 {
9053         struct bnx2x *bp = netdev_priv(dev);
9054         u32 advertising;
9055
9056         if (IS_E1HMF(bp))
9057                 return 0;
9058
9059         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
9060            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
9061            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
9062            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
9063            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
9064            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
9065            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
9066
9067         if (cmd->autoneg == AUTONEG_ENABLE) {
9068                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9069                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
9070                         return -EINVAL;
9071                 }
9072
9073                 /* advertise the requested speed and duplex if supported */
9074                 cmd->advertising &= bp->port.supported;
9075
9076                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9077                 bp->link_params.req_duplex = DUPLEX_FULL;
9078                 bp->port.advertising |= (ADVERTISED_Autoneg |
9079                                          cmd->advertising);
9080
9081         } else { /* forced speed */
9082                 /* advertise the requested speed and duplex if supported */
9083                 switch (cmd->speed) {
9084                 case SPEED_10:
9085                         if (cmd->duplex == DUPLEX_FULL) {
9086                                 if (!(bp->port.supported &
9087                                       SUPPORTED_10baseT_Full)) {
9088                                         DP(NETIF_MSG_LINK,
9089                                            "10M full not supported\n");
9090                                         return -EINVAL;
9091                                 }
9092
9093                                 advertising = (ADVERTISED_10baseT_Full |
9094                                                ADVERTISED_TP);
9095                         } else {
9096                                 if (!(bp->port.supported &
9097                                       SUPPORTED_10baseT_Half)) {
9098                                         DP(NETIF_MSG_LINK,
9099                                            "10M half not supported\n");
9100                                         return -EINVAL;
9101                                 }
9102
9103                                 advertising = (ADVERTISED_10baseT_Half |
9104                                                ADVERTISED_TP);
9105                         }
9106                         break;
9107
9108                 case SPEED_100:
9109                         if (cmd->duplex == DUPLEX_FULL) {
9110                                 if (!(bp->port.supported &
9111                                                 SUPPORTED_100baseT_Full)) {
9112                                         DP(NETIF_MSG_LINK,
9113                                            "100M full not supported\n");
9114                                         return -EINVAL;
9115                                 }
9116
9117                                 advertising = (ADVERTISED_100baseT_Full |
9118                                                ADVERTISED_TP);
9119                         } else {
9120                                 if (!(bp->port.supported &
9121                                                 SUPPORTED_100baseT_Half)) {
9122                                         DP(NETIF_MSG_LINK,
9123                                            "100M half not supported\n");
9124                                         return -EINVAL;
9125                                 }
9126
9127                                 advertising = (ADVERTISED_100baseT_Half |
9128                                                ADVERTISED_TP);
9129                         }
9130                         break;
9131
9132                 case SPEED_1000:
9133                         if (cmd->duplex != DUPLEX_FULL) {
9134                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
9135                                 return -EINVAL;
9136                         }
9137
9138                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
9139                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
9140                                 return -EINVAL;
9141                         }
9142
9143                         advertising = (ADVERTISED_1000baseT_Full |
9144                                        ADVERTISED_TP);
9145                         break;
9146
9147                 case SPEED_2500:
9148                         if (cmd->duplex != DUPLEX_FULL) {
9149                                 DP(NETIF_MSG_LINK,
9150                                    "2.5G half not supported\n");
9151                                 return -EINVAL;
9152                         }
9153
9154                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
9155                                 DP(NETIF_MSG_LINK,
9156                                    "2.5G full not supported\n");
9157                                 return -EINVAL;
9158                         }
9159
9160                         advertising = (ADVERTISED_2500baseX_Full |
9161                                        ADVERTISED_TP);
9162                         break;
9163
9164                 case SPEED_10000:
9165                         if (cmd->duplex != DUPLEX_FULL) {
9166                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
9167                                 return -EINVAL;
9168                         }
9169
9170                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
9171                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
9172                                 return -EINVAL;
9173                         }
9174
9175                         advertising = (ADVERTISED_10000baseT_Full |
9176                                        ADVERTISED_FIBRE);
9177                         break;
9178
9179                 default:
9180                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
9181                         return -EINVAL;
9182                 }
9183
9184                 bp->link_params.req_line_speed = cmd->speed;
9185                 bp->link_params.req_duplex = cmd->duplex;
9186                 bp->port.advertising = advertising;
9187         }
9188
9189         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
9190            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
9191            bp->link_params.req_line_speed, bp->link_params.req_duplex,
9192            bp->port.advertising);
9193
9194         if (netif_running(dev)) {
9195                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9196                 bnx2x_link_set(bp);
9197         }
9198
9199         return 0;
9200 }
9201
9202 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
9203 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
9204
9205 static int bnx2x_get_regs_len(struct net_device *dev)
9206 {
9207         struct bnx2x *bp = netdev_priv(dev);
9208         int regdump_len = 0;
9209         int i;
9210
9211         if (CHIP_IS_E1(bp)) {
9212                 for (i = 0; i < REGS_COUNT; i++)
9213                         if (IS_E1_ONLINE(reg_addrs[i].info))
9214                                 regdump_len += reg_addrs[i].size;
9215
9216                 for (i = 0; i < WREGS_COUNT_E1; i++)
9217                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
9218                                 regdump_len += wreg_addrs_e1[i].size *
9219                                         (1 + wreg_addrs_e1[i].read_regs_count);
9220
9221         } else { /* E1H */
9222                 for (i = 0; i < REGS_COUNT; i++)
9223                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9224                                 regdump_len += reg_addrs[i].size;
9225
9226                 for (i = 0; i < WREGS_COUNT_E1H; i++)
9227                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
9228                                 regdump_len += wreg_addrs_e1h[i].size *
9229                                         (1 + wreg_addrs_e1h[i].read_regs_count);
9230         }
9231         regdump_len *= 4;
9232         regdump_len += sizeof(struct dump_hdr);
9233
9234         return regdump_len;
9235 }
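
/* Worked example (illustrative numbers): a wide-bus entry with size 2 and
 * read_regs_count 3 contributes 2 * (1 + 3) = 8 words. All counts are in
 * 32-bit words, hence the final regdump_len *= 4 to get bytes before the
 * dump header size is added.
 */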
9236
9237 static void bnx2x_get_regs(struct net_device *dev,
9238                            struct ethtool_regs *regs, void *_p)
9239 {
9240         u32 *p = _p, i, j;
9241         struct bnx2x *bp = netdev_priv(dev);
9242         struct dump_hdr dump_hdr = {0};
9243
9244         regs->version = 0;
9245         memset(p, 0, regs->len);
9246
9247         if (!netif_running(bp->dev))
9248                 return;
9249
9250         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9251         dump_hdr.dump_sign = dump_sign_all;
9252         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9253         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9254         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9255         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9256         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9257
9258         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9259         p += dump_hdr.hdr_size + 1;
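        /* hdr_size was stored as (words - 1), so advancing the u32 cursor by
         * hdr_size + 1 steps exactly past the header just copied in.
         */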
9260
9261         if (CHIP_IS_E1(bp)) {
9262                 for (i = 0; i < REGS_COUNT; i++)
9263                         if (IS_E1_ONLINE(reg_addrs[i].info))
9264                                 for (j = 0; j < reg_addrs[i].size; j++)
9265                                         *p++ = REG_RD(bp,
9266                                                       reg_addrs[i].addr + j*4);
9267
9268         } else { /* E1H */
9269                 for (i = 0; i < REGS_COUNT; i++)
9270                         if (IS_E1H_ONLINE(reg_addrs[i].info))
9271                                 for (j = 0; j < reg_addrs[i].size; j++)
9272                                         *p++ = REG_RD(bp,
9273                                                       reg_addrs[i].addr + j*4);
9274         }
9275 }
9276
9277 #define PHY_FW_VER_LEN                  10
9278
9279 static void bnx2x_get_drvinfo(struct net_device *dev,
9280                               struct ethtool_drvinfo *info)
9281 {
9282         struct bnx2x *bp = netdev_priv(dev);
9283         u8 phy_fw_ver[PHY_FW_VER_LEN];
9284
9285         strcpy(info->driver, DRV_MODULE_NAME);
9286         strcpy(info->version, DRV_MODULE_VERSION);
9287
9288         phy_fw_ver[0] = '\0';
9289         if (bp->port.pmf) {
9290                 bnx2x_acquire_phy_lock(bp);
9291                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
9292                                              (bp->state != BNX2X_STATE_CLOSED),
9293                                              phy_fw_ver, PHY_FW_VER_LEN);
9294                 bnx2x_release_phy_lock(bp);
9295         }
9296
9297         snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
9298                  (bp->common.bc_ver & 0xff0000) >> 16,
9299                  (bp->common.bc_ver & 0xff00) >> 8,
9300                  (bp->common.bc_ver & 0xff),
9301                  ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
9302         strcpy(info->bus_info, pci_name(bp->pdev));
9303         info->n_stats = BNX2X_NUM_STATS;
9304         info->testinfo_len = BNX2X_NUM_TESTS;
9305         info->eedump_len = bp->common.flash_size;
9306         info->regdump_len = bnx2x_get_regs_len(dev);
9307 }
9308
9309 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9310 {
9311         struct bnx2x *bp = netdev_priv(dev);
9312
9313         if (bp->flags & NO_WOL_FLAG) {
9314                 wol->supported = 0;
9315                 wol->wolopts = 0;
9316         } else {
9317                 wol->supported = WAKE_MAGIC;
9318                 if (bp->wol)
9319                         wol->wolopts = WAKE_MAGIC;
9320                 else
9321                         wol->wolopts = 0;
9322         }
9323         memset(&wol->sopass, 0, sizeof(wol->sopass));
9324 }
9325
9326 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9327 {
9328         struct bnx2x *bp = netdev_priv(dev);
9329
9330         if (wol->wolopts & ~WAKE_MAGIC)
9331                 return -EINVAL;
9332
9333         if (wol->wolopts & WAKE_MAGIC) {
9334                 if (bp->flags & NO_WOL_FLAG)
9335                         return -EINVAL;
9336
9337                 bp->wol = 1;
9338         } else
9339                 bp->wol = 0;
9340
9341         return 0;
9342 }
9343
9344 static u32 bnx2x_get_msglevel(struct net_device *dev)
9345 {
9346         struct bnx2x *bp = netdev_priv(dev);
9347
9348         return bp->msglevel;
9349 }
9350
9351 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9352 {
9353         struct bnx2x *bp = netdev_priv(dev);
9354
9355         if (capable(CAP_NET_ADMIN))
9356                 bp->msglevel = level;
9357 }
9358
9359 static int bnx2x_nway_reset(struct net_device *dev)
9360 {
9361         struct bnx2x *bp = netdev_priv(dev);
9362
9363         if (!bp->port.pmf)
9364                 return 0;
9365
9366         if (netif_running(dev)) {
9367                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9368                 bnx2x_link_set(bp);
9369         }
9370
9371         return 0;
9372 }
9373
9374 static u32 bnx2x_get_link(struct net_device *dev)
9375 {
9376         struct bnx2x *bp = netdev_priv(dev);
9377
9378         if (bp->flags & MF_FUNC_DIS)
9379                 return 0;
9380
9381         return bp->link_vars.link_up;
9382 }
9383
9384 static int bnx2x_get_eeprom_len(struct net_device *dev)
9385 {
9386         struct bnx2x *bp = netdev_priv(dev);
9387
9388         return bp->common.flash_size;
9389 }
9390
9391 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9392 {
9393         int port = BP_PORT(bp);
9394         int count, i;
9395         u32 val = 0;
9396
9397         /* adjust timeout for emulation/FPGA */
9398         count = NVRAM_TIMEOUT_COUNT;
9399         if (CHIP_REV_IS_SLOW(bp))
9400                 count *= 100;
9401
9402         /* request access to nvram interface */
9403         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9404                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9405
9406         for (i = 0; i < count*10; i++) {
9407                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9408                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9409                         break;
9410
9411                 udelay(5);
9412         }
9413
9414         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
9415                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
9416                 return -EBUSY;
9417         }
9418
9419         return 0;
9420 }
9421
9422 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9423 {
9424         int port = BP_PORT(bp);
9425         int count, i;
9426         u32 val = 0;
9427
9428         /* adjust timeout for emulation/FPGA */
9429         count = NVRAM_TIMEOUT_COUNT;
9430         if (CHIP_REV_IS_SLOW(bp))
9431                 count *= 100;
9432
9433         /* relinquish nvram interface */
9434         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9435                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9436
9437         for (i = 0; i < count*10; i++) {
9438                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9439                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9440                         break;
9441
9442                 udelay(5);
9443         }
9444
9445         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
9446                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
9447                 return -EBUSY;
9448         }
9449
9450         return 0;
9451 }
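
/*
 * Illustrative sketch (userspace C, not driver code) of the poll-with-timeout
 * idiom both NVRAM lock helpers above use: request the arbiter, then spin
 * reading the status until the grant bit appears (acquire) or clears
 * (release), giving up with -EBUSY after a bounded number of tries. The rd()
 * callback and the names here are hypothetical stand-ins for REG_RD()/udelay().
 */
#include <stdint.h>

int poll_for_bit(uint32_t (*rd)(void), uint32_t bit, int want_set, int tries)
{
        while (tries--) {
                uint32_t val = rd();

                /* acquire waits for the bit to appear, release for it to clear */
                if (want_set ? (val & bit) : !(val & bit))
                        return 0;
                /* udelay(5) would go here in the driver */
        }
        return -1;              /* -EBUSY in the driver */
}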
9452
9453 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9454 {
9455         u32 val;
9456
9457         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9458
9459         /* enable both bits, even on read */
9460         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9461                (val | MCPR_NVM_ACCESS_ENABLE_EN |
9462                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
9463 }
9464
9465 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9466 {
9467         u32 val;
9468
9469         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9470
9471         /* disable both bits, even after read */
9472         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9473                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9474                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9475 }
9476
9477 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
9478                                   u32 cmd_flags)
9479 {
9480         int count, i, rc;
9481         u32 val;
9482
9483         /* build the command word */
9484         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9485
9486         /* need to clear DONE bit separately */
9487         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9488
9489         /* address of the NVRAM to read from */
9490         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9491                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9492
9493         /* issue a read command */
9494         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9495
9496         /* adjust timeout for emulation/FPGA */
9497         count = NVRAM_TIMEOUT_COUNT;
9498         if (CHIP_REV_IS_SLOW(bp))
9499                 count *= 100;
9500
9501         /* wait for completion */
9502         *ret_val = 0;
9503         rc = -EBUSY;
9504         for (i = 0; i < count; i++) {
9505                 udelay(5);
9506                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9507
9508                 if (val & MCPR_NVM_COMMAND_DONE) {
9509                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
9510                         /* we read nvram data in cpu order,
9511                          * but ethtool sees it as an array of bytes;
9512                          * converting to big-endian does the work */
9513                         *ret_val = cpu_to_be32(val);
9514                         rc = 0;
9515                         break;
9516                 }
9517         }
9518
9519         return rc;
9520 }
9521
9522 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9523                             int buf_size)
9524 {
9525         int rc;
9526         u32 cmd_flags;
9527         __be32 val;
9528
9529         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9530                 DP(BNX2X_MSG_NVM,
9531                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9532                    offset, buf_size);
9533                 return -EINVAL;
9534         }
9535
9536         if (offset + buf_size > bp->common.flash_size) {
9537                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9538                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9539                    offset, buf_size, bp->common.flash_size);
9540                 return -EINVAL;
9541         }
9542
9543         /* request access to nvram interface */
9544         rc = bnx2x_acquire_nvram_lock(bp);
9545         if (rc)
9546                 return rc;
9547
9548         /* enable access to nvram interface */
9549         bnx2x_enable_nvram_access(bp);
9550
9551         /* read the first word(s) */
9552         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9553         while ((buf_size > sizeof(u32)) && (rc == 0)) {
9554                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9555                 memcpy(ret_buf, &val, 4);
9556
9557                 /* advance to the next dword */
9558                 offset += sizeof(u32);
9559                 ret_buf += sizeof(u32);
9560                 buf_size -= sizeof(u32);
9561                 cmd_flags = 0;
9562         }
9563
9564         if (rc == 0) {
9565                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9566                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9567                 memcpy(ret_buf, &val, 4);
9568         }
9569
9570         /* disable access to nvram interface */
9571         bnx2x_disable_nvram_access(bp);
9572         bnx2x_release_nvram_lock(bp);
9573
9574         return rc;
9575 }
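
/*
 * Illustrative sketch (userspace C, not driver code) of the FIRST/LAST
 * sequencing bnx2x_nvram_read() uses: every access moves one dword, the
 * first access carries FIRST and the final one carries LAST so the
 * controller can bracket the burst. fake_read_dword() and the FLAG_*
 * values are hypothetical stand-ins for the MCPR command interface.
 */
#include <stdint.h>
#include <string.h>

#define FLAG_FIRST      0x1
#define FLAG_LAST       0x2

static uint8_t fake_nvram[64];  /* pretend flash contents */

static int fake_read_dword(uint32_t off, uint32_t *val, uint32_t flags)
{
        (void)flags;            /* a real controller latches these */
        memcpy(val, &fake_nvram[off], 4);
        return 0;
}

static int model_nvram_read(uint32_t off, uint8_t *buf, int size)
{
        uint32_t flags = FLAG_FIRST, val;
        int rc = 0;

        /* all but the final dword; FIRST only on the very first access */
        while (size > 4 && rc == 0) {
                rc = fake_read_dword(off, &val, flags);
                memcpy(buf, &val, 4);
                off += 4; buf += 4; size -= 4;
                flags = 0;
        }
        /* the final dword closes the burst with LAST */
        if (rc == 0) {
                rc = fake_read_dword(off, &val, flags | FLAG_LAST);
                memcpy(buf, &val, 4);
        }
        return rc;
}

int main(void)
{
        uint8_t out[16];

        return model_nvram_read(0, out, sizeof(out));
}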
9576
9577 static int bnx2x_get_eeprom(struct net_device *dev,
9578                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9579 {
9580         struct bnx2x *bp = netdev_priv(dev);
9581         int rc;
9582
9583         if (!netif_running(dev))
9584                 return -EAGAIN;
9585
9586         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9587            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9588            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9589            eeprom->len, eeprom->len);
9590
9591         /* parameters already validated in ethtool_get_eeprom */
9592
9593         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9594
9595         return rc;
9596 }
9597
9598 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9599                                    u32 cmd_flags)
9600 {
9601         int count, i, rc;
9602
9603         /* build the command word */
9604         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9605
9606         /* need to clear DONE bit separately */
9607         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9608
9609         /* write the data */
9610         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9611
9612         /* address of the NVRAM to write to */
9613         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9614                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9615
9616         /* issue the write command */
9617         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9618
9619         /* adjust timeout for emulation/FPGA */
9620         count = NVRAM_TIMEOUT_COUNT;
9621         if (CHIP_REV_IS_SLOW(bp))
9622                 count *= 100;
9623
9624         /* wait for completion */
9625         rc = -EBUSY;
9626         for (i = 0; i < count; i++) {
9627                 udelay(5);
9628                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9629                 if (val & MCPR_NVM_COMMAND_DONE) {
9630                         rc = 0;
9631                         break;
9632                 }
9633         }
9634
9635         return rc;
9636 }
9637
9638 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
9639
9640 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9641                               int buf_size)
9642 {
9643         int rc;
9644         u32 cmd_flags;
9645         u32 align_offset;
9646         __be32 val;
9647
9648         if (offset + buf_size > bp->common.flash_size) {
9649                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9650                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9651                    offset, buf_size, bp->common.flash_size);
9652                 return -EINVAL;
9653         }
9654
9655         /* request access to nvram interface */
9656         rc = bnx2x_acquire_nvram_lock(bp);
9657         if (rc)
9658                 return rc;
9659
9660         /* enable access to nvram interface */
9661         bnx2x_enable_nvram_access(bp);
9662
9663         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9664         align_offset = (offset & ~0x03);
9665         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9666
9667         if (rc == 0) {
9668                 val &= ~(0xff << BYTE_OFFSET(offset));
9669                 val |= (*data_buf << BYTE_OFFSET(offset));
9670
9671                 /* nvram data is returned as an array of bytes;
9672                  * convert it back to cpu order */
9673                 val = be32_to_cpu(val);
9674
9675                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9676                                              cmd_flags);
9677         }
9678
9679         /* disable access to nvram interface */
9680         bnx2x_disable_nvram_access(bp);
9681         bnx2x_release_nvram_lock(bp);
9682
9683         return rc;
9684 }
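
/*
 * Illustrative sketch (userspace C, not driver code) of the read-modify-write
 * byte patch bnx2x_nvram_write1() performs on one dword: BYTE_OFFSET() picks
 * the shift for the byte within the aligned dword, the old byte lane is
 * cleared, and the new byte is spliced in. (In the driver the dword is __be32
 * so the lanes follow flash byte order; this sketch shows only the shift
 * arithmetic.)
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_BYTE_OFFSET(offset)      (8 * ((offset) & 0x03))

static uint32_t patch_byte(uint32_t dword, uint32_t offset, uint8_t b)
{
        dword &= ~(0xffu << SKETCH_BYTE_OFFSET(offset)); /* clear target byte */
        dword |= (uint32_t)b << SKETCH_BYTE_OFFSET(offset); /* splice new byte */
        return dword;
}

int main(void)
{
        /* patch the byte at flash offset 0x101 inside the dword at 0x100 */
        printf("%08x\n", patch_byte(0x11223344, 0x101, 0xab)); /* 1122ab44 */
        return 0;
}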
9685
9686 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9687                              int buf_size)
9688 {
9689         int rc;
9690         u32 cmd_flags;
9691         u32 val;
9692         u32 written_so_far;
9693
9694         if (buf_size == 1)      /* ethtool */
9695                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9696
9697         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9698                 DP(BNX2X_MSG_NVM,
9699                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
9700                    offset, buf_size);
9701                 return -EINVAL;
9702         }
9703
9704         if (offset + buf_size > bp->common.flash_size) {
9705                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9706                                   " buf_size (0x%x) > flash_size (0x%x)\n",
9707                    offset, buf_size, bp->common.flash_size);
9708                 return -EINVAL;
9709         }
9710
9711         /* request access to nvram interface */
9712         rc = bnx2x_acquire_nvram_lock(bp);
9713         if (rc)
9714                 return rc;
9715
9716         /* enable access to nvram interface */
9717         bnx2x_enable_nvram_access(bp);
9718
9719         written_so_far = 0;
9720         cmd_flags = MCPR_NVM_COMMAND_FIRST;
9721         while ((written_so_far < buf_size) && (rc == 0)) {
9722                 if (written_so_far == (buf_size - sizeof(u32)))
9723                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9724                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9725                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
9726                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9727                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9728
9729                 memcpy(&val, data_buf, 4);
9730
9731                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9732
9733                 /* advance to the next dword */
9734                 offset += sizeof(u32);
9735                 data_buf += sizeof(u32);
9736                 written_so_far += sizeof(u32);
9737                 cmd_flags = 0;
9738         }
9739
9740         /* disable access to nvram interface */
9741         bnx2x_disable_nvram_access(bp);
9742         bnx2x_release_nvram_lock(bp);
9743
9744         return rc;
9745 }
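
/*
 * Sketch of the FIRST/LAST flag selection in the write loop above
 * (illustrative; the page size value is an assumption of this sketch, not
 * taken from the driver). A dword gets LAST when it ends the buffer or ends
 * a flash page, and FIRST when it starts the buffer or starts a new page.
 */
#include <stdint.h>

#define SKETCH_PAGE_SIZE        256     /* assumed page size */
#define WR_FIRST                0x1
#define WR_LAST                 0x2

uint32_t write_flags(uint32_t offset, int written, int total)
{
        uint32_t flags = (written == 0) ? WR_FIRST : 0;

        if (written == total - 4)                        /* last dword of buffer */
                flags |= WR_LAST;
        else if (((offset + 4) % SKETCH_PAGE_SIZE) == 0) /* page ends here */
                flags |= WR_LAST;
        else if ((offset % SKETCH_PAGE_SIZE) == 0)       /* new page begins */
                flags |= WR_FIRST;
        return flags;
}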
9746
9747 static int bnx2x_set_eeprom(struct net_device *dev,
9748                             struct ethtool_eeprom *eeprom, u8 *eebuf)
9749 {
9750         struct bnx2x *bp = netdev_priv(dev);
9751         int port = BP_PORT(bp);
9752         int rc = 0;
9753
9754         if (!netif_running(dev))
9755                 return -EAGAIN;
9756
9757         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9758            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
9759            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9760            eeprom->len, eeprom->len);
9761
9762         /* parameters already validated in ethtool_set_eeprom */
9763
9764         /* PHY eeprom can be accessed only by the PMF */
9765         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9766             !bp->port.pmf)
9767                 return -EINVAL;
9768
9769         if (eeprom->magic == 0x50485950) {
9770                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9771                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9772
9773                 bnx2x_acquire_phy_lock(bp);
9774                 rc |= bnx2x_link_reset(&bp->link_params,
9775                                        &bp->link_vars, 0);
9776                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9777                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9778                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9779                                        MISC_REGISTERS_GPIO_HIGH, port);
9780                 bnx2x_release_phy_lock(bp);
9781                 bnx2x_link_report(bp);
9782
9783         } else if (eeprom->magic == 0x50485952) {
9784                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9785                 if (bp->state == BNX2X_STATE_OPEN) {
9786                         bnx2x_acquire_phy_lock(bp);
9787                         rc |= bnx2x_link_reset(&bp->link_params,
9788                                                &bp->link_vars, 1);
9789
9790                         rc |= bnx2x_phy_init(&bp->link_params,
9791                                              &bp->link_vars);
9792                         bnx2x_release_phy_lock(bp);
9793                         bnx2x_calc_fc_adv(bp);
9794                 }
9795         } else if (eeprom->magic == 0x53985943) {
9796                 /* 'PHYC' (0x50485943): PHY FW upgrade completed */
9797                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9798                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9799                         u8 ext_phy_addr =
9800                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9801
9802                         /* DSP Remove Download Mode */
9803                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9804                                        MISC_REGISTERS_GPIO_LOW, port);
9805
9806                         bnx2x_acquire_phy_lock(bp);
9807
9808                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9809
9810                         /* wait 0.5 sec to allow it to run */
9811                         msleep(500);
9812                         bnx2x_ext_phy_hw_reset(bp, port);
9813                         msleep(500);
9814                         bnx2x_release_phy_lock(bp);
9815                 }
9816         } else
9817                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9818
9819         return rc;
9820 }
9821
9822 static int bnx2x_get_coalesce(struct net_device *dev,
9823                               struct ethtool_coalesce *coal)
9824 {
9825         struct bnx2x *bp = netdev_priv(dev);
9826
9827         memset(coal, 0, sizeof(struct ethtool_coalesce));
9828
9829         coal->rx_coalesce_usecs = bp->rx_ticks;
9830         coal->tx_coalesce_usecs = bp->tx_ticks;
9831
9832         return 0;
9833 }
9834
9835 #define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
9836 static int bnx2x_set_coalesce(struct net_device *dev,
9837                               struct ethtool_coalesce *coal)
9838 {
9839         struct bnx2x *bp = netdev_priv(dev);
9840
9841         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9842         if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9843                 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9844
9845         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9846         if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9847                 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9848
9849         if (netif_running(dev))
9850                 bnx2x_update_coalesce(bp);
9851
9852         return 0;
9853 }
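
/*
 * Usage note: the two hooks above back `ethtool -c` / `ethtool -C`, e.g.
 * (illustrative invocation):
 *
 *      ethtool -C eth0 rx-usecs 25 tx-usecs 50
 *
 * Out-of-range values are silently clamped to BNX2X_MAX_COALES_TOUT rather
 * than rejected with an error.
 */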
9854
9855 static void bnx2x_get_ringparam(struct net_device *dev,
9856                                 struct ethtool_ringparam *ering)
9857 {
9858         struct bnx2x *bp = netdev_priv(dev);
9859
9860         ering->rx_max_pending = MAX_RX_AVAIL;
9861         ering->rx_mini_max_pending = 0;
9862         ering->rx_jumbo_max_pending = 0;
9863
9864         ering->rx_pending = bp->rx_ring_size;
9865         ering->rx_mini_pending = 0;
9866         ering->rx_jumbo_pending = 0;
9867
9868         ering->tx_max_pending = MAX_TX_AVAIL;
9869         ering->tx_pending = bp->tx_ring_size;
9870 }
9871
9872 static int bnx2x_set_ringparam(struct net_device *dev,
9873                                struct ethtool_ringparam *ering)
9874 {
9875         struct bnx2x *bp = netdev_priv(dev);
9876         int rc = 0;
9877
9878         if ((ering->rx_pending > MAX_RX_AVAIL) ||
9879             (ering->tx_pending > MAX_TX_AVAIL) ||
9880             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9881                 return -EINVAL;
9882
9883         bp->rx_ring_size = ering->rx_pending;
9884         bp->tx_ring_size = ering->tx_pending;
9885
9886         if (netif_running(dev)) {
9887                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9888                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9889         }
9890
9891         return rc;
9892 }
9893
9894 static void bnx2x_get_pauseparam(struct net_device *dev,
9895                                  struct ethtool_pauseparam *epause)
9896 {
9897         struct bnx2x *bp = netdev_priv(dev);
9898
9899         epause->autoneg = (bp->link_params.req_flow_ctrl ==
9900                            BNX2X_FLOW_CTRL_AUTO) &&
9901                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9902
9903         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9904                             BNX2X_FLOW_CTRL_RX);
9905         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9906                             BNX2X_FLOW_CTRL_TX);
9907
9908         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9909            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9910            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9911 }
9912
9913 static int bnx2x_set_pauseparam(struct net_device *dev,
9914                                 struct ethtool_pauseparam *epause)
9915 {
9916         struct bnx2x *bp = netdev_priv(dev);
9917
9918         if (IS_E1HMF(bp))
9919                 return 0;
9920
9921         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9922            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
9923            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9924
9925         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9926
9927         if (epause->rx_pause)
9928                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9929
9930         if (epause->tx_pause)
9931                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9932
9933         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9934                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9935
9936         if (epause->autoneg) {
9937                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9938                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
9939                         return -EINVAL;
9940                 }
9941
9942                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9943                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9944         }
9945
9946         DP(NETIF_MSG_LINK,
9947            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9948
9949         if (netif_running(dev)) {
9950                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9951                 bnx2x_link_set(bp);
9952         }
9953
9954         return 0;
9955 }
9956
9957 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9958 {
9959         struct bnx2x *bp = netdev_priv(dev);
9960         int changed = 0;
9961         int rc = 0;
9962
9963         /* TPA requires Rx CSUM offloading */
9964         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9965                 if (!(dev->features & NETIF_F_LRO)) {
9966                         dev->features |= NETIF_F_LRO;
9967                         bp->flags |= TPA_ENABLE_FLAG;
9968                         changed = 1;
9969                 }
9970
9971         } else if (dev->features & NETIF_F_LRO) {
9972                 dev->features &= ~NETIF_F_LRO;
9973                 bp->flags &= ~TPA_ENABLE_FLAG;
9974                 changed = 1;
9975         }
9976
9977         if (changed && netif_running(dev)) {
9978                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9979                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9980         }
9981
9982         return rc;
9983 }
9984
9985 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9986 {
9987         struct bnx2x *bp = netdev_priv(dev);
9988
9989         return bp->rx_csum;
9990 }
9991
9992 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9993 {
9994         struct bnx2x *bp = netdev_priv(dev);
9995         int rc = 0;
9996
9997         bp->rx_csum = data;
9998
9999         /* Disable TPA when Rx CSUM is disabled; otherwise all
10000            TPA'ed packets will be discarded due to a wrong TCP CSUM */
10001         if (!data) {
10002                 u32 flags = ethtool_op_get_flags(dev);
10003
10004                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
10005         }
10006
10007         return rc;
10008 }
10009
10010 static int bnx2x_set_tso(struct net_device *dev, u32 data)
10011 {
10012         if (data) {
10013                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10014                 dev->features |= NETIF_F_TSO6;
10015         } else {
10016                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
10017                 dev->features &= ~NETIF_F_TSO6;
10018         }
10019
10020         return 0;
10021 }
10022
10023 static const struct {
10024         char string[ETH_GSTRING_LEN];
10025 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
10026         { "register_test (offline)" },
10027         { "memory_test (offline)" },
10028         { "loopback_test (offline)" },
10029         { "nvram_test (online)" },
10030         { "interrupt_test (online)" },
10031         { "link_test (online)" },
10032         { "idle check (online)" }
10033 };
10034
10035 static int bnx2x_test_registers(struct bnx2x *bp)
10036 {
10037         int idx, i, rc = -ENODEV;
10038         u32 wr_val = 0;
10039         int port = BP_PORT(bp);
10040         static const struct {
10041                 u32  offset0;
10042                 u32  offset1;
10043                 u32  mask;
10044         } reg_tbl[] = {
10045 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
10046                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
10047                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
10048                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
10049                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
10050                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
10051                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
10052                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
10053                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
10054                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
10055 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
10056                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
10057                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
10058                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
10059                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
10060                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
10061                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
10062                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
10063                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
10064                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
10065 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
10066                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
10067                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
10068                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
10069                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
10070                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
10071                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
10072                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
10073                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
10074                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
10075 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
10076                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
10077                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
10078                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
10079                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
10080                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
10081                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
10082
10083                 { 0xffffffff, 0, 0x00000000 }
10084         };
10085
10086         if (!netif_running(bp->dev))
10087                 return rc;
10088
10089         /* Repeat the test twice:
10090            First by writing 0x00000000, second by writing 0xffffffff */
10091         for (idx = 0; idx < 2; idx++) {
10092
10093                 switch (idx) {
10094                 case 0:
10095                         wr_val = 0;
10096                         break;
10097                 case 1:
10098                         wr_val = 0xffffffff;
10099                         break;
10100                 }
10101
10102                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
10103                         u32 offset, mask, save_val, val;
10104
10105                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
10106                         mask = reg_tbl[i].mask;
10107
10108                         save_val = REG_RD(bp, offset);
10109
10110                         REG_WR(bp, offset, wr_val);
10111                         val = REG_RD(bp, offset);
10112
10113                         /* Restore the original register's value */
10114                         REG_WR(bp, offset, save_val);
10115
10116                         /* verify value is as expected */
10117                         if ((val & mask) != (wr_val & mask))
10118                                 goto test_reg_exit;
10119                 }
10120         }
10121
10122         rc = 0;
10123
10124 test_reg_exit:
10125         return rc;
10126 }
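
/*
 * Sketch (userspace C, hypothetical rd/wr callbacks) of the per-register
 * check performed above: write a pattern, read it back through the mask,
 * and always restore the saved value regardless of the outcome.
 */
#include <stdint.h>

int check_reg(uint32_t (*rd)(uint32_t), void (*wr)(uint32_t, uint32_t),
              uint32_t off, uint32_t mask, uint32_t pattern)
{
        uint32_t save = rd(off);
        uint32_t got;

        wr(off, pattern);
        got = rd(off);
        wr(off, save);          /* restore before judging the result */

        return ((got & mask) == (pattern & mask)) ? 0 : -1;
}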
10127
10128 static int bnx2x_test_memory(struct bnx2x *bp)
10129 {
10130         int i, j, rc = -ENODEV;
10131         u32 val;
10132         static const struct {
10133                 u32 offset;
10134                 int size;
10135         } mem_tbl[] = {
10136                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
10137                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
10138                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
10139                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
10140                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
10141                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
10142                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
10143
10144                 { 0xffffffff, 0 }
10145         };
10146         static const struct {
10147                 char *name;
10148                 u32 offset;
10149                 u32 e1_mask;
10150                 u32 e1h_mask;
10151         } prty_tbl[] = {
10152                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
10153                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
10154                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
10155                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
10156                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
10157                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
10158
10159                 { NULL, 0xffffffff, 0, 0 }
10160         };
10161
10162         if (!netif_running(bp->dev))
10163                 return rc;
10164
10165         /* Go through all the memories */
10166         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
10167                 for (j = 0; j < mem_tbl[i].size; j++)
10168                         REG_RD(bp, mem_tbl[i].offset + j*4);
10169
10170         /* Check the parity status */
10171         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
10172                 val = REG_RD(bp, prty_tbl[i].offset);
10173                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
10174                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
10175                         DP(NETIF_MSG_HW,
10176                            "%s is 0x%x\n", prty_tbl[i].name, val);
10177                         goto test_mem_exit;
10178                 }
10179         }
10180
10181         rc = 0;
10182
10183 test_mem_exit:
10184         return rc;
10185 }
10186
10187 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
10188 {
10189         int cnt = 1000;
10190
10191         if (link_up)
10192                 while (bnx2x_link_test(bp) && cnt--)
10193                         msleep(10);
10194 }
10195
10196 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10197 {
10198         unsigned int pkt_size, num_pkts, i;
10199         struct sk_buff *skb;
10200         unsigned char *packet;
10201         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
10202         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
10203         u16 tx_start_idx, tx_idx;
10204         u16 rx_start_idx, rx_idx;
10205         u16 pkt_prod, bd_prod;
10206         struct sw_tx_bd *tx_buf;
10207         struct eth_tx_start_bd *tx_start_bd;
10208         struct eth_tx_parse_bd *pbd = NULL;
10209         dma_addr_t mapping;
10210         union eth_rx_cqe *cqe;
10211         u8 cqe_fp_flags;
10212         struct sw_rx_bd *rx_buf;
10213         u16 len;
10214         int rc = -ENODEV;
10215
10216         /* check the loopback mode */
10217         switch (loopback_mode) {
10218         case BNX2X_PHY_LOOPBACK:
10219                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
10220                         return -EINVAL;
10221                 break;
10222         case BNX2X_MAC_LOOPBACK:
10223                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
10224                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
10225                 break;
10226         default:
10227                 return -EINVAL;
10228         }
10229
10230         /* prepare the loopback packet */
10231         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
10232                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
10233         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
10234         if (!skb) {
10235                 rc = -ENOMEM;
10236                 goto test_loopback_exit;
10237         }
10238         packet = skb_put(skb, pkt_size);
10239         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
10240         memset(packet + ETH_ALEN, 0, ETH_ALEN);
10241         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
10242         for (i = ETH_HLEN; i < pkt_size; i++)
10243                 packet[i] = (unsigned char) (i & 0xff);
10244
10245         /* send the loopback packet */
10246         num_pkts = 0;
10247         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10248         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10249
10250         pkt_prod = fp_tx->tx_pkt_prod++;
10251         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
10252         tx_buf->first_bd = fp_tx->tx_bd_prod;
10253         tx_buf->skb = skb;
10254         tx_buf->flags = 0;
10255
10256         bd_prod = TX_BD(fp_tx->tx_bd_prod);
10257         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10258         mapping = pci_map_single(bp->pdev, skb->data,
10259                                  skb_headlen(skb), PCI_DMA_TODEVICE);
10260         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10261         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10262         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10263         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10264         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10265         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10266         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10267                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10268
10269         /* turn on parsing and get a BD */
10270         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10271         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10272
10273         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10274
10275         wmb();
10276
10277         fp_tx->tx_db.data.prod += 2;
10278         barrier();
10279         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10280
10281         mmiowb();
10282
10283         num_pkts++;
10284         fp_tx->tx_bd_prod += 2; /* start + pbd */
10285         bp->dev->trans_start = jiffies;
10286
10287         udelay(100);
10288
10289         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
10290         if (tx_idx != tx_start_idx + num_pkts)
10291                 goto test_loopback_exit;
10292
10293         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
10294         if (rx_idx != rx_start_idx + num_pkts)
10295                 goto test_loopback_exit;
10296
10297         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
10298         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10299         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10300                 goto test_loopback_rx_exit;
10301
10302         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10303         if (len != pkt_size)
10304                 goto test_loopback_rx_exit;
10305
10306         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
10307         skb = rx_buf->skb;
10308         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10309         for (i = ETH_HLEN; i < pkt_size; i++)
10310                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10311                         goto test_loopback_rx_exit;
10312
10313         rc = 0;
10314
10315 test_loopback_rx_exit:
10316
10317         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10318         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10319         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10320         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
10321
10322         /* Update producers */
10323         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10324                              fp_rx->rx_sge_prod);
10325
10326 test_loopback_exit:
10327         bp->link_params.loopback_mode = LOOPBACK_NONE;
10328
10329         return rc;
10330 }
10331
10332 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10333 {
10334         int rc = 0, res;
10335
10336         if (!netif_running(bp->dev))
10337                 return BNX2X_LOOPBACK_FAILED;
10338
10339         bnx2x_netif_stop(bp, 1);
10340         bnx2x_acquire_phy_lock(bp);
10341
10342         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10343         if (res) {
10344                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
10345                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
10346         }
10347
10348         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10349         if (res) {
10350                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
10351                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
10352         }
10353
10354         bnx2x_release_phy_lock(bp);
10355         bnx2x_netif_start(bp);
10356
10357         return rc;
10358 }
10359
10360 #define CRC32_RESIDUAL                  0xdebb20e3
10361
10362 static int bnx2x_test_nvram(struct bnx2x *bp)
10363 {
10364         static const struct {
10365                 int offset;
10366                 int size;
10367         } nvram_tbl[] = {
10368                 {     0,  0x14 }, /* bootstrap */
10369                 {  0x14,  0xec }, /* dir */
10370                 { 0x100, 0x350 }, /* manuf_info */
10371                 { 0x450,  0xf0 }, /* feature_info */
10372                 { 0x640,  0x64 }, /* upgrade_key_info */
10373                 { 0x6a4,  0x64 },
10374                 { 0x708,  0x70 }, /* manuf_key_info */
10375                 { 0x778,  0x70 },
10376                 {     0,     0 }
10377         };
10378         __be32 buf[0x350 / 4];
10379         u8 *data = (u8 *)buf;
10380         int i, rc;
10381         u32 magic, crc;
10382
10383         rc = bnx2x_nvram_read(bp, 0, data, 4);
10384         if (rc) {
10385                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
10386                 goto test_nvram_exit;
10387         }
10388
10389         magic = be32_to_cpu(buf[0]);
10390         if (magic != 0x669955aa) {
10391                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10392                 rc = -ENODEV;
10393                 goto test_nvram_exit;
10394         }
10395
10396         for (i = 0; nvram_tbl[i].size; i++) {
10397
10398                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10399                                       nvram_tbl[i].size);
10400                 if (rc) {
10401                         DP(NETIF_MSG_PROBE,
10402                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
10403                         goto test_nvram_exit;
10404                 }
10405
10406                 crc = ether_crc_le(nvram_tbl[i].size, data);
10407                 if (crc != CRC32_RESIDUAL) {
10408                         DP(NETIF_MSG_PROBE,
10409                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
10410                         rc = -ENODEV;
10411                         goto test_nvram_exit;
10412                 }
10413         }
10414
10415 test_nvram_exit:
10416         return rc;
10417 }
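
/*
 * Illustrative, self-contained demo (userspace C) of the CRC32_RESIDUAL
 * check above: each NVRAM section stores its CRC-32 after the payload, and
 * running the raw little-endian CRC (the same running value ether_crc_le()
 * returns) across payload-plus-CRC always lands on 0xdebb20e3 when the
 * section is intact. The payload bytes here are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_le_raw(const uint8_t *p, size_t len)
{
        uint32_t crc = 0xffffffff;      /* init value, no final xor */

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1u));
        }
        return crc;
}

int main(void)
{
        uint8_t buf[16] = "nvramsection";       /* 12 payload bytes + 4 CRC */
        uint32_t crc = ~crc32_le_raw(buf, 12);  /* finalized CRC-32 */

        memcpy(buf + 12, &crc, 4);      /* store LE; assumes an LE host */
        printf("%08x\n", crc32_le_raw(buf, 16));        /* prints debb20e3 */
        return 0;
}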
10418
10419 static int bnx2x_test_intr(struct bnx2x *bp)
10420 {
10421         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10422         int i, rc;
10423
10424         if (!netif_running(bp->dev))
10425                 return -ENODEV;
10426
10427         config->hdr.length = 0;
10428         if (CHIP_IS_E1(bp))
10429                 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10430         else
10431                 config->hdr.offset = BP_FUNC(bp);
10432         config->hdr.client_id = bp->fp->cl_id;
10433         config->hdr.reserved1 = 0;
10434
10435         bp->set_mac_pending++;
10436         smp_wmb();
10437         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10438                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10439                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10440         if (rc == 0) {
10441                 for (i = 0; i < 10; i++) {
10442                         if (!bp->set_mac_pending)
10443                                 break;
10444                         smp_rmb();
10445                         msleep_interruptible(10);
10446                 }
10447                 if (i == 10)
10448                         rc = -ENODEV;
10449         }
10450
10451         return rc;
10452 }
10453
10454 static void bnx2x_self_test(struct net_device *dev,
10455                             struct ethtool_test *etest, u64 *buf)
10456 {
10457         struct bnx2x *bp = netdev_priv(dev);
10458
10459         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10460
10461         if (!netif_running(dev))
10462                 return;
10463
10464         /* offline tests are not supported in MF mode */
10465         if (IS_E1HMF(bp))
10466                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10467
10468         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10469                 int port = BP_PORT(bp);
10470                 u32 val;
10471                 u8 link_up;
10472
10473                 /* save current value of input enable for TX port IF */
10474                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10475                 /* disable input for TX port IF */
10476                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10477
10478                 link_up = (bnx2x_link_test(bp) == 0);
10479                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10480                 bnx2x_nic_load(bp, LOAD_DIAG);
10481                 /* wait until link state is restored */
10482                 bnx2x_wait_for_link(bp, link_up);
10483
10484                 if (bnx2x_test_registers(bp) != 0) {
10485                         buf[0] = 1;
10486                         etest->flags |= ETH_TEST_FL_FAILED;
10487                 }
10488                 if (bnx2x_test_memory(bp) != 0) {
10489                         buf[1] = 1;
10490                         etest->flags |= ETH_TEST_FL_FAILED;
10491                 }
10492                 buf[2] = bnx2x_test_loopback(bp, link_up);
10493                 if (buf[2] != 0)
10494                         etest->flags |= ETH_TEST_FL_FAILED;
10495
10496                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10497
10498                 /* restore input for TX port IF */
10499                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10500
10501                 bnx2x_nic_load(bp, LOAD_NORMAL);
10502                 /* wait until link state is restored */
10503                 bnx2x_wait_for_link(bp, link_up);
10504         }
10505         if (bnx2x_test_nvram(bp) != 0) {
10506                 buf[3] = 1;
10507                 etest->flags |= ETH_TEST_FL_FAILED;
10508         }
10509         if (bnx2x_test_intr(bp) != 0) {
10510                 buf[4] = 1;
10511                 etest->flags |= ETH_TEST_FL_FAILED;
10512         }
10513         if (bp->port.pmf)
10514                 if (bnx2x_link_test(bp) != 0) {
10515                         buf[5] = 1;
10516                         etest->flags |= ETH_TEST_FL_FAILED;
10517                 }
10518
10519 #ifdef BNX2X_EXTRA_DEBUG
10520         bnx2x_panic_dump(bp);
10521 #endif
10522 }
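
/*
 * Usage note: this handler backs `ethtool -t`. An offline run (illustrative)
 * triggers the register/memory/loopback tests and the unload/reload cycle
 * above:
 *
 *      ethtool -t eth0 offline
 *
 * Without "offline" (or in E1H multi-function mode, where the flag is
 * cleared above) only the nvram/interrupt/link tests run.
 */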
10523
10524 static const struct {
10525         long offset;
10526         int size;
10527         u8 string[ETH_GSTRING_LEN];
10528 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10529 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10530         { Q_STATS_OFFSET32(error_bytes_received_hi),
10531                                                 8, "[%d]: rx_error_bytes" },
10532         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10533                                                 8, "[%d]: rx_ucast_packets" },
10534         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10535                                                 8, "[%d]: rx_mcast_packets" },
10536         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10537                                                 8, "[%d]: rx_bcast_packets" },
10538         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10539         { Q_STATS_OFFSET32(rx_err_discard_pkt),
10540                                          4, "[%d]: rx_phy_ip_err_discards"},
10541         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10542                                          4, "[%d]: rx_skb_alloc_discard" },
10543         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10544
10545 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10546         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10547                                                         8, "[%d]: tx_packets" }
10548 };
10549
10550 static const struct {
10551         long offset;
10552         int size;
10553         u32 flags;
10554 #define STATS_FLAGS_PORT                1
10555 #define STATS_FLAGS_FUNC                2
10556 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10557         u8 string[ETH_GSTRING_LEN];
10558 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10559 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10560                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
10561         { STATS_OFFSET32(error_bytes_received_hi),
10562                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10563         { STATS_OFFSET32(total_unicast_packets_received_hi),
10564                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10565         { STATS_OFFSET32(total_multicast_packets_received_hi),
10566                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10567         { STATS_OFFSET32(total_broadcast_packets_received_hi),
10568                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10569         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10570                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10571         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10572                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
10573         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10574                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10575         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10576                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10577 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10578                                 8, STATS_FLAGS_PORT, "rx_fragments" },
10579         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10580                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
10581         { STATS_OFFSET32(no_buff_discard_hi),
10582                                 8, STATS_FLAGS_BOTH, "rx_discards" },
10583         { STATS_OFFSET32(mac_filter_discard),
10584                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10585         { STATS_OFFSET32(xxoverflow_discard),
10586                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10587         { STATS_OFFSET32(brb_drop_hi),
10588                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10589         { STATS_OFFSET32(brb_truncate_hi),
10590                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10591         { STATS_OFFSET32(pause_frames_received_hi),
10592                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10593         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10594                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10595         { STATS_OFFSET32(nig_timer_max),
10596                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10597 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10598                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10599         { STATS_OFFSET32(rx_skb_alloc_failed),
10600                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10601         { STATS_OFFSET32(hw_csum_err),
10602                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10603
10604         { STATS_OFFSET32(total_bytes_transmitted_hi),
10605                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
10606         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10607                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10608         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10609                                 8, STATS_FLAGS_BOTH, "tx_packets" },
10610         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10611                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10612         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10613                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10614         { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10615                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10616         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10617                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10618 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10619                                 8, STATS_FLAGS_PORT, "tx_deferred" },
10620         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10621                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10622         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10623                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10624         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10625                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10626         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10627                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10628         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10629                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10630         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10631                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10632         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10633                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10634         { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10635                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10636         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10637                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10638 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10639                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10640         { STATS_OFFSET32(pause_frames_sent_hi),
10641                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10642 };
10643
10644 #define IS_PORT_STAT(i) \
10645         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10646 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10647 #define IS_E1HMF_MODE_STAT(bp) \
10648                         (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10649
10650 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10651 {
10652         struct bnx2x *bp = netdev_priv(dev);
10653         int i, num_stats;
10654
10655         switch (stringset) {
10656         case ETH_SS_STATS:
10657                 if (is_multi(bp)) {
10658                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10659                         if (!IS_E1HMF_MODE_STAT(bp))
10660                                 num_stats += BNX2X_NUM_STATS;
10661                 } else {
10662                         if (IS_E1HMF_MODE_STAT(bp)) {
10663                                 num_stats = 0;
10664                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
10665                                         if (IS_FUNC_STAT(i))
10666                                                 num_stats++;
10667                         } else
10668                                 num_stats = BNX2X_NUM_STATS;
10669                 }
10670                 return num_stats;
10671
10672         case ETH_SS_TEST:
10673                 return BNX2X_NUM_TESTS;
10674
10675         default:
10676                 return -EINVAL;
10677         }
10678 }
10679
10680 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10681 {
10682         struct bnx2x *bp = netdev_priv(dev);
10683         int i, j, k;
10684
10685         switch (stringset) {
10686         case ETH_SS_STATS:
10687                 if (is_multi(bp)) {
10688                         k = 0;
10689                         for_each_queue(bp, i) {
10690                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10691                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10692                                                 bnx2x_q_stats_arr[j].string, i);
10693                                 k += BNX2X_NUM_Q_STATS;
10694                         }
10695                         if (IS_E1HMF_MODE_STAT(bp))
10696                                 break;
10697                         for (j = 0; j < BNX2X_NUM_STATS; j++)
10698                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10699                                        bnx2x_stats_arr[j].string);
10700                 } else {
10701                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10702                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10703                                         continue;
10704                                 strcpy(buf + j*ETH_GSTRING_LEN,
10705                                        bnx2x_stats_arr[i].string);
10706                                 j++;
10707                         }
10708                 }
10709                 break;
10710
10711         case ETH_SS_TEST:
10712                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10713                 break;
10714         }
10715 }
10716
10717 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10718                                     struct ethtool_stats *stats, u64 *buf)
10719 {
10720         struct bnx2x *bp = netdev_priv(dev);
10721         u32 *hw_stats, *offset;
10722         int i, j, k;
10723
10724         if (is_multi(bp)) {
10725                 k = 0;
10726                 for_each_queue(bp, i) {
10727                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10728                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10729                                 if (bnx2x_q_stats_arr[j].size == 0) {
10730                                         /* skip this counter */
10731                                         buf[k + j] = 0;
10732                                         continue;
10733                                 }
10734                                 offset = (hw_stats +
10735                                           bnx2x_q_stats_arr[j].offset);
10736                                 if (bnx2x_q_stats_arr[j].size == 4) {
10737                                         /* 4-byte counter */
10738                                         buf[k + j] = (u64) *offset;
10739                                         continue;
10740                                 }
10741                                 /* 8-byte counter */
10742                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10743                         }
10744                         k += BNX2X_NUM_Q_STATS;
10745                 }
10746                 if (IS_E1HMF_MODE_STAT(bp))
10747                         return;
10748                 hw_stats = (u32 *)&bp->eth_stats;
10749                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10750                         if (bnx2x_stats_arr[j].size == 0) {
10751                                 /* skip this counter */
10752                                 buf[k + j] = 0;
10753                                 continue;
10754                         }
10755                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
10756                         if (bnx2x_stats_arr[j].size == 4) {
10757                                 /* 4-byte counter */
10758                                 buf[k + j] = (u64) *offset;
10759                                 continue;
10760                         }
10761                         /* 8-byte counter */
10762                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
10763                 }
10764         } else {
10765                 hw_stats = (u32 *)&bp->eth_stats;
10766                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10767                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10768                                 continue;
10769                         if (bnx2x_stats_arr[i].size == 0) {
10770                                 /* skip this counter */
10771                                 buf[j] = 0;
10772                                 j++;
10773                                 continue;
10774                         }
10775                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
10776                         if (bnx2x_stats_arr[i].size == 4) {
10777                                 /* 4-byte counter */
10778                                 buf[j] = (u64) *offset;
10779                                 j++;
10780                                 continue;
10781                         }
10782                         /* 8-byte counter */
10783                         buf[j] = HILO_U64(*offset, *(offset + 1));
10784                         j++;
10785                 }
10786         }
10787 }
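/* Note on the 8-byte counters above: the storm firmware is assumed to
 * export each of them as two consecutive 32-bit words, high word first,
 * so HILO_U64(*offset, *(offset + 1)) builds the u64 roughly as
 * ((u64)hi << 32) | lo.
 */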
10788
10789 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10790 {
10791         struct bnx2x *bp = netdev_priv(dev);
10792         int i;
10793
10794         if (!netif_running(dev))
10795                 return 0;
10796
10797         if (!bp->port.pmf)
10798                 return 0;
10799
10800         if (data == 0)
10801                 data = 2;
10802
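        /* 'data' is the requested blink time in seconds (0 selects a
         * 2 second default here); each iteration below takes ~500 ms, so
         * data * 2 iterations blink the LED for roughly 'data' seconds.
         */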
10803         for (i = 0; i < (data * 2); i++) {
10804                 if ((i % 2) == 0)
10805                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10806                                       SPEED_1000);
10807                 else
10808                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10809
10810                 msleep_interruptible(500);
10811                 if (signal_pending(current))
10812                         break;
10813         }
10814
10815         if (bp->link_vars.link_up)
10816                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10817                               bp->link_vars.line_speed);
10818
10819         return 0;
10820 }
10821
10822 static const struct ethtool_ops bnx2x_ethtool_ops = {
10823         .get_settings           = bnx2x_get_settings,
10824         .set_settings           = bnx2x_set_settings,
10825         .get_drvinfo            = bnx2x_get_drvinfo,
10826         .get_regs_len           = bnx2x_get_regs_len,
10827         .get_regs               = bnx2x_get_regs,
10828         .get_wol                = bnx2x_get_wol,
10829         .set_wol                = bnx2x_set_wol,
10830         .get_msglevel           = bnx2x_get_msglevel,
10831         .set_msglevel           = bnx2x_set_msglevel,
10832         .nway_reset             = bnx2x_nway_reset,
10833         .get_link               = bnx2x_get_link,
10834         .get_eeprom_len         = bnx2x_get_eeprom_len,
10835         .get_eeprom             = bnx2x_get_eeprom,
10836         .set_eeprom             = bnx2x_set_eeprom,
10837         .get_coalesce           = bnx2x_get_coalesce,
10838         .set_coalesce           = bnx2x_set_coalesce,
10839         .get_ringparam          = bnx2x_get_ringparam,
10840         .set_ringparam          = bnx2x_set_ringparam,
10841         .get_pauseparam         = bnx2x_get_pauseparam,
10842         .set_pauseparam         = bnx2x_set_pauseparam,
10843         .get_rx_csum            = bnx2x_get_rx_csum,
10844         .set_rx_csum            = bnx2x_set_rx_csum,
10845         .get_tx_csum            = ethtool_op_get_tx_csum,
10846         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
10847         .set_flags              = bnx2x_set_flags,
10848         .get_flags              = ethtool_op_get_flags,
10849         .get_sg                 = ethtool_op_get_sg,
10850         .set_sg                 = ethtool_op_set_sg,
10851         .get_tso                = ethtool_op_get_tso,
10852         .set_tso                = bnx2x_set_tso,
10853         .self_test              = bnx2x_self_test,
10854         .get_sset_count         = bnx2x_get_sset_count,
10855         .get_strings            = bnx2x_get_strings,
10856         .phys_id                = bnx2x_phys_id,
10857         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
10858 };
10859
10860 /* end of ethtool_ops */
10861
10862 /****************************************************************************
10863 * General service functions
10864 ****************************************************************************/
10865
10866 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10867 {
10868         u16 pmcsr;
10869
10870         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10871
10872         switch (state) {
10873         case PCI_D0:
10874                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10875                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10876                                        PCI_PM_CTRL_PME_STATUS));
10877
10878                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10879                         /* delay required during transition out of D3hot */
10880                         msleep(20);
10881                 break;
10882
10883         case PCI_D3hot:
10884                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10885                 pmcsr |= 3;
10886
10887                 if (bp->wol)
10888                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10889
10890                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10891                                       pmcsr);
10892
10893                 /* No more memory access after this point until
10894                  * the device is brought back to D0.
10895                  */
10896                 break;
10897
10898         default:
10899                 return -EINVAL;
10900         }
10901         return 0;
10902 }
10903
10904 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10905 {
10906         u16 rx_cons_sb;
10907
10908         /* Tell compiler that status block fields can change */
10909         barrier();
10910         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10911         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10912                 rx_cons_sb++;
10913         return (fp->rx_comp_cons != rx_cons_sb);
10914 }
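/* The "== MAX_RCQ_DESC_CNT" test above is assumed to catch the consumer
 * index landing on the last entry of an RCQ page, which holds the
 * next-page pointer rather than a real completion; bumping the index
 * skips that entry so the comparison with rx_comp_cons stays in sync.
 */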
10915
10916 /*
10917  * net_device service functions
10918  */
10919
10920 static int bnx2x_poll(struct napi_struct *napi, int budget)
10921 {
10922         int work_done = 0;
10923         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10924                                                  napi);
10925         struct bnx2x *bp = fp->bp;
10926
10927         while (1) {
10928 #ifdef BNX2X_STOP_ON_ERROR
10929                 if (unlikely(bp->panic)) {
10930                         napi_complete(napi);
10931                         return 0;
10932                 }
10933 #endif
10934
10935                 if (bnx2x_has_tx_work(fp))
10936                         bnx2x_tx_int(fp);
10937
10938                 if (bnx2x_has_rx_work(fp)) {
10939                         work_done += bnx2x_rx_int(fp, budget - work_done);
10940
10941                         /* must not complete if we consumed full budget */
10942                         if (work_done >= budget)
10943                                 break;
10944                 }
10945
10946                 /* Fall out from the NAPI loop if needed */
10947                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10948                         bnx2x_update_fpsb_idx(fp);
10949                         /* bnx2x_has_rx_work() reads the status block, so
10950                          * the status block indices must actually be read
10951                          * (bnx2x_update_fpsb_idx) before this check
10952                          * (bnx2x_has_rx_work); otherwise we could ack a
10953                          * "newer" status block value to the IGU.  Without
10954                          * this rmb(), a DMA right after bnx2x_has_rx_work()
10955                          * could leave that read postponed to just before
10956                          * bnx2x_ack_sb(), and no further interrupt would
10957                          * arrive until the next status block update, even
10958                          * though there is still unhandled work.
10959                          */
10960                         rmb();
10961
10962                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10963                                 napi_complete(napi);
10964                                 /* Re-enable interrupts */
10965                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10966                                              le16_to_cpu(fp->fp_c_idx),
10967                                              IGU_INT_NOP, 1);
10968                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10969                                              le16_to_cpu(fp->fp_u_idx),
10970                                              IGU_INT_ENABLE, 1);
10971                                 break;
10972                         }
10973                 }
10974         }
10975
10976         return work_done;
10977 }
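/* The idle-exit protocol above, as a rough sketch (not compiled code):
 *
 *      if (no rx/tx work) {
 *              bnx2x_update_fpsb_idx(fp);      refresh local SB copy
 *              rmb();                          order SB read vs. re-check
 *              if (still no rx/tx work) {
 *                      napi_complete(napi);
 *                      bnx2x_ack_sb(..., IGU_INT_ENABLE, 1);   re-arm IRQ
 *              }
 *      }
 *
 * Work found at any step sends us around the loop again, so both Rx and
 * Tx are drained before interrupts are re-enabled.
 */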
10978
10979
10980 /* We split the first BD into a headers BD and a data BD
10981  * to ease the pain of our fellow microcode engineers;
10982  * we use one mapping for both BDs.
10983  * So far this has only been observed to happen
10984  * in Other Operating Systems(TM).
10985  */
10986 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10987                                    struct bnx2x_fastpath *fp,
10988                                    struct sw_tx_bd *tx_buf,
10989                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
10990                                    u16 bd_prod, int nbd)
10991 {
10992         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10993         struct eth_tx_bd *d_tx_bd;
10994         dma_addr_t mapping;
10995         int old_len = le16_to_cpu(h_tx_bd->nbytes);
10996
10997         /* first fix first BD */
10998         h_tx_bd->nbd = cpu_to_le16(nbd);
10999         h_tx_bd->nbytes = cpu_to_le16(hlen);
11000
11001         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
11002            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
11003            h_tx_bd->addr_lo, h_tx_bd->nbd);
11004
11005         /* now get a new data BD
11006          * (after the pbd) and fill it */
11007         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11008         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11009
11010         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
11011                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
11012
11013         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11014         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11015         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
11016
11017         /* this marks the BD as one that has no individual mapping */
11018         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
11019
11020         DP(NETIF_MSG_TX_QUEUED,
11021            "TSO split data size is %d (%x:%x)\n",
11022            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
11023
11024         /* update tx_bd */
11025         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
11026
11027         return bd_prod;
11028 }
11029
11030 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
11031 {
11032         if (fix > 0)
11033                 csum = (u16) ~csum_fold(csum_sub(csum,
11034                                 csum_partial(t_header - fix, fix, 0)));
11035
11036         else if (fix < 0)
11037                 csum = (u16) ~csum_fold(csum_add(csum,
11038                                 csum_partial(t_header, -fix, 0)));
11039
11040         return swab16(csum);
11041 }
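/* The fixup above, spelled out: a positive 'fix' subtracts the checksum
 * of the 'fix' bytes just before t_header from 'csum'; a negative 'fix'
 * adds the checksum of the '-fix' bytes starting at t_header back in.
 * The result is folded back to 16 bits and byte-swapped into the order
 * the parsing BD expects (see the "HW bug" caller below).
 */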
11042
11043 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
11044 {
11045         u32 rc;
11046
11047         if (skb->ip_summed != CHECKSUM_PARTIAL)
11048                 rc = XMIT_PLAIN;
11049
11050         else {
11051                 if (skb->protocol == htons(ETH_P_IPV6)) {
11052                         rc = XMIT_CSUM_V6;
11053                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
11054                                 rc |= XMIT_CSUM_TCP;
11055
11056                 } else {
11057                         rc = XMIT_CSUM_V4;
11058                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
11059                                 rc |= XMIT_CSUM_TCP;
11060                 }
11061         }
11062
11063         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
11064                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
11065
11066         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
11067                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
11068
11069         return rc;
11070 }
11071
11072 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11073 /* Check whether the packet requires linearization (i.e. it is too
11074    fragmented); no need to check fragmentation if the page size is
11075    above 8K, since the FW restrictions cannot be violated then */
11076 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
11077                              u32 xmit_type)
11078 {
11079         int to_copy = 0;
11080         int hlen = 0;
11081         int first_bd_sz = 0;
11082
11083         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
11084         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
11085
11086                 if (xmit_type & XMIT_GSO) {
11087                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
11088                         /* Check if LSO packet needs to be copied:
11089                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
11090                         int wnd_size = MAX_FETCH_BD - 3;
11091                         /* Number of windows to check */
11092                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
11093                         int wnd_idx = 0;
11094                         int frag_idx = 0;
11095                         u32 wnd_sum = 0;
11096
11097                         /* Headers length */
11098                         hlen = (int)(skb_transport_header(skb) - skb->data) +
11099                                 tcp_hdrlen(skb);
11100
11101                         /* Amount of data (w/o headers) in the linear part of the SKB */
11102                         first_bd_sz = skb_headlen(skb) - hlen;
11103
11104                         wnd_sum  = first_bd_sz;
11105
11106                         /* Calculate the first sum - it's special */
11107                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
11108                                 wnd_sum +=
11109                                         skb_shinfo(skb)->frags[frag_idx].size;
11110
11111                         /* If there is data in the linear part of the skb - check it */
11112                         if (first_bd_sz > 0) {
11113                                 if (unlikely(wnd_sum < lso_mss)) {
11114                                         to_copy = 1;
11115                                         goto exit_lbl;
11116                                 }
11117
11118                                 wnd_sum -= first_bd_sz;
11119                         }
11120
11121                         /* Others are easier: run through the frag list and
11122                            check all windows */
11123                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
11124                                 wnd_sum +=
11125                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
11126
11127                                 if (unlikely(wnd_sum < lso_mss)) {
11128                                         to_copy = 1;
11129                                         break;
11130                                 }
11131                                 wnd_sum -=
11132                                         skb_shinfo(skb)->frags[wnd_idx].size;
11133                         }
11134                 } else {
11135                         /* a non-LSO packet that is too fragmented must
11136                            always be linearized */
11137                         to_copy = 1;
11138                 }
11139         }
11140
11141 exit_lbl:
11142         if (unlikely(to_copy))
11143                 DP(NETIF_MSG_TX_QUEUED,
11144                    "Linearization IS REQUIRED for %s packet. "
11145                    "num_frags %d  hlen %d  first_bd_sz %d\n",
11146                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
11147                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
11148
11149         return to_copy;
11150 }
11151 #endif
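/* Worked example for the sliding-window check above (assuming
 * MAX_FETCH_BD is 13, i.e. wnd_size = 10): every run of 10 consecutive
 * BDs - the linear part plus the first nine frags, then each window
 * shifted along by one frag - must carry at least lso_mss bytes between
 * them; otherwise a single MSS-sized segment could span more BDs than
 * the FW can fetch, and the skb is linearized instead.
 */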
11152
11153 /* called with netif_tx_lock
11154  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
11155  * netif_wake_queue()
11156  */
11157 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11158 {
11159         struct bnx2x *bp = netdev_priv(dev);
11160         struct bnx2x_fastpath *fp;
11161         struct netdev_queue *txq;
11162         struct sw_tx_bd *tx_buf;
11163         struct eth_tx_start_bd *tx_start_bd;
11164         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
11165         struct eth_tx_parse_bd *pbd = NULL;
11166         u16 pkt_prod, bd_prod;
11167         int nbd, fp_index;
11168         dma_addr_t mapping;
11169         u32 xmit_type = bnx2x_xmit_type(bp, skb);
11170         int i;
11171         u8 hlen = 0;
11172         __le16 pkt_size = 0;
11173
11174 #ifdef BNX2X_STOP_ON_ERROR
11175         if (unlikely(bp->panic))
11176                 return NETDEV_TX_BUSY;
11177 #endif
11178
11179         fp_index = skb_get_queue_mapping(skb);
11180         txq = netdev_get_tx_queue(dev, fp_index);
11181
11182         fp = &bp->fp[fp_index];
11183
11184         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
11185                 fp->eth_q_stats.driver_xoff++;
11186                 netif_tx_stop_queue(txq);
11187                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
11188                 return NETDEV_TX_BUSY;
11189         }
11190
11191         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
11192            "  gso type %x  xmit_type %x\n",
11193            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11194            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11195
11196 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11197         /* First, check if we need to linearize the skb (due to FW
11198            restrictions). No need to check fragmentation if page size > 8K
11199            (the FW restrictions cannot be violated then) */
11200         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
11201                 /* Statistics of linearization */
11202                 bp->lin_cnt++;
11203                 if (skb_linearize(skb) != 0) {
11204                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
11205                            "silently dropping this SKB\n");
11206                         dev_kfree_skb_any(skb);
11207                         return NETDEV_TX_OK;
11208                 }
11209         }
11210 #endif
11211
11212         /*
11213         Please read carefully. First we use one BD which we mark as the
11214         start BD, then we have a parsing info BD (used for TSO or xsum),
11215         and only then the rest of the TSO BDs.
11216         (Don't forget to mark the last one as last,
11217         and to unmap only AFTER you write to the BD ...)
11218         And above all, all PBD sizes are in 16-bit words - NOT DWORDS!
11219         */
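        /* Resulting BD chain for one packet, as a sketch:
         *
         *   start BD -> parsing BD -> [split data BD] -> frag BD ... frag BD
         *
         * where the split data BD appears only for TSO packets whose linear
         * part is longer than the headers (see bnx2x_tx_split() above).
         */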
11220
11221         pkt_prod = fp->tx_pkt_prod++;
11222         bd_prod = TX_BD(fp->tx_bd_prod);
11223
11224         /* get a tx_buf and first BD */
11225         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
11226         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11227
11228         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11229         tx_start_bd->general_data = (UNICAST_ADDRESS <<
11230                                      ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11231         /* header nbd */
11232         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11233
11234         /* remember the first BD of the packet */
11235         tx_buf->first_bd = fp->tx_bd_prod;
11236         tx_buf->skb = skb;
11237         tx_buf->flags = 0;
11238
11239         DP(NETIF_MSG_TX_QUEUED,
11240            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
11241            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
11242
11243 #ifdef BCM_VLAN
11244         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11245             (bp->flags & HW_VLAN_TX_FLAG)) {
11246                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11247                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
11248         } else
11249 #endif
11250                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11251
11252         /* turn on parsing and get a BD */
11253         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11254         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11255
11256         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11257
11258         if (xmit_type & XMIT_CSUM) {
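                /* header lengths in the parsing BD are counted in 16-bit
                 * words - hence the /2 here and the hlen*2 once total_hlen
                 * has been set
                 */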
11259                 hlen = (skb_network_header(skb) - skb->data) / 2;
11260
11261                 /* for now NS flag is not used in Linux */
11262                 pbd->global_data =
11263                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11264                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11265
11266                 pbd->ip_hlen = (skb_transport_header(skb) -
11267                                 skb_network_header(skb)) / 2;
11268
11269                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11270
11271                 pbd->total_hlen = cpu_to_le16(hlen);
11272                 hlen = hlen*2;
11273
11274                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11275
11276                 if (xmit_type & XMIT_CSUM_V4)
11277                         tx_start_bd->bd_flags.as_bitfield |=
11278                                                 ETH_TX_BD_FLAGS_IP_CSUM;
11279                 else
11280                         tx_start_bd->bd_flags.as_bitfield |=
11281                                                 ETH_TX_BD_FLAGS_IPV6;
11282
11283                 if (xmit_type & XMIT_CSUM_TCP) {
11284                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11285
11286                 } else {
11287                         s8 fix = SKB_CS_OFF(skb); /* signed! */
11288
11289                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11290
11291                         DP(NETIF_MSG_TX_QUEUED,
11292                            "hlen %d  fix %d  csum before fix %x\n",
11293                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11294
11295                         /* HW bug: fixup the CSUM */
11296                         pbd->tcp_pseudo_csum =
11297                                 bnx2x_csum_fix(skb_transport_header(skb),
11298                                                SKB_CS(skb), fix);
11299
11300                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11301                            pbd->tcp_pseudo_csum);
11302                 }
11303         }
11304
11305         mapping = pci_map_single(bp->pdev, skb->data,
11306                                  skb_headlen(skb), PCI_DMA_TODEVICE);
11307
11308         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11309         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11310         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11311         tx_start_bd->nbd = cpu_to_le16(nbd);
11312         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11313         pkt_size = tx_start_bd->nbytes;
11314
11315         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
11316            "  nbytes %d  flags %x  vlan %x\n",
11317            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11318            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11319            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11320
11321         if (xmit_type & XMIT_GSO) {
11322
11323                 DP(NETIF_MSG_TX_QUEUED,
11324                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
11325                    skb->len, hlen, skb_headlen(skb),
11326                    skb_shinfo(skb)->gso_size);
11327
11328                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11329
11330                 if (unlikely(skb_headlen(skb) > hlen))
11331                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11332                                                  hlen, bd_prod, ++nbd);
11333
11334                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11335                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11336                 pbd->tcp_flags = pbd_tcp_flags(skb);
11337
11338                 if (xmit_type & XMIT_GSO_V4) {
11339                         pbd->ip_id = swab16(ip_hdr(skb)->id);
11340                         pbd->tcp_pseudo_csum =
11341                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11342                                                           ip_hdr(skb)->daddr,
11343                                                           0, IPPROTO_TCP, 0));
11344
11345                 } else
11346                         pbd->tcp_pseudo_csum =
11347                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11348                                                         &ipv6_hdr(skb)->daddr,
11349                                                         0, IPPROTO_TCP, 0));
11350
11351                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11352         }
11353         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11354
11355         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11356                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11357
11358                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11359                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11360                 if (total_pkt_bd == NULL)
11361                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11362
11363                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11364                                        frag->size, PCI_DMA_TODEVICE);
11365
11366                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11367                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11368                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11369                 le16_add_cpu(&pkt_size, frag->size);
11370
11371                 DP(NETIF_MSG_TX_QUEUED,
11372                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
11373                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11374                    le16_to_cpu(tx_data_bd->nbytes));
11375         }
11376
11377         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11378
11379         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11380
11381         /* now send a Tx doorbell, counting the next-page BD
11382          * if the packet contains or ends on it
11383          */
11384         if (TX_BD_POFF(bd_prod) < nbd)
11385                 nbd++;
11386
11387         if (total_pkt_bd != NULL)
11388                 total_pkt_bd->total_pkt_bytes = pkt_size;
11389
11390         if (pbd)
11391                 DP(NETIF_MSG_TX_QUEUED,
11392                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
11393                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
11394                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11395                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11396                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11397
11398         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
11399
11400         /*
11401          * Make sure that the BD data is updated before updating the producer
11402          * since FW might read the BD right after the producer is updated.
11403          * This is only applicable for weak-ordered memory model archs such
11404          * as IA-64. The following barrier is also mandatory since the FW
11405          * assumes packets must have BDs.
11406          */
11407         wmb();
11408
11409         fp->tx_db.data.prod += nbd;
11410         barrier();
11411         DOORBELL(bp, fp->index, fp->tx_db.raw);
11412
11413         mmiowb();
11414
11415         fp->tx_bd_prod += nbd;
11416
11417         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11418                 netif_tx_stop_queue(txq);
11419                 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11420                    if we put Tx into XOFF state. */
11421                 smp_mb();
11422                 fp->eth_q_stats.driver_xoff++;
11423                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11424                         netif_tx_wake_queue(txq);
11425         }
11426         fp->tx_pkt++;
11427
11428         return NETDEV_TX_OK;
11429 }
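/* A note on the XOFF handling above: the queue is stopped first, and
 * smp_mb() is assumed to pair with a matching barrier on the
 * bnx2x_tx_int() side, so either bnx2x_tx_int() sees the stopped queue
 * and wakes it after freeing BDs, or the re-check of bnx2x_tx_avail()
 * here sees the freed BDs and wakes the queue itself; either way the
 * queue cannot stay stopped forever while room is available.
 */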
11430
11431 /* called with rtnl_lock */
11432 static int bnx2x_open(struct net_device *dev)
11433 {
11434         struct bnx2x *bp = netdev_priv(dev);
11435
11436         netif_carrier_off(dev);
11437
11438         bnx2x_set_power_state(bp, PCI_D0);
11439
11440         return bnx2x_nic_load(bp, LOAD_OPEN);
11441 }
11442
11443 /* called with rtnl_lock */
11444 static int bnx2x_close(struct net_device *dev)
11445 {
11446         struct bnx2x *bp = netdev_priv(dev);
11447
11448         /* Unload the driver, release IRQs */
11449         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11450         if (atomic_read(&bp->pdev->enable_cnt) == 1)
11451                 if (!CHIP_REV_IS_SLOW(bp))
11452                         bnx2x_set_power_state(bp, PCI_D3hot);
11453
11454         return 0;
11455 }
11456
11457 /* called with netif_tx_lock from dev_mcast.c */
11458 static void bnx2x_set_rx_mode(struct net_device *dev)
11459 {
11460         struct bnx2x *bp = netdev_priv(dev);
11461         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11462         int port = BP_PORT(bp);
11463
11464         if (bp->state != BNX2X_STATE_OPEN) {
11465                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11466                 return;
11467         }
11468
11469         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11470
11471         if (dev->flags & IFF_PROMISC)
11472                 rx_mode = BNX2X_RX_MODE_PROMISC;
11473
11474         else if ((dev->flags & IFF_ALLMULTI) ||
11475                  ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11476                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11477
11478         else { /* some multicasts */
11479                 if (CHIP_IS_E1(bp)) {
11480                         int i, old, offset;
11481                         struct dev_mc_list *mclist;
11482                         struct mac_configuration_cmd *config =
11483                                                 bnx2x_sp(bp, mcast_config);
11484
11485                         for (i = 0, mclist = dev->mc_list;
11486                              mclist && (i < dev->mc_count);
11487                              i++, mclist = mclist->next) {
11488
11489                                 config->config_table[i].
11490                                         cam_entry.msb_mac_addr =
11491                                         swab16(*(u16 *)&mclist->dmi_addr[0]);
11492                                 config->config_table[i].
11493                                         cam_entry.middle_mac_addr =
11494                                         swab16(*(u16 *)&mclist->dmi_addr[2]);
11495                                 config->config_table[i].
11496                                         cam_entry.lsb_mac_addr =
11497                                         swab16(*(u16 *)&mclist->dmi_addr[4]);
11498                                 config->config_table[i].cam_entry.flags =
11499                                                         cpu_to_le16(port);
11500                                 config->config_table[i].
11501                                         target_table_entry.flags = 0;
11502                                 config->config_table[i].target_table_entry.
11503                                         clients_bit_vector =
11504                                                 cpu_to_le32(1 << BP_L_ID(bp));
11505                                 config->config_table[i].
11506                                         target_table_entry.vlan_id = 0;
11507
11508                                 DP(NETIF_MSG_IFUP,
11509                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11510                                    config->config_table[i].
11511                                                 cam_entry.msb_mac_addr,
11512                                    config->config_table[i].
11513                                                 cam_entry.middle_mac_addr,
11514                                    config->config_table[i].
11515                                                 cam_entry.lsb_mac_addr);
11516                         }
11517                         old = config->hdr.length;
11518                         if (old > i) {
11519                                 for (; i < old; i++) {
11520                                         if (CAM_IS_INVALID(config->
11521                                                            config_table[i])) {
11522                                                 /* already invalidated */
11523                                                 break;
11524                                         }
11525                                         /* invalidate */
11526                                         CAM_INVALIDATE(config->
11527                                                        config_table[i]);
11528                                 }
11529                         }
11530
11531                         if (CHIP_REV_IS_SLOW(bp))
11532                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11533                         else
11534                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
11535
11536                         config->hdr.length = i;
11537                         config->hdr.offset = offset;
11538                         config->hdr.client_id = bp->fp->cl_id;
11539                         config->hdr.reserved1 = 0;
11540
11541                         bp->set_mac_pending++;
11542                         smp_wmb();
11543
11544                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11545                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11546                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11547                                       0);
11548                 } else { /* E1H */
11549                         /* Accept one or more multicasts */
11550                         struct dev_mc_list *mclist;
11551                         u32 mc_filter[MC_HASH_SIZE];
11552                         u32 crc, bit, regidx;
11553                         int i;
11554
11555                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11556
11557                         for (i = 0, mclist = dev->mc_list;
11558                              mclist && (i < dev->mc_count);
11559                              i++, mclist = mclist->next) {
11560
11561                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11562                                    mclist->dmi_addr);
11563
11564                                 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11565                                 bit = (crc >> 24) & 0xff;
11566                                 regidx = bit >> 5;
11567                                 bit &= 0x1f;
11568                                 mc_filter[regidx] |= (1 << bit);
11569                         }
11570
11571                         for (i = 0; i < MC_HASH_SIZE; i++)
11572                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11573                                        mc_filter[i]);
11574                 }
11575         }
11576
11577         bp->rx_mode = rx_mode;
11578         bnx2x_set_storm_rx_mode(bp);
11579 }
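/* E1H multicast hash, worked through (CRC value invented): crc32c_le()
 * over the 6-byte MAC gives e.g. 0xa7......; the top byte (0xa7 = 167)
 * selects one of 256 hash bins, regidx = 167 >> 5 = 5 picks the MC_HASH
 * register and 167 & 0x1f = 7 the bit within it.
 */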
11580
11581 /* called with rtnl_lock */
11582 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11583 {
11584         struct sockaddr *addr = p;
11585         struct bnx2x *bp = netdev_priv(dev);
11586
11587         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11588                 return -EINVAL;
11589
11590         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11591         if (netif_running(dev)) {
11592                 if (CHIP_IS_E1(bp))
11593                         bnx2x_set_eth_mac_addr_e1(bp, 1);
11594                 else
11595                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
11596         }
11597
11598         return 0;
11599 }
11600
11601 /* called with rtnl_lock */
11602 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11603                            int devad, u16 addr)
11604 {
11605         struct bnx2x *bp = netdev_priv(netdev);
11606         u16 value;
11607         int rc;
11608         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11609
11610         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11611            prtad, devad, addr);
11612
11613         if (prtad != bp->mdio.prtad) {
11614                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11615                    prtad, bp->mdio.prtad);
11616                 return -EINVAL;
11617         }
11618
11619         /* The HW expects different devad if CL22 is used */
11620         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11621
11622         bnx2x_acquire_phy_lock(bp);
11623         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11624                              devad, addr, &value);
11625         bnx2x_release_phy_lock(bp);
11626         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11627
11628         if (!rc)
11629                 rc = value;
11630         return rc;
11631 }
11632
11633 /* called with rtnl_lock */
11634 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11635                             u16 addr, u16 value)
11636 {
11637         struct bnx2x *bp = netdev_priv(netdev);
11638         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11639         int rc;
11640
11641         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11642                            " value 0x%x\n", prtad, devad, addr, value);
11643
11644         if (prtad != bp->mdio.prtad) {
11645                 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11646                    prtad, bp->mdio.prtad);
11647                 return -EINVAL;
11648         }
11649
11650         /* The HW expects different devad if CL22 is used */
11651         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11652
11653         bnx2x_acquire_phy_lock(bp);
11654         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11655                               devad, addr, value);
11656         bnx2x_release_phy_lock(bp);
11657         return rc;
11658 }
11659
11660 /* called with rtnl_lock */
11661 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11662 {
11663         struct bnx2x *bp = netdev_priv(dev);
11664         struct mii_ioctl_data *mdio = if_mii(ifr);
11665
11666         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11667            mdio->phy_id, mdio->reg_num, mdio->val_in);
11668
11669         if (!netif_running(dev))
11670                 return -EAGAIN;
11671
11672         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11673 }
11674
11675 /* called with rtnl_lock */
11676 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11677 {
11678         struct bnx2x *bp = netdev_priv(dev);
11679         int rc = 0;
11680
11681         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11682             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11683                 return -EINVAL;
11684
11685         /* This does not race with packet allocation
11686          * because the actual alloc size is
11687          * only updated as part of load
11688          */
11689         dev->mtu = new_mtu;
11690
11691         if (netif_running(dev)) {
11692                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11693                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11694         }
11695
11696         return rc;
11697 }
11698
11699 static void bnx2x_tx_timeout(struct net_device *dev)
11700 {
11701         struct bnx2x *bp = netdev_priv(dev);
11702
11703 #ifdef BNX2X_STOP_ON_ERROR
11704         if (!bp->panic)
11705                 bnx2x_panic();
11706 #endif
11707         /* This allows the netif to be shut down gracefully before resetting */
11708         schedule_work(&bp->reset_task);
11709 }
11710
11711 #ifdef BCM_VLAN
11712 /* called with rtnl_lock */
11713 static void bnx2x_vlan_rx_register(struct net_device *dev,
11714                                    struct vlan_group *vlgrp)
11715 {
11716         struct bnx2x *bp = netdev_priv(dev);
11717
11718         bp->vlgrp = vlgrp;
11719
11720         /* Set flags according to the required capabilities */
11721         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11722
11723         if (dev->features & NETIF_F_HW_VLAN_TX)
11724                 bp->flags |= HW_VLAN_TX_FLAG;
11725
11726         if (dev->features & NETIF_F_HW_VLAN_RX)
11727                 bp->flags |= HW_VLAN_RX_FLAG;
11728
11729         if (netif_running(dev))
11730                 bnx2x_set_client_config(bp);
11731 }
11732
11733 #endif
11734
11735 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11736 static void poll_bnx2x(struct net_device *dev)
11737 {
11738         struct bnx2x *bp = netdev_priv(dev);
11739
11740         disable_irq(bp->pdev->irq);
11741         bnx2x_interrupt(bp->pdev->irq, dev);
11742         enable_irq(bp->pdev->irq);
11743 }
11744 #endif
11745
11746 static const struct net_device_ops bnx2x_netdev_ops = {
11747         .ndo_open               = bnx2x_open,
11748         .ndo_stop               = bnx2x_close,
11749         .ndo_start_xmit         = bnx2x_start_xmit,
11750         .ndo_set_multicast_list = bnx2x_set_rx_mode,
11751         .ndo_set_mac_address    = bnx2x_change_mac_addr,
11752         .ndo_validate_addr      = eth_validate_addr,
11753         .ndo_do_ioctl           = bnx2x_ioctl,
11754         .ndo_change_mtu         = bnx2x_change_mtu,
11755         .ndo_tx_timeout         = bnx2x_tx_timeout,
11756 #ifdef BCM_VLAN
11757         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
11758 #endif
11759 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11760         .ndo_poll_controller    = poll_bnx2x,
11761 #endif
11762 };
11763
11764 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11765                                     struct net_device *dev)
11766 {
11767         struct bnx2x *bp;
11768         int rc;
11769
11770         SET_NETDEV_DEV(dev, &pdev->dev);
11771         bp = netdev_priv(dev);
11772
11773         bp->dev = dev;
11774         bp->pdev = pdev;
11775         bp->flags = 0;
11776         bp->func = PCI_FUNC(pdev->devfn);
11777
11778         rc = pci_enable_device(pdev);
11779         if (rc) {
11780                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11781                 goto err_out;
11782         }
11783
11784         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11785                 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11786                        " aborting\n");
11787                 rc = -ENODEV;
11788                 goto err_out_disable;
11789         }
11790
11791         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11792                 printk(KERN_ERR PFX "Cannot find second PCI device"
11793                        " base address, aborting\n");
11794                 rc = -ENODEV;
11795                 goto err_out_disable;
11796         }
11797
11798         if (atomic_read(&pdev->enable_cnt) == 1) {
11799                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11800                 if (rc) {
11801                         printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11802                                " aborting\n");
11803                         goto err_out_disable;
11804                 }
11805
11806                 pci_set_master(pdev);
11807                 pci_save_state(pdev);
11808         }
11809
11810         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11811         if (bp->pm_cap == 0) {
11812                 printk(KERN_ERR PFX "Cannot find power management"
11813                        " capability, aborting\n");
11814                 rc = -EIO;
11815                 goto err_out_release;
11816         }
11817
11818         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11819         if (bp->pcie_cap == 0) {
11820                 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11821                        " aborting\n");
11822                 rc = -EIO;
11823                 goto err_out_release;
11824         }
11825
11826         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11827                 bp->flags |= USING_DAC_FLAG;
11828                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11829                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11830                                " failed, aborting\n");
11831                         rc = -EIO;
11832                         goto err_out_release;
11833                 }
11834
11835         } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11836                 printk(KERN_ERR PFX "System does not support DMA,"
11837                        " aborting\n");
11838                 rc = -EIO;
11839                 goto err_out_release;
11840         }
11841
11842         dev->mem_start = pci_resource_start(pdev, 0);
11843         dev->base_addr = dev->mem_start;
11844         dev->mem_end = pci_resource_end(pdev, 0);
11845
11846         dev->irq = pdev->irq;
11847
11848         bp->regview = pci_ioremap_bar(pdev, 0);
11849         if (!bp->regview) {
11850                 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11851                 rc = -ENOMEM;
11852                 goto err_out_release;
11853         }
11854
11855         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11856                                         min_t(u64, BNX2X_DB_SIZE,
11857                                               pci_resource_len(pdev, 2)));
11858         if (!bp->doorbells) {
11859                 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11860                 rc = -ENOMEM;
11861                 goto err_out_unmap;
11862         }
11863
11864         bnx2x_set_power_state(bp, PCI_D0);
11865
11866         /* clean indirect addresses */
11867         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11868                                PCICFG_VENDOR_ID_OFFSET);
11869         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11870         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11871         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11872         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11873
11874         dev->watchdog_timeo = TX_TIMEOUT;
11875
11876         dev->netdev_ops = &bnx2x_netdev_ops;
11877         dev->ethtool_ops = &bnx2x_ethtool_ops;
11878         dev->features |= NETIF_F_SG;
11879         dev->features |= NETIF_F_HW_CSUM;
11880         if (bp->flags & USING_DAC_FLAG)
11881                 dev->features |= NETIF_F_HIGHDMA;
11882         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11883         dev->features |= NETIF_F_TSO6;
11884 #ifdef BCM_VLAN
11885         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11886         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11887
11888         dev->vlan_features |= NETIF_F_SG;
11889         dev->vlan_features |= NETIF_F_HW_CSUM;
11890         if (bp->flags & USING_DAC_FLAG)
11891                 dev->vlan_features |= NETIF_F_HIGHDMA;
11892         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11893         dev->vlan_features |= NETIF_F_TSO6;
11894 #endif
11895
11896         /* get_port_hwinfo() will set prtad and mmds properly */
11897         bp->mdio.prtad = MDIO_PRTAD_NONE;
11898         bp->mdio.mmds = 0;
11899         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11900         bp->mdio.dev = dev;
11901         bp->mdio.mdio_read = bnx2x_mdio_read;
11902         bp->mdio.mdio_write = bnx2x_mdio_write;
11903
11904         return 0;
11905
11906 err_out_unmap:
11907         if (bp->regview) {
11908                 iounmap(bp->regview);
11909                 bp->regview = NULL;
11910         }
11911         if (bp->doorbells) {
11912                 iounmap(bp->doorbells);
11913                 bp->doorbells = NULL;
11914         }
11915
11916 err_out_release:
11917         if (atomic_read(&pdev->enable_cnt) == 1)
11918                 pci_release_regions(pdev);
11919
11920 err_out_disable:
11921         pci_disable_device(pdev);
11922         pci_set_drvdata(pdev, NULL);
11923
11924 err_out:
11925         return rc;
11926 }
11927
11928 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11929                                                  int *width, int *speed)
11930 {
11931         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11932
11933         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11934
11935         /* return value of 1 = 2.5GHz, 2 = 5GHz */
11936         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11937 }
11938
11939 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11940 {
11941         const struct firmware *firmware = bp->firmware;
11942         struct bnx2x_fw_file_hdr *fw_hdr;
11943         struct bnx2x_fw_file_section *sections;
11944         u32 offset, len, num_ops;
11945         u16 *ops_offsets;
11946         int i;
11947         const u8 *fw_ver;
11948
11949         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11950                 return -EINVAL;
11951
11952         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11953         sections = (struct bnx2x_fw_file_section *)fw_hdr;
11954
11955         /* Make sure none of the offsets and sizes make us read beyond
11956          * the end of the firmware data */
11957         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11958                 offset = be32_to_cpu(sections[i].offset);
11959                 len = be32_to_cpu(sections[i].len);
11960                 if (offset + len > firmware->size) {
11961                         printk(KERN_ERR PFX "Section %d length is out of "
11962                                             "bounds\n", i);
11963                         return -EINVAL;
11964                 }
11965         }
11966
11967         /* Likewise for the init_ops offsets */
11968         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11969         ops_offsets = (u16 *)(firmware->data + offset);
11970         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11971
11972         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11973                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11974                         printk(KERN_ERR PFX "Section offset %d is out of "
11975                                             "bounds\n", i);
11976                         return -EINVAL;
11977                 }
11978         }
11979
11980         /* Check FW version */
11981         offset = be32_to_cpu(fw_hdr->fw_version.offset);
11982         fw_ver = firmware->data + offset;
11983         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11984             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11985             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11986             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11987                 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11988                                     " Should be %d.%d.%d.%d\n",
11989                        fw_ver[0], fw_ver[1], fw_ver[2],
11990                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11991                        BCM_5710_FW_MINOR_VERSION,
11992                        BCM_5710_FW_REVISION_VERSION,
11993                        BCM_5710_FW_ENGINEERING_VERSION);
11994                 return -EINVAL;
11995         }
11996
11997         return 0;
11998 }
11999
12000 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12001 {
12002         const __be32 *source = (const __be32 *)_source;
12003         u32 *target = (u32 *)_target;
12004         u32 i;
12005
12006         for (i = 0; i < n/4; i++)
12007                 target[i] = be32_to_cpu(source[i]);
12008 }
12009
12010 /*
12011    Ops array is stored in the following format:
12012    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12013  */
12014 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12015 {
12016         const __be32 *source = (const __be32 *)_source;
12017         struct raw_op *target = (struct raw_op *)_target;
12018         u32 i, j, tmp;
12019
12020         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12021                 tmp = be32_to_cpu(source[j]);
12022                 target[i].op = (tmp >> 24) & 0xff;
12023                 target[i].offset =  tmp & 0xffffff;
12024                 target[i].raw_data = be32_to_cpu(source[j+1]);
12025         }
12026 }
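/* Example (bytes invented): the 8-byte record 02 00 12 34 de ad be ef
 * unpacks to op = 0x02, offset = 0x001234, raw_data = 0xdeadbeef.
 */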
12027
12028 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12029 {
12030         const __be16 *source = (const __be16 *)_source;
12031         u16 *target = (u16 *)_target;
12032         u32 i;
12033
12034         for (i = 0; i < n/2; i++)
12035                 target[i] = be16_to_cpu(source[i]);
12036 }
12037
12038 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12039         do { \
12040                 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12041                 bp->arr = kmalloc(len, GFP_KERNEL); \
12042                 if (!bp->arr) { \
12043                         printk(KERN_ERR PFX "Failed to allocate %d bytes " \
12044                                             "for "#arr"\n", len); \
12045                         goto lbl; \
12046                 } \
12047                 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12048                      (u8 *)bp->arr, len); \
12049         } while (0)
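/* Usage note: BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) allocates bp->init_data with the length taken from the
 * firmware header and byte-swaps the matching file section into it,
 * jumping to the given label on allocation failure (see
 * bnx2x_init_firmware() below).
 */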

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
        const char *fw_file_name;
        struct bnx2x_fw_file_hdr *fw_hdr;
        int rc;

        if (CHIP_IS_E1(bp))
                fw_file_name = FW_FILE_NAME_E1;
        else
                fw_file_name = FW_FILE_NAME_E1H;

        printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

        rc = request_firmware(&bp->firmware, fw_file_name, dev);
        if (rc) {
                printk(KERN_ERR PFX "Can't load firmware file %s\n",
                       fw_file_name);
                goto request_firmware_exit;
        }

        rc = bnx2x_check_firmware(bp);
        if (rc) {
                printk(KERN_ERR PFX "Corrupt firmware file %s\n",
                       fw_file_name);
                goto request_firmware_exit;
        }

        fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

        /* Initialize the pointers to the init arrays */
        /* Blob */
        BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

        /* Opcodes */
        BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

        /* Offsets */
        BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
                            be16_to_cpu_n);

        /* STORMs firmware */
        INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
        INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->tsem_pram_data.offset);
        INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_int_table_data.offset);
        INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->usem_pram_data.offset);
        INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
        INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->xsem_pram_data.offset);
        INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_int_table_data.offset);
        INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
                        be32_to_cpu(fw_hdr->csem_pram_data.offset);

        return 0;

init_offsets_alloc_err:
        kfree(bp->init_ops);
init_ops_alloc_err:
        kfree(bp->init_data);
request_firmware_exit:
        release_firmware(bp->firmware);

        return rc;
}
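
/*
 * Note on the error path above: the labels form a stack that unwinds
 * in reverse allocation order, each one falling through into the next,
 * so every failure point frees exactly what was already allocated.  A
 * failed init_ops_offsets allocation, for instance, takes the path
 *
 *	init_offsets_alloc_err: kfree(bp->init_ops);
 *	init_ops_alloc_err:     kfree(bp->init_data);
 *	request_firmware_exit:  release_firmware(bp->firmware);
 *
 * before returning the error code.
 */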


static int __devinit bnx2x_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct net_device *dev = NULL;
        struct bnx2x *bp;
        int pcie_width, pcie_speed;
        int rc;

        /* dev is zeroed by alloc_etherdev_mq() */
        dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
        if (!dev) {
                printk(KERN_ERR PFX "Cannot allocate net device\n");
                return -ENOMEM;
        }

        bp = netdev_priv(dev);
        bp->msglevel = debug;

        pci_set_drvdata(pdev, dev);

        rc = bnx2x_init_dev(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        rc = bnx2x_init_bp(bp);
        if (rc)
                goto init_one_exit;

        /* Set init arrays */
        rc = bnx2x_init_firmware(bp, &pdev->dev);
        if (rc) {
                printk(KERN_ERR PFX "Error loading firmware\n");
                goto init_one_exit;
        }

        rc = register_netdev(dev);
        if (rc) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto init_one_exit;
        }

        bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
        printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
               " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
               (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
               pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
               dev->base_addr, bp->pdev->irq);
        printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

        return 0;

init_one_exit:
        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);

        return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return;
        }
        bp = netdev_priv(dev);

        unregister_netdev(dev);

        kfree(bp->init_ops_offsets);
        kfree(bp->init_ops);
        kfree(bp->init_data);
        release_firmware(bp->firmware);

        if (bp->regview)
                iounmap(bp->regview);

        if (bp->doorbells)
                iounmap(bp->doorbells);

        free_netdev(dev);

        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_save_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        netif_device_detach(dev);

        bnx2x_nic_unload(bp, UNLOAD_CLOSE);

        bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

        rtnl_unlock();

        return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
        int rc;

        if (!dev) {
                printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
                return -ENODEV;
        }
        bp = netdev_priv(dev);

        rtnl_lock();

        pci_restore_state(pdev);

        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }

        bnx2x_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);

        rc = bnx2x_nic_load(bp, LOAD_OPEN);

        rtnl_unlock();

        return rc;
}
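
/*
 * The two PM callbacks above mirror each other: suspend saves PCI
 * config space, detaches the net device and unloads the NIC before
 * dropping to the requested low-power state, while resume restores
 * config space, returns to D0 and reloads with LOAD_OPEN.  Both take
 * rtnl_lock so the transition cannot race an ifup/ifdown, and both
 * bail out early (successfully) when the interface is not running.
 */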

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
        int i;

        bp->state = BNX2X_STATE_ERROR;

        bp->rx_mode = BNX2X_RX_MODE_NONE;

        bnx2x_netif_stop(bp, 0);

        del_timer_sync(&bp->timer);
        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        /* Release IRQs */
        bnx2x_free_irq(bp);

        if (CHIP_IS_E1(bp)) {
                struct mac_configuration_cmd *config =
                                                bnx2x_sp(bp, mcast_config);

                for (i = 0; i < config->hdr.length; i++)
                        CAM_INVALIDATE(config->config_table[i]);
        }

        /* Free SKBs, SGEs, TPA pool and driver internals */
        bnx2x_free_skbs(bp);
        for_each_queue(bp, i)
                bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
        for_each_queue(bp, i)
                netif_napi_del(&bnx2x_fp(bp, i, napi));
        bnx2x_free_mem(bp);

        bp->state = BNX2X_STATE_CLOSED;

        netif_carrier_off(bp->dev);

        return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
        u32 val;

        mutex_init(&bp->port.phy_mutex);

        bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        bp->link_params.shmem_base = bp->common.shmem_base;
        BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

        if (!bp->common.shmem_base ||
            (bp->common.shmem_base < 0xA0000) ||
            (bp->common.shmem_base >= 0xC0000)) {
                BNX2X_DEV_INFO("MCP not active\n");
                bp->flags |= NO_MCP_FLAG;
                return;
        }

        val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                BNX2X_ERR("BAD MCP validity signature\n");

        if (!BP_NOMCP(bp)) {
                bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
                              & DRV_MSG_SEQ_NUMBER_MASK);
                BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
        }
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        netif_device_detach(dev);

        if (state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        if (netif_running(dev))
                bnx2x_eeh_nic_unload(bp);

        pci_disable_device(pdev);

        rtnl_unlock();

        /* Request a slot reset */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset\n");
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);

        if (netif_running(dev))
                bnx2x_set_power_state(bp, PCI_D0);

        rtnl_unlock();

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp = netdev_priv(dev);

        rtnl_lock();

        bnx2x_eeh_recover(bp);

        if (netif_running(dev))
                bnx2x_nic_load(bp, LOAD_NORMAL);

        netif_device_attach(dev);

        rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
        .error_detected = bnx2x_io_error_detected,
        .slot_reset     = bnx2x_io_slot_reset,
        .resume         = bnx2x_io_resume,
};
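
/*
 * Recovery sequence driven by the PCI error (EEH/AER) core, as
 * described in Documentation/PCI/pci-error-recovery.txt: on a detected
 * bus error ->error_detected() quiesces the device and asks for a slot
 * reset, ->slot_reset() re-enables and restores it once the link is
 * back, and ->resume() reloads the NIC when traffic may flow again.
 */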

static struct pci_driver bnx2x_pci_driver = {
        .name        = DRV_MODULE_NAME,
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = __devexit_p(bnx2x_remove_one),
        .suspend     = bnx2x_suspend,
        .resume      = bnx2x_resume,
        .err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
        int ret;

        printk(KERN_INFO "%s", version);

        bnx2x_wq = create_singlethread_workqueue("bnx2x");
        if (bnx2x_wq == NULL) {
                printk(KERN_ERR PFX "Cannot create workqueue\n");
                return -ENOMEM;
        }

        ret = pci_register_driver(&bnx2x_pci_driver);
        if (ret) {
                printk(KERN_ERR PFX "Cannot register driver\n");
                destroy_workqueue(bnx2x_wq);
        }
        return ret;
}

static void __exit bnx2x_cleanup(void)
{
        pci_unregister_driver(&bnx2x_pci_driver);

        destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
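
/*
 * Load-time example (host side, illustrative): the module parameters
 * declared at the top of this file can be set when the module is
 * inserted, e.g.
 *
 *	modprobe bnx2x multi_mode=1 num_queues=4
 *
 * to force multi-queue mode with four queues instead of the default
 * of one queue per CPU.
 */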

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
        struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return;
#endif

        spin_lock_bh(&bp->spq_lock);
        bp->cnic_spq_pending -= count;

        for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
             bp->cnic_spq_pending++) {

                if (!bp->cnic_kwq_pending)
                        break;

                spe = bnx2x_sp_get_next(bp);
                *spe = *bp->cnic_kwq_cons;

                bp->cnic_kwq_pending--;

                DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
                   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

                if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
                        bp->cnic_kwq_cons = bp->cnic_kwq;
                else
                        bp->cnic_kwq_cons++;
        }
        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
}
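
/*
 * How the credit scheme above works: cnic_spq_pending counts slow-path
 * queue slots currently consumed by CNIC, so "count" completions hand
 * that many slots back.  The loop then drains queued kwqes into the
 * freed slots until either the kwq backlog is empty or the
 * max_kwqe_pending credit is exhausted again.  The cons pointer wraps
 * from cnic_kwq_last back to cnic_kwq, making the kwq array a simple
 * circular buffer.
 */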

static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.mac_config_addr.hi,
                   spe->data.mac_config_addr.lo,
                   bp->cnic_kwq_pending);

                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        mutex_lock(&bp->cnic_mutex);
        c_ops = bp->cnic_ops;
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);

        return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        rcu_read_unlock();

        return rc;
}
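
/*
 * Two flavours of the same call: bnx2x_cnic_ctl_send() takes
 * cnic_mutex and may sleep, so it is only usable from process context;
 * bnx2x_cnic_ctl_send_bh() instead dereferences cnic_ops under
 * rcu_read_lock(), which is safe from bottom-half/softirq context
 * where sleeping is forbidden.  Both tolerate a concurrent unregister
 * because the ops pointer is re-checked under the respective lock.
 */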

/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
        struct cnic_ctl_info ctl = {0};

        ctl.cmd = cmd;

        return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
        struct cnic_ctl_info ctl;

        /* first we tell CNIC and only then we count this as a completion */
        ctl.cmd = CNIC_CTL_COMPLETION_CMD;
        ctl.data.comp.cid = cid;

        bnx2x_cnic_ctl_send_bh(bp, &ctl);
        bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_COMPLETION_CMD: {
                int count = ctl->data.comp.comp_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held. */
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask |= (1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        /* rtnl_lock is held. */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask &= ~(1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}
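
/*
 * Sketch of a call from the CNIC side (illustrative; the real caller
 * lives in the cnic module and reaches this function through the
 * drv_ctl hook exported via bnx2x_cnic_probe() below):
 *
 *	struct drv_ctl_info info;
 *
 *	info.cmd = DRV_CTL_COMPLETION_CMD;
 *	info.data.comp.comp_count = 1;
 *	ethdev->drv_ctl(netdev, &info);
 *
 * which lands in the DRV_CTL_COMPLETION_CMD case above and returns one
 * slow-path queue credit via bnx2x_cnic_sp_post().
 */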

static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        cp->irq_arr[0].status_blk = bp->cnic_sb;
        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;

        cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

        bnx2x_setup_cnic_irq_info(bp);
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}
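
/*
 * Unregister ordering matters: the ops pointer is cleared first
 * (rcu_assign_pointer to NULL), then synchronize_rcu() waits for any
 * reader still inside an rcu_read_lock() section (see
 * bnx2x_cnic_ctl_send_bh()) to finish before cnic_kwq is freed.  This
 * is the classic RCU publish/retire pattern that keeps the lockless
 * readers above safe against teardown.
 */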

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = BCM_CNIC_CID_START;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
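
/*
 * Sketch of how the CNIC module is expected to attach (illustrative,
 * assuming the cnic side of the interface; names other than
 * bnx2x_cnic_probe are placeholders):
 *
 *	struct cnic_eth_dev *(*probe)(struct net_device *);
 *	struct cnic_eth_dev *ethdev;
 *
 *	probe = symbol_get(bnx2x_cnic_probe);
 *	if (probe) {
 *		ethdev = (*probe)(netdev);
 *		symbol_put(bnx2x_cnic_probe);
 *	}
 *
 * after which cnic drives the device through the drv_* callbacks
 * filled in above.
 */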

#endif /* BCM_CNIC */
