/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"

#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/
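/* Indirect (PCI config space) register access: the device's GRC register
 * space is reached through a window in PCI config space - the target offset
 * is written to PCICFG_GRC_ADDRESS, then the data is accessed through
 * PCICFG_GRC_DATA. Restoring PCICFG_VENDOR_ID_OFFSET afterwards points the
 * window back at a harmless register. Used before BAR/DMAE access is ready.
 */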
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
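/* DMAE (DMA engine) transfers: a dmae_command describes a copy between host
 * memory (PCI) and device registers (GRC). The command is posted to the
 * engine's command memory and kicked via its GO register; completion is
 * detected by polling a write-back word (wb_comp) in the slowpath area,
 * which the engine sets to DMAE_COMP_VAL when the copy is done.
 */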
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
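/* Each STORM processor (X/T/C/U) keeps an assert list in its internal
 * memory. Walk each list until an entry whose first word is the "invalid
 * assert opcode" terminator, printing the raw rows as they appear; the
 * return value is the total number of asserts found.
 */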
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
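/* The MCP firmware keeps a text log in its scratchpad, written as a
 * circular buffer; the word at offset 0xf104 ("mark") records the wrap
 * point. Dumping mark..end first and then start..mark brings the text
 * out in order.
 */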
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/* fast path */

/*
 * General service functions
 */
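/* Acknowledging a status block update to the IGU is a single 32-bit write
 * that packs the status block id, the storm id, the new index and the
 * interrupt mode command (enable/disable/nop) into one register value.
 */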
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
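/* Tx ring accounting below uses signed 16-bit arithmetic (SUB_S16) so that
 * producer/consumer wrap-around is handled naturally; the NUM_TX_RINGS
 * "next page" BDs are counted as permanently used.
 */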
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
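/* The SGE ring is shadowed by a bitmask (one bit per SGE, grouped into
 * 64-bit elements). Bits are cleared as SGEs are consumed; the producer
 * only advances over elements whose mask is fully clear, so pages are not
 * reused before every completion covering that element has arrived.
 */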
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
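/* TPA (Transparent Packet Aggregation) - HW LRO. On TPA_START the driver
 * parks the partially-filled skb in a per-queue "bin" (tpa_pool) and gives
 * the ring a fresh skb; on TPA_END the aggregated payload is attached to
 * the parked skb as page fragments taken from the SGE ring.
 */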
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
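/* Main Rx polling loop: walks the completion queue (RCQ). Each CQE is
 * either a slowpath/ramrod completion, a TPA start/end marker, or a
 * regular packet. When done, the three producers (BD, CQE, SGE) are
 * pushed back to USTORM internal memory via bnx2x_update_rx_prod().
 */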
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
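/* The HW lock registers implement a set/clear semaphore per resource bit:
 * writing the bit to the control register + 4 attempts to take the lock
 * (read back to confirm it stuck), writing it to the control register
 * itself releases it. Functions 0-5 and 6-7 use different register banks.
 */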
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
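/* Map the IEEE 802.3 pause advertisement resolved by the link code into
 * the ethtool ADVERTISED_Pause/ADVERTISED_Asym_Pause bits for reporting.
 */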
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->state == BNX2X_STATE_DISABLED) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
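/* Congestion management (E1H multi-function): per-port rate shaping and
 * fairness timers are derived from the current line speed, and per-VN
 * min/max rates come from the MF configuration in shared memory. The
 * computed parameters are written into XSTORM internal memory below.
 */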
2221 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2223 u32 r_param = bp->link_vars.line_speed / 8;
2224 u32 fair_periodic_timeout_usec;
2227 memset(&(bp->cmng.rs_vars), 0,
2228 sizeof(struct rate_shaping_vars_per_port));
2229 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2231 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2232 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2234 /* this is the threshold below which no timer arming will occur
2235 1.25 coefficient is for the threshold to be a little bigger
2236 than the real time, to compensate for timer in-accuracy */
2237 bp->cmng.rs_vars.rs_threshold =
2238 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2240 /* resolution of fairness timer */
2241 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2242 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2243 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2245 /* this is the threshold below which we won't arm the timer anymore */
2246 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
/* we multiply by 1e3/8 to get bytes/msec; we don't want the credits
   to exceed a credit of t_fair*FAIR_MEM (the algorithm resolution) */
2251 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2252 /* since each tick is 4 usec */
2253 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
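/* Worked example (illustrative only; RS_PERIODIC_TIMEOUT_USEC is taken
   as 100 usec, matching the "100 usec in SDM ticks = 25" comment above):
   on a 10G link,
	r_param             = 10000 / 8            = 1250 bytes/usec
	rs_periodic_timeout = 100 / 4              = 25 SDM ticks
	rs_threshold        = (100 * 1250 * 5) / 4 = 156250 bytes
	t_fair              = T_FAIR_COEF / 10000  = 1000 usec
   so the rate-shaping timer is re-armed only while more than ~156KB
   per 100 usec window remain to be sent. */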
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates, or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1. */
2265 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2268 int port = BP_PORT(bp);
2271 bp->vn_weight_sum = 0;
2272 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2273 int func = 2*vn + port;
2274 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2275 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2276 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2278 /* Skip hidden vns */
2279 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2282 /* If min rate is zero - set it to 1 */
2284 vn_min_rate = DEF_MIN_RATE;
2288 bp->vn_weight_sum += vn_min_rate;
2291 /* ... only if all min rates are zeros - disable fairness */
2293 bp->vn_weight_sum = 0;
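/* Example with hypothetical values: if two visible vns have min-BW
   fields that decode to 25 and 0, the zero entry is bumped to
   DEF_MIN_RATE before summing, so vn_weight_sum = 2500 + DEF_MIN_RATE
   and fairness stays active.  Only when every visible vn decodes to
   zero is vn_weight_sum left at 0, which deactivates the fairness
   algorithm as described above. */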
2296 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2298 struct rate_shaping_vars_per_vn m_rs_vn;
2299 struct fairness_vars_per_vn m_fair_vn;
2300 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2301 u16 vn_min_rate, vn_max_rate;
2304 /* If function is hidden - set min and max to zeroes */
2305 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2310 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2311 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2312 /* If fairness is enabled (not all min rates are zeroes) and
2313 if current min rate is zero - set it to 1.
2314 This is a requirement of the algorithm. */
2315 if (bp->vn_weight_sum && (vn_min_rate == 0))
2316 vn_min_rate = DEF_MIN_RATE;
2317 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2318 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2322 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2323 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2325 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2326 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2328 /* global vn counter - maximal Mbps for this vn */
2329 m_rs_vn.vn_counter.rate = vn_max_rate;
2331 /* quota - number of bytes transmitted in this period */
2332 m_rs_vn.vn_counter.quota =
2333 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2335 if (bp->vn_weight_sum) {
/* credit for each period of the fairness algorithm: number of bytes
   in T_FAIR (the vns share the port rate). vn_weight_sum should not
   be larger than 10000, thus T_FAIR_COEF / (8 * vn_weight_sum) will
   always be greater than zero */
2341 m_fair_vn.vn_credit_delta =
2342 max((u32)(vn_min_rate * (T_FAIR_COEF /
2343 (8 * bp->vn_weight_sum))),
2344 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2345 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2346 m_fair_vn.vn_credit_delta);
2349 /* Store it to internal memory */
2350 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2351 REG_WR(bp, BAR_XSTRORM_INTMEM +
2352 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2353 ((u32 *)(&m_rs_vn))[i]);
2355 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2356 REG_WR(bp, BAR_XSTRORM_INTMEM +
2357 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2358 ((u32 *)(&m_fair_vn))[i]);
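/* Quota example (illustrative, again taking RS_PERIODIC_TIMEOUT_USEC
   as 100 usec): a vn capped at vn_max_rate = 2500 Mbps may transmit
	quota = (2500 * 100) / 8 = 31250 bytes
   in each rate-shaping period before the shaper throttles it. */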
2362 /* This function is called upon link interrupt */
2363 static void bnx2x_link_attn(struct bnx2x *bp)
2365 /* Make sure that we are synced with the current statistics */
2366 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2368 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2370 if (bp->link_vars.link_up) {
2372 /* dropless flow control */
2373 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2374 int port = BP_PORT(bp);
2375 u32 pause_enabled = 0;
2377 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2380 REG_WR(bp, BAR_USTRORM_INTMEM +
2381 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2385 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2386 struct host_port_stats *pstats;
2388 pstats = bnx2x_sp(bp, port_stats);
2389 /* reset old bmac stats */
2390 memset(&(pstats->mac_stx[0]), 0,
2391 sizeof(struct mac_stx));
2393 if ((bp->state == BNX2X_STATE_OPEN) ||
2394 (bp->state == BNX2X_STATE_DISABLED))
2395 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2398 /* indicate link status */
2399 bnx2x_link_report(bp);
2402 int port = BP_PORT(bp);
2406 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2407 if (vn == BP_E1HVN(bp))
2410 func = ((vn << 1) | port);
/* Set the attention towards other drivers on the same port */
2414 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2415 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2418 if (bp->link_vars.link_up) {
2421 /* Init rate shaping and fairness contexts */
2422 bnx2x_init_port_minmax(bp);
2424 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2425 bnx2x_init_vn_minmax(bp, 2*vn + port);
2427 /* Store it to internal memory */
2429 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2430 REG_WR(bp, BAR_XSTRORM_INTMEM +
2431 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2432 ((u32 *)(&bp->cmng))[i]);
2437 static void bnx2x__link_status_update(struct bnx2x *bp)
2439 int func = BP_FUNC(bp);
2441 if (bp->state != BNX2X_STATE_OPEN)
2444 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2446 if (bp->link_vars.link_up)
2447 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2449 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2451 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2452 bnx2x_calc_vn_weight_sum(bp);
2454 /* indicate link status */
2455 bnx2x_link_report(bp);
2458 static void bnx2x_pmf_update(struct bnx2x *bp)
2460 int port = BP_PORT(bp);
2464 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2466 /* enable nig attention */
2467 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2468 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2469 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2471 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2479 * General service functions
2482 /* send the MCP a request, block until there is a reply */
2483 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2485 int func = BP_FUNC(bp);
2486 u32 seq = ++bp->fw_seq;
2489 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2491 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2492 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
/* let the FW do its magic ... */
2498 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
/* Give the FW up to 2 seconds (200*10ms) */
2501 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2503 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2504 cnt*delay, rc, seq);
2506 /* is this a reply to our command? */
2507 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2508 rc &= FW_MSG_CODE_MASK;
2511 BNX2X_ERR("FW failed to respond!\n");
2519 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2520 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2521 static void bnx2x_set_rx_mode(struct net_device *dev);
2523 static void bnx2x_e1h_disable(struct bnx2x *bp)
2525 int port = BP_PORT(bp);
2528 bp->rx_mode = BNX2X_RX_MODE_NONE;
2529 bnx2x_set_storm_rx_mode(bp);
2531 netif_tx_disable(bp->dev);
2532 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2534 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2536 bnx2x_set_mac_addr_e1h(bp, 0);
2538 for (i = 0; i < MC_HASH_SIZE; i++)
2539 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2541 netif_carrier_off(bp->dev);
2544 static void bnx2x_e1h_enable(struct bnx2x *bp)
2546 int port = BP_PORT(bp);
2548 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2550 bnx2x_set_mac_addr_e1h(bp, 1);
/* Tx queues should only be re-enabled */
2553 netif_tx_wake_all_queues(bp->dev);
2555 /* Initialize the receive filter. */
2556 bnx2x_set_rx_mode(bp->dev);
2559 static void bnx2x_update_min_max(struct bnx2x *bp)
2561 int port = BP_PORT(bp);
2564 /* Init rate shaping and fairness contexts */
2565 bnx2x_init_port_minmax(bp);
2567 bnx2x_calc_vn_weight_sum(bp);
2569 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2570 bnx2x_init_vn_minmax(bp, 2*vn + port);
2575 /* Set the attention towards other drivers on the same port */
2576 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2577 if (vn == BP_E1HVN(bp))
2580 func = ((vn << 1) | port);
2581 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2582 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2585 /* Store it to internal memory */
2586 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2587 REG_WR(bp, BAR_XSTRORM_INTMEM +
2588 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2589 ((u32 *)(&bp->cmng))[i]);
2593 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2595 int func = BP_FUNC(bp);
2597 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2598 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2600 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2602 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2603 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2604 bp->state = BNX2X_STATE_DISABLED;
2606 bnx2x_e1h_disable(bp);
2608 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2609 bp->state = BNX2X_STATE_OPEN;
2611 bnx2x_e1h_enable(bp);
2613 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2615 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2617 bnx2x_update_min_max(bp);
2618 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2621 /* Report results to MCP */
2623 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2625 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2628 /* the slow path queue is odd since completions arrive on the fastpath ring */
2629 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2630 u32 data_hi, u32 data_lo, int common)
2632 int func = BP_FUNC(bp);
2634 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2635 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2636 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2637 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2638 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2640 #ifdef BNX2X_STOP_ON_ERROR
2641 if (unlikely(bp->panic))
2645 spin_lock_bh(&bp->spq_lock);
2647 if (!bp->spq_left) {
2648 BNX2X_ERR("BUG! SPQ ring full!\n");
2649 spin_unlock_bh(&bp->spq_lock);
/* CID needs the port number to be encoded in it */
2655 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2656 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2658 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2660 bp->spq_prod_bd->hdr.type |=
2661 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2663 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2664 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2668 if (bp->spq_prod_bd == bp->spq_last_bd) {
2669 bp->spq_prod_bd = bp->spq;
2670 bp->spq_prod_idx = 0;
2671 DP(NETIF_MSG_TIMER, "end of spq\n");
2678 /* Make sure that BD data is updated before writing the producer */
2681 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2686 spin_unlock_bh(&bp->spq_lock);
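/* Usage sketch, matching the statistics code later in this file:

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
			   ((u32 *)&ramrod_data)[1],
			   ((u32 *)&ramrod_data)[0], 0);

   A non-zero 'common' argument additionally sets
   SPE_HDR_COMMON_RAMROD in the BD header, as done above. */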
2690 /* acquire split MCP access lock register */
2691 static int bnx2x_acquire_alr(struct bnx2x *bp)
2698 for (j = 0; j < i*10; j++) {
2700 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2701 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2702 if (val & (1L << 31))
2707 if (!(val & (1L << 31))) {
2708 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2715 /* release split MCP access lock register */
2716 static void bnx2x_release_alr(struct bnx2x *bp)
2720 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
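/* Lock protocol sketch: the lock is acquired by setting bit 31 of the
   register at GRCBASE_MCP + 0x9c and polling until the bit reads back
   as set, and released by writing the bit back to zero.  The MCP
   firmware and the other port arbitrate through this same register,
   hence the "split" in the name. */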
2723 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2725 struct host_def_status_block *def_sb = bp->def_status_blk;
2728 barrier(); /* status block is written to by the chip */
2729 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2730 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2733 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2734 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2737 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2738 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2741 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2742 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2745 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2746 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2753 * slow path service functions
2756 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2758 int port = BP_PORT(bp);
2759 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2760 COMMAND_REG_ATTN_BITS_SET);
2761 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2762 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2763 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2764 NIG_REG_MASK_INTERRUPT_PORT0;
2768 if (bp->attn_state & asserted)
2769 BNX2X_ERR("IGU ERROR\n");
2771 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2772 aeu_mask = REG_RD(bp, aeu_addr);
2774 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2775 aeu_mask, asserted);
2776 aeu_mask &= ~(asserted & 0xff);
2777 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2779 REG_WR(bp, aeu_addr, aeu_mask);
2780 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2782 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2783 bp->attn_state |= asserted;
2784 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2786 if (asserted & ATTN_HARD_WIRED_MASK) {
2787 if (asserted & ATTN_NIG_FOR_FUNC) {
2789 bnx2x_acquire_phy_lock(bp);
2791 /* save nig interrupt mask */
2792 nig_mask = REG_RD(bp, nig_int_mask_addr);
2793 REG_WR(bp, nig_int_mask_addr, 0);
2795 bnx2x_link_attn(bp);
2797 /* handle unicore attn? */
2799 if (asserted & ATTN_SW_TIMER_4_FUNC)
2800 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2802 if (asserted & GPIO_2_FUNC)
2803 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2805 if (asserted & GPIO_3_FUNC)
2806 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2808 if (asserted & GPIO_4_FUNC)
2809 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2812 if (asserted & ATTN_GENERAL_ATTN_1) {
2813 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2814 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2816 if (asserted & ATTN_GENERAL_ATTN_2) {
2817 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2818 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2820 if (asserted & ATTN_GENERAL_ATTN_3) {
2821 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2822 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2825 if (asserted & ATTN_GENERAL_ATTN_4) {
2826 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2827 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2829 if (asserted & ATTN_GENERAL_ATTN_5) {
2830 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2831 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2833 if (asserted & ATTN_GENERAL_ATTN_6) {
2834 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2835 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2839 } /* if hardwired */
2841 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2843 REG_WR(bp, hc_addr, asserted);
2845 /* now set back the mask */
2846 if (asserted & ATTN_NIG_FOR_FUNC) {
2847 REG_WR(bp, nig_int_mask_addr, nig_mask);
2848 bnx2x_release_phy_lock(bp);
2852 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2854 int port = BP_PORT(bp);
2856 /* mark the failure */
2857 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2858 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2859 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2860 bp->link_params.ext_phy_config);
2862 /* log the failure */
printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
       " the driver to shut down the card to prevent permanent"
       " damage. Please contact Dell Support for assistance\n",
2868 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2870 int port = BP_PORT(bp);
2872 u32 val, swap_val, swap_override;
2874 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2875 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2877 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2879 val = REG_RD(bp, reg_offset);
2880 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2881 REG_WR(bp, reg_offset, val);
2883 BNX2X_ERR("SPIO5 hw attention\n");
2885 /* Fan failure attention */
2886 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2887 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2888 /* Low power mode is controlled by GPIO 2 */
2889 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2890 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2891 /* The PHY reset is controlled by GPIO 1 */
2892 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2893 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2896 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2897 /* The PHY reset is controlled by GPIO 1 */
/* fake the port number to cancel the swap done in set_gpio() */
2900 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2901 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2902 port = (swap_val && swap_override) ^ 1;
2903 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2904 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2910 bnx2x_fan_failure(bp);
2913 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2914 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2915 bnx2x_acquire_phy_lock(bp);
2916 bnx2x_handle_module_detect_int(&bp->link_params);
2917 bnx2x_release_phy_lock(bp);
2920 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2922 val = REG_RD(bp, reg_offset);
2923 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2924 REG_WR(bp, reg_offset, val);
2926 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2927 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2932 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2936 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2938 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2939 BNX2X_ERR("DB hw attention 0x%x\n", val);
2940 /* DORQ discard attention */
2942 BNX2X_ERR("FATAL error from DORQ\n");
2945 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2947 int port = BP_PORT(bp);
2950 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2951 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2953 val = REG_RD(bp, reg_offset);
2954 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2955 REG_WR(bp, reg_offset, val);
2957 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2958 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2963 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2967 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2969 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2970 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2971 /* CFC error attention */
2973 BNX2X_ERR("FATAL error from CFC\n");
2976 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2978 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2979 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2980 /* RQ_USDMDP_FIFO_OVERFLOW */
2982 BNX2X_ERR("FATAL error from PXP\n");
2985 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2987 int port = BP_PORT(bp);
2990 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2991 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2993 val = REG_RD(bp, reg_offset);
2994 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2995 REG_WR(bp, reg_offset, val);
2997 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2998 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3003 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3007 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3009 if (attn & BNX2X_PMF_LINK_ASSERT) {
3010 int func = BP_FUNC(bp);
3012 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3013 val = SHMEM_RD(bp, func_mb[func].drv_status);
3014 if (val & DRV_STATUS_DCC_EVENT_MASK)
3016 (val & DRV_STATUS_DCC_EVENT_MASK));
3017 bnx2x__link_status_update(bp);
3018 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3019 bnx2x_pmf_update(bp);
3021 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3023 BNX2X_ERR("MC assert!\n");
3024 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3025 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3026 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3027 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3030 } else if (attn & BNX2X_MCP_ASSERT) {
3032 BNX2X_ERR("MCP assert!\n");
3033 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3037 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3040 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3041 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3042 if (attn & BNX2X_GRC_TIMEOUT) {
3043 val = CHIP_IS_E1H(bp) ?
3044 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3045 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3047 if (attn & BNX2X_GRC_RSV) {
3048 val = CHIP_IS_E1H(bp) ?
3049 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3050 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3052 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3056 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3058 struct attn_route attn;
3059 struct attn_route group_mask;
3060 int port = BP_PORT(bp);
3066 /* need to take HW lock because MCP or other port might also
3067 try to handle this event */
3068 bnx2x_acquire_alr(bp);
3070 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3071 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3072 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3073 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3074 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3075 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3077 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3078 if (deasserted & (1 << index)) {
3079 group_mask = bp->attn_group[index];
3081 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3082 index, group_mask.sig[0], group_mask.sig[1],
3083 group_mask.sig[2], group_mask.sig[3]);
3085 bnx2x_attn_int_deasserted3(bp,
3086 attn.sig[3] & group_mask.sig[3]);
3087 bnx2x_attn_int_deasserted1(bp,
3088 attn.sig[1] & group_mask.sig[1]);
3089 bnx2x_attn_int_deasserted2(bp,
3090 attn.sig[2] & group_mask.sig[2]);
3091 bnx2x_attn_int_deasserted0(bp,
3092 attn.sig[0] & group_mask.sig[0]);
3094 if ((attn.sig[0] & group_mask.sig[0] &
3095 HW_PRTY_ASSERT_SET_0) ||
3096 (attn.sig[1] & group_mask.sig[1] &
3097 HW_PRTY_ASSERT_SET_1) ||
3098 (attn.sig[2] & group_mask.sig[2] &
3099 HW_PRTY_ASSERT_SET_2))
3100 BNX2X_ERR("FATAL HW block parity attention\n");
3104 bnx2x_release_alr(bp);
3106 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3109 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3111 REG_WR(bp, reg_addr, val);
3113 if (~bp->attn_state & deasserted)
3114 BNX2X_ERR("IGU ERROR\n");
3116 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3117 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3119 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3120 aeu_mask = REG_RD(bp, reg_addr);
3122 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3123 aeu_mask, deasserted);
3124 aeu_mask |= (deasserted & 0xff);
3125 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3127 REG_WR(bp, reg_addr, aeu_mask);
3128 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3130 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3131 bp->attn_state &= ~deasserted;
3132 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3135 static void bnx2x_attn_int(struct bnx2x *bp)
3137 /* read local copy of bits */
3138 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3140 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3142 u32 attn_state = bp->attn_state;
3144 /* look for changed bits */
3145 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3146 u32 deasserted = ~attn_bits & attn_ack & attn_state;
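/* Worked example with hypothetical 4-bit values: for
   attn_bits = 0101b, attn_ack = 0011b, attn_state = 0011b,
	asserted   =  0101b & ~0011b & ~0011b = 0100b
	deasserted = ~0101b &  0011b &  0011b = 0010b
   i.e. bit 2 was newly raised (set but not yet acked/tracked) and
   bit 1 just dropped (acked and tracked but no longer set). */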
3149 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3150 attn_bits, attn_ack, asserted, deasserted);
3152 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3153 BNX2X_ERR("BAD attention state\n");
3155 /* handle bits that were raised */
3157 bnx2x_attn_int_asserted(bp, asserted);
3160 bnx2x_attn_int_deasserted(bp, deasserted);
3163 static void bnx2x_sp_task(struct work_struct *work)
3165 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3169 /* Return here if interrupt is disabled */
3170 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3171 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3175 status = bnx2x_update_dsb_idx(bp);
3176 /* if (status == 0) */
3177 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3179 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3185 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3187 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3189 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3191 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3193 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3198 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3200 struct net_device *dev = dev_instance;
3201 struct bnx2x *bp = netdev_priv(dev);
3203 /* Return here if interrupt is disabled */
3204 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3205 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3209 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3211 #ifdef BNX2X_STOP_ON_ERROR
3212 if (unlikely(bp->panic))
3216 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3221 /* end of slow path */
3225 /****************************************************************************
3227 ****************************************************************************/
3229 /* sum[hi:lo] += add[hi:lo] */
3230 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3233 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3236 /* difference = minuend - subtrahend */
3237 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3239 if (m_lo < s_lo) { \
3241 d_hi = m_hi - s_hi; \
3243 /* we can 'loan' 1 */ \
3245 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3247 /* m_hi <= s_hi */ \
3252 /* m_lo >= s_lo */ \
3253 if (m_hi < s_hi) { \
3257 /* m_hi >= s_hi */ \
3258 d_hi = m_hi - s_hi; \
3259 d_lo = m_lo - s_lo; \
3264 #define UPDATE_STAT64(s, t) \
3266 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3267 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3268 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3269 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3270 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3271 pstats->mac_stx[1].t##_lo, diff.lo); \
3274 #define UPDATE_STAT64_NIG(s, t) \
3276 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3277 diff.lo, new->s##_lo, old->s##_lo); \
3278 ADD_64(estats->t##_hi, diff.hi, \
3279 estats->t##_lo, diff.lo); \
3282 /* sum[hi:lo] += add */
3283 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3286 s_hi += (s_lo < a) ? 1 : 0; \
3289 #define UPDATE_EXTEND_STAT(s) \
3291 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3292 pstats->mac_stx[1].s##_lo, \
3296 #define UPDATE_EXTEND_TSTAT(s, t) \
3298 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3299 old_tclient->s = tclient->s; \
3300 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3303 #define UPDATE_EXTEND_USTAT(s, t) \
3305 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3306 old_uclient->s = uclient->s; \
3307 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3310 #define UPDATE_EXTEND_XSTAT(s, t) \
3312 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3313 old_xclient->s = xclient->s; \
3314 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3317 /* minuend -= subtrahend */
3318 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3320 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3323 /* minuend[hi:lo] -= subtrahend */
3324 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3326 SUB_64(m_hi, 0, m_lo, s); \
3329 #define SUB_EXTEND_USTAT(s, t) \
3331 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3332 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3336 * General service functions
3339 static inline long bnx2x_hilo(u32 *hiref)
3341 u32 lo = *(hiref + 1);
3342 #if (BITS_PER_LONG == 64)
3345 return HILO_U64(hi, lo);
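/* Example of the hi/lo convention used by the stats code: counters are
   pairs of u32s with the high word stored first, so for hi = 1 and
   lo = 0x80000000, bnx2x_hilo() yields 0x180000000 on a 64-bit kernel.
   The ADD_64/DIFF_64/ADD_EXTEND_64 macros above maintain such pairs by
   hand, e.g. ADD_EXTEND_64 bumps s_hi by one exactly when s_lo wrapped
   around (detected by the post-addition check s_lo < a). */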
3352 * Init service functions
3355 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3357 if (!bp->stats_pending) {
3358 struct eth_query_ramrod_data ramrod_data = {0};
3361 ramrod_data.drv_counter = bp->stats_counter++;
3362 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3363 for_each_queue(bp, i)
3364 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3366 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3367 ((u32 *)&ramrod_data)[1],
3368 ((u32 *)&ramrod_data)[0], 0);
/* the stats ramrod has its own slot on the spq */
3372 bp->stats_pending = 1;
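/* Example (assuming four queues with client ids 0..3): the loop above
   builds ctr_id_vector = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)
   = 0x0f, so the STAT_QUERY ramrod collects statistics for exactly
   those clients, plus the port-wide counters when this driver
   instance is the PMF (collect_port above). */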
3377 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3379 struct dmae_command *dmae = &bp->stats_dmae;
3380 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3382 *stats_comp = DMAE_COMP_VAL;
3383 if (CHIP_REV_IS_SLOW(bp))
3387 if (bp->executer_idx) {
3388 int loader_idx = PMF_DMAE_C(bp);
3390 memset(dmae, 0, sizeof(struct dmae_command));
3392 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3393 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3394 DMAE_CMD_DST_RESET |
3396 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3398 DMAE_CMD_ENDIANITY_DW_SWAP |
3400 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3402 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3403 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3404 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3405 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3406 sizeof(struct dmae_command) *
3407 (loader_idx + 1)) >> 2;
3408 dmae->dst_addr_hi = 0;
3409 dmae->len = sizeof(struct dmae_command) >> 2;
3412 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3413 dmae->comp_addr_hi = 0;
3417 bnx2x_post_dmae(bp, dmae, loader_idx);
3419 } else if (bp->func_stx) {
3421 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3425 static int bnx2x_stats_comp(struct bnx2x *bp)
3427 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3431 while (*stats_comp != DMAE_COMP_VAL) {
3433 BNX2X_ERR("timeout waiting for stats finished\n");
3443 * Statistics service functions
3446 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3448 struct dmae_command *dmae;
3450 int loader_idx = PMF_DMAE_C(bp);
3451 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3454 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3455 BNX2X_ERR("BUG!\n");
3459 bp->executer_idx = 0;
3461 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3463 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3465 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3467 DMAE_CMD_ENDIANITY_DW_SWAP |
3469 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3470 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3472 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3473 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3474 dmae->src_addr_lo = bp->port.port_stx >> 2;
3475 dmae->src_addr_hi = 0;
3476 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3477 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3478 dmae->len = DMAE_LEN32_RD_MAX;
3479 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3480 dmae->comp_addr_hi = 0;
3483 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3484 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3485 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3486 dmae->src_addr_hi = 0;
3487 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3488 DMAE_LEN32_RD_MAX * 4);
3489 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3490 DMAE_LEN32_RD_MAX * 4);
3491 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3492 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3493 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3494 dmae->comp_val = DMAE_COMP_VAL;
3497 bnx2x_hw_stats_post(bp);
3498 bnx2x_stats_comp(bp);
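/* Note on the two-command split above: a single DMAE read is capped at
   DMAE_LEN32_RD_MAX dwords, so the port stats block is fetched in two
   chunks.  The first chunk completes to a DMAE "go" register, kicking
   the next queued command; the last chunk completes to the stats_comp
   word that bnx2x_stats_comp() polls for DMAE_COMP_VAL.  E.g. with a
   hypothetical cap of 0x80 dwords, a 0x94-dword host_port_stats would
   be read as one 0x80-dword and one 0x14-dword transfer. */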
3501 static void bnx2x_port_stats_init(struct bnx2x *bp)
3503 struct dmae_command *dmae;
3504 int port = BP_PORT(bp);
3505 int vn = BP_E1HVN(bp);
3507 int loader_idx = PMF_DMAE_C(bp);
3509 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3512 if (!bp->link_vars.link_up || !bp->port.pmf) {
3513 BNX2X_ERR("BUG!\n");
3517 bp->executer_idx = 0;
3520 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3521 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3522 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3524 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3526 DMAE_CMD_ENDIANITY_DW_SWAP |
3528 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3529 (vn << DMAE_CMD_E1HVN_SHIFT));
3531 if (bp->port.port_stx) {
3533 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3534 dmae->opcode = opcode;
3535 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3536 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3537 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3538 dmae->dst_addr_hi = 0;
3539 dmae->len = sizeof(struct host_port_stats) >> 2;
3540 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3541 dmae->comp_addr_hi = 0;
3547 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3548 dmae->opcode = opcode;
3549 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3550 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3551 dmae->dst_addr_lo = bp->func_stx >> 2;
3552 dmae->dst_addr_hi = 0;
3553 dmae->len = sizeof(struct host_func_stats) >> 2;
3554 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3555 dmae->comp_addr_hi = 0;
3560 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3561 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3562 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3564 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3566 DMAE_CMD_ENDIANITY_DW_SWAP |
3568 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3569 (vn << DMAE_CMD_E1HVN_SHIFT));
3571 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3573 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3574 NIG_REG_INGRESS_BMAC0_MEM);
3576 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3577 BIGMAC_REGISTER_TX_STAT_GTBYT */
3578 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3579 dmae->opcode = opcode;
3580 dmae->src_addr_lo = (mac_addr +
3581 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3582 dmae->src_addr_hi = 0;
3583 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3584 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3585 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3586 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3587 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3588 dmae->comp_addr_hi = 0;
3591 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3592 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3593 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3594 dmae->opcode = opcode;
3595 dmae->src_addr_lo = (mac_addr +
3596 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3597 dmae->src_addr_hi = 0;
3598 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3599 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3600 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3601 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3602 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3603 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3604 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3605 dmae->comp_addr_hi = 0;
3608 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3610 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3612 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3613 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3614 dmae->opcode = opcode;
3615 dmae->src_addr_lo = (mac_addr +
3616 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3617 dmae->src_addr_hi = 0;
3618 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3619 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3620 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3621 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3622 dmae->comp_addr_hi = 0;
3625 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3626 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3627 dmae->opcode = opcode;
3628 dmae->src_addr_lo = (mac_addr +
3629 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3630 dmae->src_addr_hi = 0;
3631 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3632 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3633 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3634 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3636 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3637 dmae->comp_addr_hi = 0;
3640 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3641 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3642 dmae->opcode = opcode;
3643 dmae->src_addr_lo = (mac_addr +
3644 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3645 dmae->src_addr_hi = 0;
3646 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3647 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3648 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3649 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3650 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3651 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3652 dmae->comp_addr_hi = 0;
3657 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3658 dmae->opcode = opcode;
3659 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3660 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3661 dmae->src_addr_hi = 0;
3662 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3663 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3664 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3665 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3666 dmae->comp_addr_hi = 0;
3669 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3670 dmae->opcode = opcode;
3671 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3672 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3673 dmae->src_addr_hi = 0;
3674 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3675 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3676 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3677 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3678 dmae->len = (2*sizeof(u32)) >> 2;
3679 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3680 dmae->comp_addr_hi = 0;
3683 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3684 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3685 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3686 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3688 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3690 DMAE_CMD_ENDIANITY_DW_SWAP |
3692 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3693 (vn << DMAE_CMD_E1HVN_SHIFT));
3694 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3695 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3696 dmae->src_addr_hi = 0;
3697 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3698 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3699 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3700 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3701 dmae->len = (2*sizeof(u32)) >> 2;
3702 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3703 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3704 dmae->comp_val = DMAE_COMP_VAL;
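/* Every command queued above repeats one pattern - a minimal sketch,
   assuming a hypothetical helper for the shared fields:

	static void bnx2x_dmae_fill(struct dmae_command *dmae, u32 opcode,
				    u32 src_lo, u32 dst_lo, u16 len)
	{
		dmae->opcode      = opcode;
		dmae->src_addr_lo = src_lo;
		dmae->dst_addr_lo = dst_lo;
		dmae->len         = len;
	}

   All but the last command complete to a loader "go" register so the
   chain self-advances; only the final one writes DMAE_COMP_VAL into
   stats_comp for the host to poll. */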
3709 static void bnx2x_func_stats_init(struct bnx2x *bp)
3711 struct dmae_command *dmae = &bp->stats_dmae;
3712 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3715 if (!bp->func_stx) {
3716 BNX2X_ERR("BUG!\n");
3720 bp->executer_idx = 0;
3721 memset(dmae, 0, sizeof(struct dmae_command));
3723 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3724 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3725 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3727 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3729 DMAE_CMD_ENDIANITY_DW_SWAP |
3731 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3732 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3733 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3734 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3735 dmae->dst_addr_lo = bp->func_stx >> 2;
3736 dmae->dst_addr_hi = 0;
3737 dmae->len = sizeof(struct host_func_stats) >> 2;
3738 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3739 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3740 dmae->comp_val = DMAE_COMP_VAL;
3745 static void bnx2x_stats_start(struct bnx2x *bp)
3748 bnx2x_port_stats_init(bp);
3750 else if (bp->func_stx)
3751 bnx2x_func_stats_init(bp);
3753 bnx2x_hw_stats_post(bp);
3754 bnx2x_storm_stats_post(bp);
3757 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3759 bnx2x_stats_comp(bp);
3760 bnx2x_stats_pmf_update(bp);
3761 bnx2x_stats_start(bp);
3764 static void bnx2x_stats_restart(struct bnx2x *bp)
3766 bnx2x_stats_comp(bp);
3767 bnx2x_stats_start(bp);
3770 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3772 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3773 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3774 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3780 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3781 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3782 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3783 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3784 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3785 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3786 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3787 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3788 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3789 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3790 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3791 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3792 UPDATE_STAT64(tx_stat_gt127,
3793 tx_stat_etherstatspkts65octetsto127octets);
3794 UPDATE_STAT64(tx_stat_gt255,
3795 tx_stat_etherstatspkts128octetsto255octets);
3796 UPDATE_STAT64(tx_stat_gt511,
3797 tx_stat_etherstatspkts256octetsto511octets);
3798 UPDATE_STAT64(tx_stat_gt1023,
3799 tx_stat_etherstatspkts512octetsto1023octets);
3800 UPDATE_STAT64(tx_stat_gt1518,
3801 tx_stat_etherstatspkts1024octetsto1522octets);
3802 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3803 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3804 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3805 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3806 UPDATE_STAT64(tx_stat_gterr,
3807 tx_stat_dot3statsinternalmactransmiterrors);
3808 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3810 estats->pause_frames_received_hi =
3811 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3812 estats->pause_frames_received_lo =
3813 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3815 estats->pause_frames_sent_hi =
3816 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3817 estats->pause_frames_sent_lo =
3818 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3821 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3823 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3824 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3825 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3827 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3828 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3829 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3830 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3831 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3832 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3833 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3834 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3835 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3836 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3837 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3838 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3839 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3840 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3841 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3842 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3843 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3844 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3845 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3846 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3847 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3848 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3849 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3850 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3851 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3852 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3853 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3854 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3855 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3856 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3857 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3859 estats->pause_frames_received_hi =
3860 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3861 estats->pause_frames_received_lo =
3862 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3863 ADD_64(estats->pause_frames_received_hi,
3864 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3865 estats->pause_frames_received_lo,
3866 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3868 estats->pause_frames_sent_hi =
3869 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3870 estats->pause_frames_sent_lo =
3871 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3872 ADD_64(estats->pause_frames_sent_hi,
3873 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3874 estats->pause_frames_sent_lo,
3875 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3878 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3880 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3881 struct nig_stats *old = &(bp->port.old_nig_stats);
3882 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3883 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3890 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3891 bnx2x_bmac_stats_update(bp);
3893 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3894 bnx2x_emac_stats_update(bp);
3896 else { /* unreached */
3897 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3901 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3902 new->brb_discard - old->brb_discard);
3903 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3904 new->brb_truncate - old->brb_truncate);
3906 UPDATE_STAT64_NIG(egress_mac_pkt0,
3907 etherstatspkts1024octetsto1522octets);
3908 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3910 memcpy(old, new, sizeof(struct nig_stats));
3912 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3913 sizeof(struct mac_stx));
3914 estats->brb_drop_hi = pstats->brb_drop_hi;
3915 estats->brb_drop_lo = pstats->brb_drop_lo;
3917 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
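/* The start/end pair acts as a torn-snapshot guard: 'end' (the last
   field of the block) is bumped and copied into 'start' (the first
   field) only once the block is consistent, so any reader that sees
   start != end knows a DMAE update raced with it.  (This reading is
   inferred from the idiom; the consumer side is outside this
   excerpt.) */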
3919 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3920 if (nig_timer_max != estats->nig_timer_max) {
3921 estats->nig_timer_max = nig_timer_max;
3922 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3928 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3930 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3931 struct tstorm_per_port_stats *tport =
3932 &stats->tstorm_common.port_statistics;
3933 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3934 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3937 memcpy(&(fstats->total_bytes_received_hi),
3938 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
3939 sizeof(struct host_func_stats) - 2*sizeof(u32));
3940 estats->error_bytes_received_hi = 0;
3941 estats->error_bytes_received_lo = 0;
3942 estats->etherstatsoverrsizepkts_hi = 0;
3943 estats->etherstatsoverrsizepkts_lo = 0;
3944 estats->no_buff_discard_hi = 0;
3945 estats->no_buff_discard_lo = 0;
3947 for_each_rx_queue(bp, i) {
3948 struct bnx2x_fastpath *fp = &bp->fp[i];
3949 int cl_id = fp->cl_id;
3950 struct tstorm_per_client_stats *tclient =
3951 &stats->tstorm_common.client_statistics[cl_id];
3952 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3953 struct ustorm_per_client_stats *uclient =
3954 &stats->ustorm_common.client_statistics[cl_id];
3955 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3956 struct xstorm_per_client_stats *xclient =
3957 &stats->xstorm_common.client_statistics[cl_id];
3958 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3959 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3962 /* are storm stats valid? */
3963 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3964 bp->stats_counter) {
3965 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3966 " xstorm counter (%d) != stats_counter (%d)\n",
3967 i, xclient->stats_counter, bp->stats_counter);
3970 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3971 bp->stats_counter) {
3972 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3973 " tstorm counter (%d) != stats_counter (%d)\n",
3974 i, tclient->stats_counter, bp->stats_counter);
3977 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3978 bp->stats_counter) {
3979 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3980 " ustorm counter (%d) != stats_counter (%d)\n",
3981 i, uclient->stats_counter, bp->stats_counter);
3985 qstats->total_bytes_received_hi =
3986 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3987 qstats->total_bytes_received_lo =
3988 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3990 ADD_64(qstats->total_bytes_received_hi,
3991 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3992 qstats->total_bytes_received_lo,
3993 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3995 ADD_64(qstats->total_bytes_received_hi,
3996 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
3997 qstats->total_bytes_received_lo,
3998 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4000 qstats->valid_bytes_received_hi =
4001 qstats->total_bytes_received_hi;
4002 qstats->valid_bytes_received_lo =
4003 qstats->total_bytes_received_lo;
4005 qstats->error_bytes_received_hi =
4006 le32_to_cpu(tclient->rcv_error_bytes.hi);
4007 qstats->error_bytes_received_lo =
4008 le32_to_cpu(tclient->rcv_error_bytes.lo);
4010 ADD_64(qstats->total_bytes_received_hi,
4011 qstats->error_bytes_received_hi,
4012 qstats->total_bytes_received_lo,
4013 qstats->error_bytes_received_lo);
4015 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4016 total_unicast_packets_received);
4017 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4018 total_multicast_packets_received);
4019 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4020 total_broadcast_packets_received);
4021 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4022 etherstatsoverrsizepkts);
4023 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4025 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4026 total_unicast_packets_received);
4027 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4028 total_multicast_packets_received);
4029 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4030 total_broadcast_packets_received);
4031 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4032 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4033 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4035 qstats->total_bytes_transmitted_hi =
4036 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4037 qstats->total_bytes_transmitted_lo =
4038 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4040 ADD_64(qstats->total_bytes_transmitted_hi,
4041 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4042 qstats->total_bytes_transmitted_lo,
4043 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4045 ADD_64(qstats->total_bytes_transmitted_hi,
4046 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4047 qstats->total_bytes_transmitted_lo,
4048 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4050 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4051 total_unicast_packets_transmitted);
4052 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4053 total_multicast_packets_transmitted);
4054 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4055 total_broadcast_packets_transmitted);
4057 old_tclient->checksum_discard = tclient->checksum_discard;
4058 old_tclient->ttl0_discard = tclient->ttl0_discard;
4060 ADD_64(fstats->total_bytes_received_hi,
4061 qstats->total_bytes_received_hi,
4062 fstats->total_bytes_received_lo,
4063 qstats->total_bytes_received_lo);
4064 ADD_64(fstats->total_bytes_transmitted_hi,
4065 qstats->total_bytes_transmitted_hi,
4066 fstats->total_bytes_transmitted_lo,
4067 qstats->total_bytes_transmitted_lo);
4068 ADD_64(fstats->total_unicast_packets_received_hi,
4069 qstats->total_unicast_packets_received_hi,
4070 fstats->total_unicast_packets_received_lo,
4071 qstats->total_unicast_packets_received_lo);
4072 ADD_64(fstats->total_multicast_packets_received_hi,
4073 qstats->total_multicast_packets_received_hi,
4074 fstats->total_multicast_packets_received_lo,
4075 qstats->total_multicast_packets_received_lo);
4076 ADD_64(fstats->total_broadcast_packets_received_hi,
4077 qstats->total_broadcast_packets_received_hi,
4078 fstats->total_broadcast_packets_received_lo,
4079 qstats->total_broadcast_packets_received_lo);
4080 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4081 qstats->total_unicast_packets_transmitted_hi,
4082 fstats->total_unicast_packets_transmitted_lo,
4083 qstats->total_unicast_packets_transmitted_lo);
4084 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4085 qstats->total_multicast_packets_transmitted_hi,
4086 fstats->total_multicast_packets_transmitted_lo,
4087 qstats->total_multicast_packets_transmitted_lo);
4088 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4089 qstats->total_broadcast_packets_transmitted_hi,
4090 fstats->total_broadcast_packets_transmitted_lo,
4091 qstats->total_broadcast_packets_transmitted_lo);
4092 ADD_64(fstats->valid_bytes_received_hi,
4093 qstats->valid_bytes_received_hi,
4094 fstats->valid_bytes_received_lo,
4095 qstats->valid_bytes_received_lo);
4097 ADD_64(estats->error_bytes_received_hi,
4098 qstats->error_bytes_received_hi,
4099 estats->error_bytes_received_lo,
4100 qstats->error_bytes_received_lo);
4101 ADD_64(estats->etherstatsoverrsizepkts_hi,
4102 qstats->etherstatsoverrsizepkts_hi,
4103 estats->etherstatsoverrsizepkts_lo,
4104 qstats->etherstatsoverrsizepkts_lo);
4105 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4106 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4109 ADD_64(fstats->total_bytes_received_hi,
4110 estats->rx_stat_ifhcinbadoctets_hi,
4111 fstats->total_bytes_received_lo,
4112 estats->rx_stat_ifhcinbadoctets_lo);
4114 memcpy(estats, &(fstats->total_bytes_received_hi),
4115 sizeof(struct host_func_stats) - 2*sizeof(u32));
4117 ADD_64(estats->etherstatsoverrsizepkts_hi,
4118 estats->rx_stat_dot3statsframestoolong_hi,
4119 estats->etherstatsoverrsizepkts_lo,
4120 estats->rx_stat_dot3statsframestoolong_lo);
4121 ADD_64(estats->error_bytes_received_hi,
4122 estats->rx_stat_ifhcinbadoctets_hi,
4123 estats->error_bytes_received_lo,
4124 estats->rx_stat_ifhcinbadoctets_lo);
4127 estats->mac_filter_discard =
4128 le32_to_cpu(tport->mac_filter_discard);
4129 estats->xxoverflow_discard =
4130 le32_to_cpu(tport->xxoverflow_discard);
4131 estats->brb_truncate_discard =
4132 le32_to_cpu(tport->brb_truncate_discard);
4133 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4136 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4138 bp->stats_pending = 0;
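/* Staleness check example: each storm stamps its per-client block with
   the drv_counter it saw in the query ramrod.  If bp->stats_counter is
   now 7, the last ramrod carried 6, so a fresh block satisfies
   (u16)(6 + 1) == 7 and is consumed; a block still stamped 5 fails the
   test and the caller retries - see bnx2x_stats_update() below, which
   bumps bp->stats_pending and complains after 3 misses. */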
4143 static void bnx2x_net_stats_update(struct bnx2x *bp)
4145 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4146 struct net_device_stats *nstats = &bp->dev->stats;
4149 nstats->rx_packets =
4150 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4151 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4152 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4154 nstats->tx_packets =
4155 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4156 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4157 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4159 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4161 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4163 nstats->rx_dropped = estats->mac_discard;
4164 for_each_rx_queue(bp, i)
4165 nstats->rx_dropped +=
4166 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4168 nstats->tx_dropped = 0;
4171 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4173 nstats->collisions =
4174 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4176 nstats->rx_length_errors =
4177 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4178 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4179 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4180 bnx2x_hilo(&estats->brb_truncate_hi);
4181 nstats->rx_crc_errors =
4182 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4183 nstats->rx_frame_errors =
4184 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4185 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4186 nstats->rx_missed_errors = estats->xxoverflow_discard;
4188 nstats->rx_errors = nstats->rx_length_errors +
4189 nstats->rx_over_errors +
4190 nstats->rx_crc_errors +
4191 nstats->rx_frame_errors +
4192 nstats->rx_fifo_errors +
4193 nstats->rx_missed_errors;
4195 nstats->tx_aborted_errors =
4196 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4197 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4198 nstats->tx_carrier_errors =
4199 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4200 nstats->tx_fifo_errors = 0;
4201 nstats->tx_heartbeat_errors = 0;
4202 nstats->tx_window_errors = 0;
4204 nstats->tx_errors = nstats->tx_aborted_errors +
4205 nstats->tx_carrier_errors +
4206 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4207 }
4209 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4210 {
4211 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4212 int i;
4214 estats->driver_xoff = 0;
4215 estats->rx_err_discard_pkt = 0;
4216 estats->rx_skb_alloc_failed = 0;
4217 estats->hw_csum_err = 0;
4218 for_each_rx_queue(bp, i) {
4219 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4221 estats->driver_xoff += qstats->driver_xoff;
4222 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4223 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4224 estats->hw_csum_err += qstats->hw_csum_err;
4225 }
4226 }
4228 static void bnx2x_stats_update(struct bnx2x *bp)
4229 {
4230 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4232 if (*stats_comp != DMAE_COMP_VAL)
4233 return;
4235 if (bp->port.pmf)
4236 bnx2x_hw_stats_update(bp);
4238 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4239 BNX2X_ERR("storm stats were not updated for 3 times\n");
4240 bnx2x_panic();
4241 return;
4242 }
4244 bnx2x_net_stats_update(bp);
4245 bnx2x_drv_stats_update(bp);
4247 if (bp->msglevel & NETIF_MSG_TIMER) {
4248 struct bnx2x_fastpath *fp0_rx = bp->fp;
4249 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4250 struct tstorm_per_client_stats *old_tclient =
4251 &bp->fp->old_tclient;
4252 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4253 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4254 struct net_device_stats *nstats = &bp->dev->stats;
4255 int i;
4257 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4258 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4259 " tx pkt (%lx)\n",
4260 bnx2x_tx_avail(fp0_tx),
4261 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4262 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4263 " rx pkt (%lx)\n",
4264 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4265 fp0_rx->rx_comp_cons),
4266 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4267 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4268 "brb truncate %u\n",
4269 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4270 qstats->driver_xoff,
4271 estats->brb_drop_lo, estats->brb_truncate_lo);
4272 printk(KERN_DEBUG "tstats: checksum_discard %u "
4273 "packets_too_big_discard %lu no_buff_discard %lu "
4274 "mac_discard %u mac_filter_discard %u "
4275 "xxovrflow_discard %u brb_truncate_discard %u "
4276 "ttl0_discard %u\n",
4277 le32_to_cpu(old_tclient->checksum_discard),
4278 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4279 bnx2x_hilo(&qstats->no_buff_discard_hi),
4280 estats->mac_discard, estats->mac_filter_discard,
4281 estats->xxoverflow_discard, estats->brb_truncate_discard,
4282 le32_to_cpu(old_tclient->ttl0_discard));
4284 for_each_queue(bp, i) {
4285 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4286 bnx2x_fp(bp, i, tx_pkt),
4287 bnx2x_fp(bp, i, rx_pkt),
4288 bnx2x_fp(bp, i, rx_calls));
4289 }
4290 }
4292 bnx2x_hw_stats_post(bp);
4293 bnx2x_storm_stats_post(bp);
4294 }
4296 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4297 {
4298 struct dmae_command *dmae;
4299 u32 opcode;
4300 int loader_idx = PMF_DMAE_C(bp);
4301 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4303 bp->executer_idx = 0;
4305 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4306 DMAE_CMD_C_ENABLE |
4307 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4308 #ifdef __BIG_ENDIAN
4309 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4310 #else
4311 DMAE_CMD_ENDIANITY_DW_SWAP |
4312 #endif
4313 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4314 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
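/*
 * Statistics are moved by the DMAE block: the driver fills an array of
 * struct dmae_command descriptors in slowpath memory (indexed by
 * bp->executer_idx) and bnx2x_hw_stats_post() later hands them to the
 * hardware. Broadly, each command names a source, a destination and a
 * completion: intermediate commands complete to GRC (kicking the next
 * loader entry), while the last one completes to PCI by writing
 * DMAE_COMP_VAL into stats_comp, which the driver then polls.
 */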
4316 if (bp->port.port_stx) {
4318 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4319 if (bp->func_stx)
4320 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4321 else
4322 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4323 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4324 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4325 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4326 dmae->dst_addr_hi = 0;
4327 dmae->len = sizeof(struct host_port_stats) >> 2;
4328 if (bp->func_stx) {
4329 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4330 dmae->comp_addr_hi = 0;
4331 dmae->comp_val = 1;
4332 } else {
4333 dmae->comp_addr_lo =
4334 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4335 dmae->comp_addr_hi =
4336 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4337 dmae->comp_val = DMAE_COMP_VAL;
4339 *stats_comp = 0;
4340 }
4341 }
4343 if (bp->func_stx) {
4345 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4346 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4347 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4348 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4349 dmae->dst_addr_lo = bp->func_stx >> 2;
4350 dmae->dst_addr_hi = 0;
4351 dmae->len = sizeof(struct host_func_stats) >> 2;
4352 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4353 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4354 dmae->comp_val = DMAE_COMP_VAL;
4356 *stats_comp = 0;
4357 }
4358 }
4360 static void bnx2x_stats_stop(struct bnx2x *bp)
4361 {
4362 int update = 0;
4364 bnx2x_stats_comp(bp);
4366 if (bp->port.pmf)
4367 update = (bnx2x_hw_stats_update(bp) == 0);
4369 update |= (bnx2x_storm_stats_update(bp) == 0);
4371 if (update) {
4372 bnx2x_net_stats_update(bp);
4374 if (bp->port.pmf)
4375 bnx2x_port_stats_stop(bp);
4377 bnx2x_hw_stats_post(bp);
4378 bnx2x_stats_comp(bp);
4379 }
4380 }
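/*
 * The handlers above are tied together by a small table-driven state
 * machine (bnx2x_stats_stm below): the current state (DISABLED or
 * ENABLED) and an event (PMF, LINK_UP, UPDATE, STOP) select both an
 * action callback and the next state, roughly:
 *
 *	next = bnx2x_stats_stm[state][event];
 *	next.action(bp);
 *	bp->stats_state = next.next_state;
 *
 * which is exactly what bnx2x_stats_handle() does.
 */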
4382 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4383 {
4384 }
4386 static const struct {
4387 void (*action)(struct bnx2x *bp);
4388 enum bnx2x_stats_state next_state;
4389 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4390 /* state event */
4391 {
4392 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4393 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4394 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4395 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4396 },
4397 {
4398 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4399 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4400 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4401 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4402 }
4403 };
4405 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4406 {
4407 enum bnx2x_stats_state state = bp->stats_state;
4409 bnx2x_stats_stm[state][event].action(bp);
4410 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4412 /* Make sure the state has been "changed" */
4413 smp_wmb();
4415 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4416 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4417 state, event, bp->stats_state);
4418 }
4420 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4421 {
4422 struct dmae_command *dmae;
4423 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4425 /* sanity */
4426 if (!bp->port.pmf || !bp->port.port_stx) {
4427 BNX2X_ERR("BUG!\n");
4431 bp->executer_idx = 0;
4433 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4434 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4435 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4436 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4437 #ifdef __BIG_ENDIAN
4438 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4439 #else
4440 DMAE_CMD_ENDIANITY_DW_SWAP |
4441 #endif
4442 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4443 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4444 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4445 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4446 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4447 dmae->dst_addr_hi = 0;
4448 dmae->len = sizeof(struct host_port_stats) >> 2;
4449 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4450 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4451 dmae->comp_val = DMAE_COMP_VAL;
4453 *stats_comp = 0;
4454 bnx2x_hw_stats_post(bp);
4455 bnx2x_stats_comp(bp);
4456 }
4458 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4459 {
4460 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4461 int port = BP_PORT(bp);
4462 int func;
4463 u32 func_stx;
4465 /* sanity */
4466 if (!bp->port.pmf || !bp->func_stx) {
4467 BNX2X_ERR("BUG!\n");
4468 return;
4469 }
4471 /* save our func_stx */
4472 func_stx = bp->func_stx;
4474 for (vn = VN_0; vn < vn_max; vn++) {
4475 func = 2*vn + port;
4477 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4478 bnx2x_func_stats_init(bp);
4479 bnx2x_hw_stats_post(bp);
4480 bnx2x_stats_comp(bp);
4481 }
4483 /* restore our func_stx */
4484 bp->func_stx = func_stx;
4485 }
4487 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4488 {
4489 struct dmae_command *dmae = &bp->stats_dmae;
4490 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4492 /* sanity */
4493 if (!bp->func_stx) {
4494 BNX2X_ERR("BUG!\n");
4498 bp->executer_idx = 0;
4499 memset(dmae, 0, sizeof(struct dmae_command));
4501 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4502 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4503 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4504 #ifdef __BIG_ENDIAN
4505 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4506 #else
4507 DMAE_CMD_ENDIANITY_DW_SWAP |
4508 #endif
4509 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4510 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4511 dmae->src_addr_lo = bp->func_stx >> 2;
4512 dmae->src_addr_hi = 0;
4513 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4514 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4515 dmae->len = sizeof(struct host_func_stats) >> 2;
4516 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4517 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4518 dmae->comp_val = DMAE_COMP_VAL;
4520 *stats_comp = 0;
4521 bnx2x_hw_stats_post(bp);
4522 bnx2x_stats_comp(bp);
4523 }
4525 static void bnx2x_stats_init(struct bnx2x *bp)
4526 {
4527 int port = BP_PORT(bp);
4528 int func = BP_FUNC(bp);
4529 int i;
4531 bp->stats_pending = 0;
4532 bp->executer_idx = 0;
4533 bp->stats_counter = 0;
4535 /* port and func stats for management */
4536 if (!BP_NOMCP(bp)) {
4537 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4538 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4540 } else {
4541 bp->port.port_stx = 0;
4542 bp->func_stx = 0;
4543 }
4544 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4545 bp->port.port_stx, bp->func_stx);
4548 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4549 bp->port.old_nig_stats.brb_discard =
4550 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4551 bp->port.old_nig_stats.brb_truncate =
4552 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4553 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4554 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4555 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4556 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4558 /* function stats */
4559 for_each_queue(bp, i) {
4560 struct bnx2x_fastpath *fp = &bp->fp[i];
4562 memset(&fp->old_tclient, 0,
4563 sizeof(struct tstorm_per_client_stats));
4564 memset(&fp->old_uclient, 0,
4565 sizeof(struct ustorm_per_client_stats));
4566 memset(&fp->old_xclient, 0,
4567 sizeof(struct xstorm_per_client_stats));
4568 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4569 }
4571 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4572 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4574 bp->stats_state = STATS_STATE_DISABLED;
4576 if (bp->port.pmf) {
4577 if (bp->port.port_stx)
4578 bnx2x_port_stats_base_init(bp);
4580 if (bp->func_stx)
4581 bnx2x_func_stats_base_init(bp);
4583 } else if (bp->func_stx)
4584 bnx2x_func_stats_base_update(bp);
4585 }
4587 static void bnx2x_timer(unsigned long data)
4588 {
4589 struct bnx2x *bp = (struct bnx2x *) data;
4591 if (!netif_running(bp->dev))
4592 return;
4594 if (atomic_read(&bp->intr_sem) != 0)
4595 goto timer_restart;
4597 if (poll) {
4598 struct bnx2x_fastpath *fp = &bp->fp[0];
4599 int rc;
4601 bnx2x_tx_int(fp);
4602 rc = bnx2x_rx_int(fp, 1000);
4603 }
4605 if (!BP_NOMCP(bp)) {
4606 int func = BP_FUNC(bp);
4607 u32 drv_pulse;
4608 u32 mcp_pulse;
4610 ++bp->fw_drv_pulse_wr_seq;
4611 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4612 /* TBD - add SYSTEM_TIME */
4613 drv_pulse = bp->fw_drv_pulse_wr_seq;
4614 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
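/*
 * Driver and management firmware (MCP) exchange a heartbeat through
 * shared memory: the driver advances drv_pulse_mb on every timer tick
 * and the MCP echoes it in mcp_pulse_mb. The sequence compare below
 * tolerates the driver being exactly one tick ahead of the MCP;
 * anything else means one side stopped responding.
 */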
4616 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4617 MCP_PULSE_SEQ_MASK);
4618 /* The delta between driver pulse and mcp response
4619 * should be 1 (before mcp response) or 0 (after mcp response)
4620 */
4621 if ((drv_pulse != mcp_pulse) &&
4622 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4623 /* someone lost a heartbeat... */
4624 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4625 drv_pulse, mcp_pulse);
4626 }
4627 }
4629 if ((bp->state == BNX2X_STATE_OPEN) ||
4630 (bp->state == BNX2X_STATE_DISABLED))
4631 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4633 timer_restart:
4634 mod_timer(&bp->timer, jiffies + bp->current_interval);
4635 }
4637 /* end of Statistics */
4641 /*
4642 * nic init service functions
4643 */
4645 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4646 {
4647 int port = BP_PORT(bp);
4650 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4651 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4652 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4653 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4654 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4655 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4656 }
4658 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4659 dma_addr_t mapping, int sb_id)
4660 {
4661 int port = BP_PORT(bp);
4662 int func = BP_FUNC(bp);
4663 int index;
4664 u64 section;
4666 /* USTORM */
4667 section = ((u64)mapping) + offsetof(struct host_status_block,
4668 u_status_block);
4669 sb->u_status_block.status_block_id = sb_id;
4671 REG_WR(bp, BAR_CSTRORM_INTMEM +
4672 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4673 REG_WR(bp, BAR_CSTRORM_INTMEM +
4674 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4675 U64_HI(section));
4676 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4677 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4679 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4680 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4681 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4684 section = ((u64)mapping) + offsetof(struct host_status_block,
4685 c_status_block);
4686 sb->c_status_block.status_block_id = sb_id;
4688 REG_WR(bp, BAR_CSTRORM_INTMEM +
4689 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4690 REG_WR(bp, BAR_CSTRORM_INTMEM +
4691 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4692 U64_HI(section));
4693 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4694 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4696 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4697 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4698 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4700 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4701 }
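/*
 * Per-queue status blocks have two halves: the USTORM section carries
 * the Rx completion indices and the CSTORM section the Tx ones (both
 * are hosted in CSTORM internal memory here, per the BAR_CSTRORM_INTMEM
 * writes above). The REG_WR16() loops write 1 into each per-index
 * HC_DISABLE word, i.e. every index starts out disabled until
 * bnx2x_update_coalesce() enables the ones actually in use.
 */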
4703 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4704 {
4705 int func = BP_FUNC(bp);
4707 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4708 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4709 sizeof(struct tstorm_def_status_block)/4);
4710 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4711 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4712 sizeof(struct cstorm_def_status_block_u)/4);
4713 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4714 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4715 sizeof(struct cstorm_def_status_block_c)/4);
4716 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4717 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4718 sizeof(struct xstorm_def_status_block)/4);
4719 }
4721 static void bnx2x_init_def_sb(struct bnx2x *bp,
4722 struct host_def_status_block *def_sb,
4723 dma_addr_t mapping, int sb_id)
4724 {
4725 int port = BP_PORT(bp);
4726 int func = BP_FUNC(bp);
4727 int index, val, reg_offset;
4728 u64 section;
4731 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4732 atten_status_block);
4733 def_sb->atten_status_block.status_block_id = sb_id;
4735 bp->attn_state = 0;
4737 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4738 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4740 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4741 bp->attn_group[index].sig[0] = REG_RD(bp,
4742 reg_offset + 0x10*index);
4743 bp->attn_group[index].sig[1] = REG_RD(bp,
4744 reg_offset + 0x4 + 0x10*index);
4745 bp->attn_group[index].sig[2] = REG_RD(bp,
4746 reg_offset + 0x8 + 0x10*index);
4747 bp->attn_group[index].sig[3] = REG_RD(bp,
4748 reg_offset + 0xc + 0x10*index);
4749 }
4751 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4752 HC_REG_ATTN_MSG0_ADDR_L);
4754 REG_WR(bp, reg_offset, U64_LO(section));
4755 REG_WR(bp, reg_offset + 4, U64_HI(section));
4757 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4759 val = REG_RD(bp, reg_offset);
4760 val |= sb_id;
4761 REG_WR(bp, reg_offset, val);
4764 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4765 u_def_status_block);
4766 def_sb->u_def_status_block.status_block_id = sb_id;
4768 REG_WR(bp, BAR_CSTRORM_INTMEM +
4769 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4770 REG_WR(bp, BAR_CSTRORM_INTMEM +
4771 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4772 U64_HI(section));
4773 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4774 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4776 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4777 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4778 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4781 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4782 c_def_status_block);
4783 def_sb->c_def_status_block.status_block_id = sb_id;
4785 REG_WR(bp, BAR_CSTRORM_INTMEM +
4786 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4787 REG_WR(bp, BAR_CSTRORM_INTMEM +
4788 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4789 U64_HI(section));
4790 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4791 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4793 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4794 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4795 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4798 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4799 t_def_status_block);
4800 def_sb->t_def_status_block.status_block_id = sb_id;
4802 REG_WR(bp, BAR_TSTRORM_INTMEM +
4803 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4804 REG_WR(bp, BAR_TSTRORM_INTMEM +
4805 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4806 U64_HI(section));
4807 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4808 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4810 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4811 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4812 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4815 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4816 x_def_status_block);
4817 def_sb->x_def_status_block.status_block_id = sb_id;
4819 REG_WR(bp, BAR_XSTRORM_INTMEM +
4820 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4821 REG_WR(bp, BAR_XSTRORM_INTMEM +
4822 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4823 U64_HI(section));
4824 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4825 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4827 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4828 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4829 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4831 bp->stats_pending = 0;
4832 bp->set_mac_pending = 0;
4834 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4835 }
4837 static void bnx2x_update_coalesce(struct bnx2x *bp)
4838 {
4839 int port = BP_PORT(bp);
4840 int i;
4842 for_each_queue(bp, i) {
4843 int sb_id = bp->fp[i].sb_id;
4845 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4846 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4847 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4848 U_SB_ETH_RX_CQ_INDEX),
4849 bp->rx_ticks/12);
4850 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4851 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4852 U_SB_ETH_RX_CQ_INDEX),
4853 (bp->rx_ticks/12) ? 0 : 1);
4855 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4856 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4857 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4858 C_SB_ETH_TX_CQ_INDEX),
4859 bp->tx_ticks/12);
4860 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4861 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4862 C_SB_ETH_TX_CQ_INDEX),
4863 (bp->tx_ticks/12) ? 0 : 1);
4864 }
4865 }
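/*
 * The timeout values above are written in units of 12us (hence
 * rx_ticks/12 and tx_ticks/12), which appears to be the host
 * coalescing timer resolution on this chip; since a timeout of 0 is
 * not meaningful, the companion HC_DISABLE word is set whenever the
 * requested tick count rounds down to zero.
 */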
4867 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4868 struct bnx2x_fastpath *fp, int last)
4869 {
4870 int i;
4872 for (i = 0; i < last; i++) {
4873 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4874 struct sk_buff *skb = rx_buf->skb;
4876 if (skb == NULL) {
4877 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4878 continue;
4879 }
4881 if (fp->tpa_state[i] == BNX2X_TPA_START)
4882 pci_unmap_single(bp->pdev,
4883 pci_unmap_addr(rx_buf, mapping),
4884 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4886 dev_kfree_skb(skb);
4887 rx_buf->skb = NULL;
4888 }
4889 }
4891 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4892 {
4893 int func = BP_FUNC(bp);
4894 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4895 ETH_MAX_AGGREGATION_QUEUES_E1H;
4896 u16 ring_prod, cqe_ring_prod;
4897 int i, j;
4899 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4900 DP(NETIF_MSG_IFUP,
4901 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4903 if (bp->flags & TPA_ENABLE_FLAG) {
4905 for_each_rx_queue(bp, j) {
4906 struct bnx2x_fastpath *fp = &bp->fp[j];
4908 for (i = 0; i < max_agg_queues; i++) {
4909 fp->tpa_pool[i].skb =
4910 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4911 if (!fp->tpa_pool[i].skb) {
4912 BNX2X_ERR("Failed to allocate TPA "
4913 "skb pool for queue[%d] - "
4914 "disabling TPA on this "
4915 "queue!\n", j);
4916 bnx2x_free_tpa_pool(bp, fp, i);
4917 fp->disable_tpa = 1;
4918 break;
4919 }
4920 pci_unmap_addr_set((struct sw_rx_bd *)
4921 &bp->fp->tpa_pool[i],
4922 mapping, 0);
4923 fp->tpa_state[i] = BNX2X_TPA_STOP;
4924 }
4925 }
4926 }
4928 for_each_rx_queue(bp, j) {
4929 struct bnx2x_fastpath *fp = &bp->fp[j];
4931 fp->rx_bd_cons = 0;
4932 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4933 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4935 /* Mark queue as Rx */
4936 fp->is_rx_queue = 1;
4938 /* "next page" elements initialization */
4940 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4941 struct eth_rx_sge *sge;
4943 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4945 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4946 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4948 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4949 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4952 bnx2x_init_sge_ring_bit_mask(fp);
4955 for (i = 1; i <= NUM_RX_RINGS; i++) {
4956 struct eth_rx_bd *rx_bd;
4958 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4960 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4961 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4963 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4964 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
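/*
 * All three rings (SGE, BD and RCQ) are chained page rings: the last
 * element(s) of every BCM_PAGE_SIZE page are not ordinary descriptors
 * but "next page" pointers to the following page, which is why the
 * loops here address the *end* of page i (RX_SGE_CNT * i - 2 etc.)
 * and why producer macros such as NEXT_RX_IDX() skip over them.
 */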
4967 /* CQ ring */
4968 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4969 struct eth_rx_cqe_next_page *nextpg;
4971 nextpg = (struct eth_rx_cqe_next_page *)
4972 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4973 nextpg->addr_hi =
4974 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4975 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4976 nextpg->addr_lo =
4977 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4978 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4979 }
4981 /* Allocate SGEs and initialize the ring elements */
4982 for (i = 0, ring_prod = 0;
4983 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4985 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4986 BNX2X_ERR("was only able to allocate "
4988 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4989 /* Cleanup already allocated elements */
4990 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4991 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4992 fp->disable_tpa = 1;
4996 ring_prod = NEXT_SGE_IDX(ring_prod);
4998 fp->rx_sge_prod = ring_prod;
5000 /* Allocate BDs and initialize BD ring */
5001 fp->rx_comp_cons = 0;
5002 cqe_ring_prod = ring_prod = 0;
5003 for (i = 0; i < bp->rx_ring_size; i++) {
5004 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5005 BNX2X_ERR("was only able to allocate "
5006 "%d rx skbs on queue[%d]\n", i, j);
5007 fp->eth_q_stats.rx_skb_alloc_failed++;
5008 break;
5009 }
5010 ring_prod = NEXT_RX_IDX(ring_prod);
5011 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5012 WARN_ON(ring_prod <= i);
5013 }
5015 fp->rx_bd_prod = ring_prod;
5016 /* must not have more available CQEs than BDs */
5017 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5018 cqe_ring_prod);
5019 fp->rx_pkt = fp->rx_calls = 0;
5021 /* Warning!
5022 * this will generate an interrupt (to the TSTORM)
5023 * must only be done after chip is initialized
5024 */
5025 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5026 fp->rx_sge_prod);
5028 if (j != 0)
5029 continue;
5030 REG_WR(bp, BAR_USTRORM_INTMEM +
5031 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5032 U64_LO(fp->rx_comp_mapping));
5033 REG_WR(bp, BAR_USTRORM_INTMEM +
5034 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5035 U64_HI(fp->rx_comp_mapping));
5036 }
5037 }
5039 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5040 {
5041 int i, j;
5043 for_each_tx_queue(bp, j) {
5044 struct bnx2x_fastpath *fp = &bp->fp[j];
5046 for (i = 1; i <= NUM_TX_RINGS; i++) {
5047 struct eth_tx_next_bd *tx_next_bd =
5048 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5050 tx_next_bd->addr_hi =
5051 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5052 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5053 tx_next_bd->addr_lo =
5054 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5055 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5056 }
5058 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5059 fp->tx_db.data.zero_fill1 = 0;
5060 fp->tx_db.data.prod = 0;
5062 fp->tx_pkt_prod = 0;
5063 fp->tx_pkt_cons = 0;
5064 fp->tx_bd_prod = 0;
5065 fp->tx_bd_cons = 0;
5066 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5067 }
5070 /* clean tx statistics */
5071 for_each_rx_queue(bp, i)
5072 bnx2x_fp(bp, i, tx_pkt) = 0;
5073 }
5075 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5076 {
5077 int func = BP_FUNC(bp);
5079 spin_lock_init(&bp->spq_lock);
5081 bp->spq_left = MAX_SPQ_PENDING;
5082 bp->spq_prod_idx = 0;
5083 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5084 bp->spq_prod_bd = bp->spq;
5085 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5087 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5088 U64_LO(bp->spq_mapping));
5089 REG_WR(bp,
5090 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5091 U64_HI(bp->spq_mapping));
5093 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5094 bp->spq_prod_idx);
5095 }
5097 static void bnx2x_init_context(struct bnx2x *bp)
5098 {
5099 int i;
5101 for_each_rx_queue(bp, i) {
5102 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5103 struct bnx2x_fastpath *fp = &bp->fp[i];
5104 u8 cl_id = fp->cl_id;
5106 context->ustorm_st_context.common.sb_index_numbers =
5107 BNX2X_RX_SB_INDEX_NUM;
5108 context->ustorm_st_context.common.clientId = cl_id;
5109 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5110 context->ustorm_st_context.common.flags =
5111 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5112 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5113 context->ustorm_st_context.common.statistics_counter_id =
5114 cl_id;
5115 context->ustorm_st_context.common.mc_alignment_log_size =
5116 BNX2X_RX_ALIGN_SHIFT;
5117 context->ustorm_st_context.common.bd_buff_size =
5118 bp->rx_buf_size;
5119 context->ustorm_st_context.common.bd_page_base_hi =
5120 U64_HI(fp->rx_desc_mapping);
5121 context->ustorm_st_context.common.bd_page_base_lo =
5122 U64_LO(fp->rx_desc_mapping);
5123 if (!fp->disable_tpa) {
5124 context->ustorm_st_context.common.flags |=
5125 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5126 context->ustorm_st_context.common.sge_buff_size =
5127 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5128 (u32)0xffff);
5129 context->ustorm_st_context.common.sge_page_base_hi =
5130 U64_HI(fp->rx_sge_mapping);
5131 context->ustorm_st_context.common.sge_page_base_lo =
5132 U64_LO(fp->rx_sge_mapping);
5134 context->ustorm_st_context.common.max_sges_for_packet =
5135 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5136 context->ustorm_st_context.common.max_sges_for_packet =
5137 ((context->ustorm_st_context.common.
5138 max_sges_for_packet + PAGES_PER_SGE - 1) &
5139 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5140 }
5142 context->ustorm_ag_context.cdu_usage =
5143 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5144 CDU_REGION_NUMBER_UCM_AG,
5145 ETH_CONNECTION_TYPE);
5147 context->xstorm_ag_context.cdu_reserved =
5148 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5149 CDU_REGION_NUMBER_XCM_AG,
5150 ETH_CONNECTION_TYPE);
5151 }
5153 for_each_tx_queue(bp, i) {
5154 struct bnx2x_fastpath *fp = &bp->fp[i];
5155 struct eth_context *context =
5156 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5158 context->cstorm_st_context.sb_index_number =
5159 C_SB_ETH_TX_CQ_INDEX;
5160 context->cstorm_st_context.status_block_id = fp->sb_id;
5162 context->xstorm_st_context.tx_bd_page_base_hi =
5163 U64_HI(fp->tx_desc_mapping);
5164 context->xstorm_st_context.tx_bd_page_base_lo =
5165 U64_LO(fp->tx_desc_mapping);
5166 context->xstorm_st_context.statistics_data = (fp->cl_id |
5167 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5168 }
5169 }
5171 static void bnx2x_init_ind_table(struct bnx2x *bp)
5172 {
5173 int func = BP_FUNC(bp);
5174 int i;
5176 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5177 return;
5179 DP(NETIF_MSG_IFUP,
5180 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
5181 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5182 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5183 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5184 bp->fp->cl_id + (i % bp->num_rx_queues));
5185 }
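/*
 * RSS steering: the TSTORM indirection table is filled with client ids
 * in round-robin order, so a hash bucket h is served by Rx queue
 * (h % num_rx_queues). A sketch of the mapping this loop programs:
 *
 *	table[h] = base_cl_id + (h % bp->num_rx_queues);
 */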
5187 static void bnx2x_set_client_config(struct bnx2x *bp)
5188 {
5189 struct tstorm_eth_client_config tstorm_client = {0};
5190 int port = BP_PORT(bp);
5191 int i;
5193 tstorm_client.mtu = bp->dev->mtu;
5194 tstorm_client.config_flags =
5195 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5196 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5197 #ifdef BCM_VLAN
5198 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5199 tstorm_client.config_flags |=
5200 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5201 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5205 for_each_queue(bp, i) {
5206 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5208 REG_WR(bp, BAR_TSTRORM_INTMEM +
5209 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5210 ((u32 *)&tstorm_client)[0]);
5211 REG_WR(bp, BAR_TSTRORM_INTMEM +
5212 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5213 ((u32 *)&tstorm_client)[1]);
5214 }
5216 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5217 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5218 }
5220 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5221 {
5222 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5223 int mode = bp->rx_mode;
5224 int mask = (1 << BP_L_ID(bp));
5225 int func = BP_FUNC(bp);
5226 int port = BP_PORT(bp);
5227 int i;
5228 /* All but management unicast packets should pass to the host as well */
5229 u32 llh_mask =
5230 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5231 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5232 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5233 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5235 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
5238 case BNX2X_RX_MODE_NONE: /* no Rx */
5239 tstorm_mac_filter.ucast_drop_all = mask;
5240 tstorm_mac_filter.mcast_drop_all = mask;
5241 tstorm_mac_filter.bcast_drop_all = mask;
5242 break;
5244 case BNX2X_RX_MODE_NORMAL:
5245 tstorm_mac_filter.bcast_accept_all = mask;
5246 break;
5248 case BNX2X_RX_MODE_ALLMULTI:
5249 tstorm_mac_filter.mcast_accept_all = mask;
5250 tstorm_mac_filter.bcast_accept_all = mask;
5251 break;
5253 case BNX2X_RX_MODE_PROMISC:
5254 tstorm_mac_filter.ucast_accept_all = mask;
5255 tstorm_mac_filter.mcast_accept_all = mask;
5256 tstorm_mac_filter.bcast_accept_all = mask;
5257 /* pass management unicast packets as well */
5258 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5259 break;
5261 default:
5262 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5263 break;
5264 }
5266 REG_WR(bp,
5267 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5268 llh_mask);
5270 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5271 REG_WR(bp, BAR_TSTRORM_INTMEM +
5272 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5273 ((u32 *)&tstorm_mac_filter)[i]);
5275 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5276 ((u32 *)&tstorm_mac_filter)[i]); */
5277 }
5279 if (mode != BNX2X_RX_MODE_NONE)
5280 bnx2x_set_client_config(bp);
5281 }
5283 static void bnx2x_init_internal_common(struct bnx2x *bp)
5284 {
5285 int i;
5287 /* Zero this manually as its initialization is
5288 currently missing in the initTool */
5289 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5290 REG_WR(bp, BAR_USTRORM_INTMEM +
5291 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5292 }
5294 static void bnx2x_init_internal_port(struct bnx2x *bp)
5295 {
5296 int port = BP_PORT(bp);
5298 REG_WR(bp,
5299 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5300 REG_WR(bp,
5301 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5302 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5303 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5304 }
5306 static void bnx2x_init_internal_func(struct bnx2x *bp)
5307 {
5308 struct tstorm_eth_function_common_config tstorm_config = {0};
5309 struct stats_indication_flags stats_flags = {0};
5310 int port = BP_PORT(bp);
5311 int func = BP_FUNC(bp);
5312 int i, j;
5313 u32 offset;
5314 u16 max_agg_size;
5316 if (is_multi(bp)) {
5317 tstorm_config.config_flags = MULTI_FLAGS(bp);
5318 tstorm_config.rss_result_mask = MULTI_MASK;
5319 }
5321 /* Enable TPA if needed */
5322 if (bp->flags & TPA_ENABLE_FLAG)
5323 tstorm_config.config_flags |=
5324 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5326 if (IS_E1HMF(bp))
5327 tstorm_config.config_flags |=
5328 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5330 tstorm_config.leading_client_id = BP_L_ID(bp);
5332 REG_WR(bp, BAR_TSTRORM_INTMEM +
5333 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5334 (*(u32 *)&tstorm_config));
5336 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5337 bnx2x_set_storm_rx_mode(bp);
5339 for_each_queue(bp, i) {
5340 u8 cl_id = bp->fp[i].cl_id;
5342 /* reset xstorm per client statistics */
5343 offset = BAR_XSTRORM_INTMEM +
5344 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5345 for (j = 0;
5346 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5347 REG_WR(bp, offset + j*4, 0);
5349 /* reset tstorm per client statistics */
5350 offset = BAR_TSTRORM_INTMEM +
5351 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5352 for (j = 0;
5353 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5354 REG_WR(bp, offset + j*4, 0);
5356 /* reset ustorm per client statistics */
5357 offset = BAR_USTRORM_INTMEM +
5358 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5359 for (j = 0;
5360 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5361 REG_WR(bp, offset + j*4, 0);
5362 }
5364 /* Init statistics related context */
5365 stats_flags.collect_eth = 1;
5367 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5368 ((u32 *)&stats_flags)[0]);
5369 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5370 ((u32 *)&stats_flags)[1]);
5372 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5373 ((u32 *)&stats_flags)[0]);
5374 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5375 ((u32 *)&stats_flags)[1]);
5377 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5378 ((u32 *)&stats_flags)[0]);
5379 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5380 ((u32 *)&stats_flags)[1]);
5382 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5383 ((u32 *)&stats_flags)[0]);
5384 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5385 ((u32 *)&stats_flags)[1]);
5387 REG_WR(bp, BAR_XSTRORM_INTMEM +
5388 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5389 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5390 REG_WR(bp, BAR_XSTRORM_INTMEM +
5391 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5392 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5394 REG_WR(bp, BAR_TSTRORM_INTMEM +
5395 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5396 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5397 REG_WR(bp, BAR_TSTRORM_INTMEM +
5398 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5399 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5401 REG_WR(bp, BAR_USTRORM_INTMEM +
5402 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5403 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5404 REG_WR(bp, BAR_USTRORM_INTMEM +
5405 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5406 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5408 if (CHIP_IS_E1H(bp)) {
5409 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5410 IS_E1HMF(bp));
5411 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5412 IS_E1HMF(bp));
5413 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5414 IS_E1HMF(bp));
5415 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5416 IS_E1HMF(bp));
5418 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5419 bp->e1hov);
5420 }
5422 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5423 max_agg_size =
5424 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5425 SGE_PAGE_SIZE * PAGES_PER_SGE),
5426 (u32)0xffff);
5427 for_each_rx_queue(bp, i) {
5428 struct bnx2x_fastpath *fp = &bp->fp[i];
5430 REG_WR(bp, BAR_USTRORM_INTMEM +
5431 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5432 U64_LO(fp->rx_comp_mapping));
5433 REG_WR(bp, BAR_USTRORM_INTMEM +
5434 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5435 U64_HI(fp->rx_comp_mapping));
5437 /* Next page */
5438 REG_WR(bp, BAR_USTRORM_INTMEM +
5439 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5440 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5441 REG_WR(bp, BAR_USTRORM_INTMEM +
5442 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5443 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5445 REG_WR16(bp, BAR_USTRORM_INTMEM +
5446 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5447 max_agg_size);
5448 }
5450 /* dropless flow control */
5451 if (CHIP_IS_E1H(bp)) {
5452 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5454 rx_pause.bd_thr_low = 250;
5455 rx_pause.cqe_thr_low = 250;
5457 rx_pause.sge_thr_low = 0;
5458 rx_pause.bd_thr_high = 350;
5459 rx_pause.cqe_thr_high = 350;
5460 rx_pause.sge_thr_high = 0;
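/*
 * "Dropless" flow control: instead of dropping frames when the host
 * rings run low, the chip is told to assert pause when the free BD /
 * CQE (and, with TPA, SGE) counts fall to the *_thr_low marks and to
 * release it again above *_thr_high. The thresholds are presumably in
 * ring entries; the SGE pair stays 0 (unused) unless TPA is active on
 * the queue, as the loop below shows.
 */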
5462 for_each_rx_queue(bp, i) {
5463 struct bnx2x_fastpath *fp = &bp->fp[i];
5465 if (!fp->disable_tpa) {
5466 rx_pause.sge_thr_low = 150;
5467 rx_pause.sge_thr_high = 250;
5468 }
5471 offset = BAR_USTRORM_INTMEM +
5472 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5473 fp->cl_id);
5474 for (j = 0;
5475 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5476 j++)
5477 REG_WR(bp, offset + j*4,
5478 ((u32 *)&rx_pause)[j]);
5479 }
5480 }
5482 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5484 /* Init rate shaping and fairness contexts */
5485 if (IS_E1HMF(bp)) {
5486 int vn;
5488 /* During init there is no active link
5489 Until link is up, set link rate to 10Gbps */
5490 bp->link_vars.line_speed = SPEED_10000;
5491 bnx2x_init_port_minmax(bp);
5493 bnx2x_calc_vn_weight_sum(bp);
5495 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5496 bnx2x_init_vn_minmax(bp, 2*vn + port);
5498 /* Enable rate shaping and fairness */
5499 bp->cmng.flags.cmng_enables =
5500 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5501 if (bp->vn_weight_sum)
5502 bp->cmng.flags.cmng_enables |=
5503 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5504 else
5505 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5506 " fairness will be disabled\n");
5507 } else {
5508 /* rate shaping and fairness are disabled */
5509 DP(NETIF_MSG_IFUP,
5510 "single function mode minmax will be disabled\n");
5511 }
5514 /* Store it to internal memory */
5515 if (bp->port.pmf)
5516 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5517 REG_WR(bp, BAR_XSTRORM_INTMEM +
5518 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5519 ((u32 *)(&bp->cmng))[i]);
5520 }
5522 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5523 {
5524 switch (load_code) {
5525 case FW_MSG_CODE_DRV_LOAD_COMMON:
5526 bnx2x_init_internal_common(bp);
5527 /* no break */
5529 case FW_MSG_CODE_DRV_LOAD_PORT:
5530 bnx2x_init_internal_port(bp);
5531 /* no break */
5533 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5534 bnx2x_init_internal_func(bp);
5535 break;
5537 default:
5538 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5539 break;
5540 }
5541 }
5543 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5544 {
5545 int i;
5547 for_each_queue(bp, i) {
5548 struct bnx2x_fastpath *fp = &bp->fp[i];
5550 fp->bp = bp;
5551 fp->state = BNX2X_FP_STATE_CLOSED;
5552 fp->index = i;
5553 fp->cl_id = BP_L_ID(bp) + i;
5554 fp->sb_id = fp->cl_id;
5555 /* Suitable Rx and Tx SBs are served by the same client */
5556 if (i >= bp->num_rx_queues)
5557 fp->cl_id -= bp->num_rx_queues;
5558 DP(NETIF_MSG_IFUP,
5559 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5560 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5561 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5562 fp->sb_id);
5563 bnx2x_update_fpsb_idx(fp);
5564 }
5566 /* ensure status block indices were read */
5567 rmb();
5570 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5571 DEF_SB_ID);
5572 bnx2x_update_dsb_idx(bp);
5573 bnx2x_update_coalesce(bp);
5574 bnx2x_init_rx_rings(bp);
5575 bnx2x_init_tx_ring(bp);
5576 bnx2x_init_sp_ring(bp);
5577 bnx2x_init_context(bp);
5578 bnx2x_init_internal(bp, load_code);
5579 bnx2x_init_ind_table(bp);
5580 bnx2x_stats_init(bp);
5582 /* At this point, we are ready for interrupts */
5583 atomic_set(&bp->intr_sem, 0);
5585 /* flush all before enabling interrupts */
5586 mb();
5587 mmiowb();
5589 bnx2x_int_enable(bp);
5591 /* Check for SPIO5 */
5592 bnx2x_attn_int_deasserted0(bp,
5593 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5594 AEU_INPUTS_ATTN_BITS_SPIO5);
5595 }
5597 /* end of nic init */
5599 /*
5600 * gzip service functions
5601 */
5603 static int bnx2x_gunzip_init(struct bnx2x *bp)
5605 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5606 &bp->gunzip_mapping);
5607 if (bp->gunzip_buf == NULL)
5608 goto gunzip_nomem1;
5610 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5611 if (bp->strm == NULL)
5612 goto gunzip_nomem2;
5614 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5615 GFP_KERNEL);
5616 if (bp->strm->workspace == NULL)
5617 goto gunzip_nomem3;
5619 return 0;
5621 gunzip_nomem3:
5622 kfree(bp->strm);
5623 bp->strm = NULL;
5625 gunzip_nomem2:
5626 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5627 bp->gunzip_mapping);
5628 bp->gunzip_buf = NULL;
5630 gunzip_nomem1:
5631 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5632 " decompression\n", bp->dev->name);
5633 return -ENOMEM;
5634 }
5636 static void bnx2x_gunzip_end(struct bnx2x *bp)
5637 {
5638 kfree(bp->strm->workspace);
5640 kfree(bp->strm);
5641 bp->strm = NULL;
5643 if (bp->gunzip_buf) {
5644 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5645 bp->gunzip_mapping);
5646 bp->gunzip_buf = NULL;
5647 }
5648 }
5650 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5651 {
5652 int n, rc;
5654 /* check gzip header */
5655 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5656 BNX2X_ERR("Bad gzip header\n");
5664 if (zbuf[3] & FNAME)
5665 while ((zbuf[n++] != 0) && (n < len));
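/*
 * RFC 1952 framing: a gzip member starts with a 10-byte fixed header
 * (magic 0x1f 0x8b, method byte Z_DEFLATED, flags, mtime, xfl, os).
 * If the FNAME flag (0x8) is set, a NUL-terminated file name follows,
 * which the while loop above skips; zlib_inflateInit2() is then given
 * -MAX_WBITS so it inflates the raw deflate stream without looking
 * for a zlib wrapper.
 */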
5667 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5668 bp->strm->avail_in = len - n;
5669 bp->strm->next_out = bp->gunzip_buf;
5670 bp->strm->avail_out = FW_BUF_SIZE;
5672 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5673 if (rc != Z_OK)
5674 return rc;
5676 rc = zlib_inflate(bp->strm, Z_FINISH);
5677 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5678 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5679 bp->dev->name, bp->strm->msg);
5681 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5682 if (bp->gunzip_outlen & 0x3)
5683 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5684 " gunzip_outlen (%d) not aligned\n",
5685 bp->dev->name, bp->gunzip_outlen);
5686 bp->gunzip_outlen >>= 2;
5688 zlib_inflateEnd(bp->strm);
5690 if (rc == Z_STREAM_END)
5691 return 0;
5693 return rc;
5694 }
5696 /* nic load/unload */
5698 /*
5699 * General service functions
5700 */
5702 /* send a NIG loopback debug packet */
5703 static void bnx2x_lb_pckt(struct bnx2x *bp)
5704 {
5705 u32 wb_write[3];
5707 /* Ethernet source and destination addresses */
5708 wb_write[0] = 0x55555555;
5709 wb_write[1] = 0x55555555;
5710 wb_write[2] = 0x20; /* SOP */
5711 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5713 /* NON-IP protocol */
5714 wb_write[0] = 0x09000000;
5715 wb_write[1] = 0x55555555;
5716 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5717 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5718 }
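/*
 * Each of the two 3-word writes to NIG_REG_DEBUG_PACKET_LB is two data
 * words plus a control word: per the comments above, 0x20 flags
 * start-of-packet and 0x10 end-of-packet (with eop_bvalid = 0), so one
 * call injects a single minimal frame into the NIG loopback path.
 */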
5720 /* some of the internal memories
5721 * are not directly readable from the driver;
5722 * to test them we send debug packets
5723 */
5724 static int bnx2x_int_mem_test(struct bnx2x *bp)
5725 {
5726 int factor;
5727 int count, i;
5728 u32 val = 0;
5730 if (CHIP_REV_IS_FPGA(bp))
5731 factor = 120;
5732 else if (CHIP_REV_IS_EMUL(bp))
5733 factor = 200;
5734 else
5735 factor = 1;
5737 DP(NETIF_MSG_HW, "start part1\n");
5739 /* Disable inputs of parser neighbor blocks */
5740 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5741 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5742 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5743 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5745 /* Write 0 to parser credits for CFC search request */
5746 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5748 /* send Ethernet packet */
5749 bnx2x_lb_pckt(bp);
5751 /* TODO: do we need to reset the NIG statistics? */
5752 /* Wait until NIG register shows 1 packet of size 0x10 */
5753 count = 1000 * factor;
5754 while (count) {
5756 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5757 val = *bnx2x_sp(bp, wb_data[0]);
5758 if (val == 0x10)
5759 break;
5761 msleep(10);
5762 count--;
5763 }
5764 if (val != 0x10) {
5765 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5766 return -1;
5767 }
5769 /* Wait until PRS register shows 1 packet */
5770 count = 1000 * factor;
5771 while (count) {
5772 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5773 if (val == 1)
5774 break;
5776 msleep(10);
5777 count--;
5778 }
5779 if (val != 0x1) {
5780 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5781 return -2;
5782 }
5784 /* Reset and init BRB, PRS */
5785 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5786 msleep(50);
5787 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5788 msleep(50);
5789 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5790 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5792 DP(NETIF_MSG_HW, "part2\n");
5794 /* Disable inputs of parser neighbor blocks */
5795 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5796 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5797 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5798 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5800 /* Write 0 to parser credits for CFC search request */
5801 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5803 /* send 10 Ethernet packets */
5804 for (i = 0; i < 10; i++)
5805 bnx2x_lb_pckt(bp);
5807 /* Wait until NIG register shows 10 + 1
5808 packets of size 11*0x10 = 0xb0 */
5809 count = 1000 * factor;
5810 while (count) {
5812 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5813 val = *bnx2x_sp(bp, wb_data[0]);
5814 if (val == 0xb0)
5815 break;
5817 msleep(10);
5818 count--;
5819 }
5820 if (val != 0xb0) {
5821 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5822 return -3;
5823 }
5825 /* Wait until PRS register shows 2 packets */
5826 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5827 if (val != 2)
5828 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5830 /* Write 1 to parser credits for CFC search request */
5831 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5833 /* Wait until PRS register shows 3 packets */
5834 msleep(10 * factor);
5835 /* Wait until NIG register shows 1 packet of size 0x10 */
5836 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5837 if (val != 3)
5838 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5840 /* clear NIG EOP FIFO */
5841 for (i = 0; i < 11; i++)
5842 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5843 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5844 if (val != 1) {
5845 BNX2X_ERR("clear of NIG failed\n");
5846 return -4;
5847 }
5849 /* Reset and init BRB, PRS, NIG */
5850 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5851 msleep(50);
5852 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5853 msleep(50);
5854 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5855 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5857 /* set NIC mode */
5858 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5861 /* Enable inputs of parser neighbor blocks */
5862 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5863 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5864 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5865 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5867 DP(NETIF_MSG_HW, "done\n");
5872 static void enable_blocks_attention(struct bnx2x *bp)
5873 {
5874 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5875 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5876 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5877 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5878 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5879 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5880 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5881 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5882 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5883 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5884 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5885 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5886 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5887 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5888 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5889 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5890 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5891 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5892 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5893 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5894 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5895 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5896 if (CHIP_REV_IS_FPGA(bp))
5897 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5899 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5900 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5901 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5902 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5903 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5904 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5905 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5906 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5907 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5908 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5909 }
5912 static void bnx2x_reset_common(struct bnx2x *bp)
5913 {
5914 /* reset_common */
5915 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5916 0xd3ffff7f);
5917 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5918 }
5921 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5922 {
5923 int is_required;
5924 u32 val;
5925 int port;
5927 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5928 SHARED_HW_CFG_FAN_FAILURE_MASK;
5930 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5931 is_required = 1;
5933 /*
5934 * The fan failure mechanism is usually related to the PHY type since
5935 * the power consumption of the board is affected by the PHY. Currently,
5936 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5937 */
5938 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5939 for (port = PORT_0; port < PORT_MAX; port++) {
5940 u32 phy_type =
5941 SHMEM_RD(bp, dev_info.port_hw_config[port].
5942 external_phy_config) &
5943 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5944 is_required |=
5945 ((phy_type ==
5946 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5947 (phy_type ==
5948 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5949 (phy_type ==
5950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5951 }
5953 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5955 if (is_required == 0)
5956 return;
5958 /* Fan failure is indicated by SPIO 5 */
5959 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5960 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5962 /* set to active low mode */
5963 val = REG_RD(bp, MISC_REG_SPIO_INT);
5964 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5965 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5966 REG_WR(bp, MISC_REG_SPIO_INT, val);
5968 /* enable interrupt to signal the IGU */
5969 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5970 val |= (1 << MISC_REGISTERS_SPIO_5);
5971 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5972 }
5974 static int bnx2x_init_common(struct bnx2x *bp)
5975 {
5976 u32 val, i;
5978 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5980 bnx2x_reset_common(bp);
5981 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5982 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5984 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5985 if (CHIP_IS_E1H(bp))
5986 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5988 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5989 msleep(30);
5990 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5992 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5993 if (CHIP_IS_E1(bp)) {
5994 /* enable HW interrupt from PXP on USDM overflow
5995 bit 16 on INT_MASK_0 */
5996 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5997 }
5999 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6002 #ifdef __BIG_ENDIAN
6003 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6004 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6005 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6006 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6007 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6008 /* make sure this value is 0 */
6009 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6011 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6012 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6013 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6014 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6015 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6016 #endif
6018 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6019 #ifdef BCM_ISCSI
6020 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6021 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6022 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6023 #endif
6025 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6026 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6028 /* let the HW do its magic ... */
6029 msleep(100);
6030 /* finish PXP init */
6031 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6032 if (val != 1) {
6033 BNX2X_ERR("PXP2 CFG failed\n");
6034 return -EBUSY;
6035 }
6036 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6037 if (val != 1) {
6038 BNX2X_ERR("PXP2 RD_INIT failed\n");
6039 return -EBUSY;
6040 }
6042 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6043 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6045 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6047 /* clean the DMAE memory */
6048 bp->dmae_ready = 1;
6049 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6051 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6052 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6053 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6054 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6056 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6057 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6058 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6059 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6061 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6062 /* soft reset pulse */
6063 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6064 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6066 #ifdef BCM_ISCSI
6067 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6068 #endif
6070 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6071 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6072 if (!CHIP_REV_IS_SLOW(bp)) {
6073 /* enable hw interrupt from doorbell Q */
6074 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6075 }
6077 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6078 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6079 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6080 /* set NIC mode */
6081 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6082 if (CHIP_IS_E1H(bp))
6083 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6085 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6086 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6087 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6088 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6090 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6091 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6092 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6093 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6095 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6096 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6097 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6098 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6100 /* sync semi rtc */
6101 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6102 0x80000000);
6103 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6104 0x80000000);
6106 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6107 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6108 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6110 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6111 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6112 REG_WR(bp, i, 0xc0cac01a);
6113 /* TODO: replace with something meaningful */
6114 }
6115 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6116 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6118 if (sizeof(union cdu_context) != 1024)
6119 /* we currently assume that a context is 1024 bytes */
6120 printk(KERN_ALERT PFX "please adjust the size of"
6121 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
6123 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6124 val = (4 << 24) + (0 << 12) + 1024;
6125 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6127 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6128 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6129 /* enable context validation interrupt from CFC */
6130 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6132 /* set the thresholds to prevent CFC/CDU race */
6133 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6135 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6136 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6138 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6139 /* Reset PCIE errors for debug */
6140 REG_WR(bp, 0x2814, 0xffffffff);
6141 REG_WR(bp, 0x3820, 0xffffffff);
6143 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6144 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6145 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6146 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6148 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6149 if (CHIP_IS_E1H(bp)) {
6150 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6151 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6152 }
6154 if (CHIP_REV_IS_SLOW(bp))
6155 msleep(200);
6157 /* finish CFC init */
6158 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6159 if (val != 1) {
6160 BNX2X_ERR("CFC LL_INIT failed\n");
6161 return -EBUSY;
6162 }
6163 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6164 if (val != 1) {
6165 BNX2X_ERR("CFC AC_INIT failed\n");
6166 return -EBUSY;
6167 }
6168 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6169 if (val != 1) {
6170 BNX2X_ERR("CFC CAM_INIT failed\n");
6171 return -EBUSY;
6172 }
6173 REG_WR(bp, CFC_REG_DEBUG0, 0);
6175 /* read NIG statistic
6176 to see if this is our first up since powerup */
6177 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6178 val = *bnx2x_sp(bp, wb_data[0]);
6180 /* do internal memory self test */
6181 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6182 BNX2X_ERR("internal mem self test failed\n");
6186 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6187 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6188 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6189 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6190 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6191 bp->port.need_hw_lock = 1;
6192 break;
6194 default:
6195 break;
6196 }
6198 bnx2x_setup_fan_failure_detection(bp);
6200 /* clear PXP2 attentions */
6201 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6203 enable_blocks_attention(bp);
6205 if (!BP_NOMCP(bp)) {
6206 bnx2x_acquire_phy_lock(bp);
6207 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6208 bnx2x_release_phy_lock(bp);
6209 } else
6210 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6212 return 0;
6213 }
6215 static int bnx2x_init_port(struct bnx2x *bp)
6216 {
6217 int port = BP_PORT(bp);
6218 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6219 u32 low, high;
6220 u32 val;
6222 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6224 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6226 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6227 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6229 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6230 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6231 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6232 #ifdef BCM_ISCSI
6236 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6237 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6238 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6239 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6244 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6245 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6246 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6247 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6252 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6253 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6254 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6255 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6256 #endif
6257 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6259 #ifdef BCM_ISCSI
6260 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6261 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6263 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6265 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6267 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6268 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6269 /* no pause for emulation and FPGA */
6274 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6275 else if (bp->dev->mtu > 4096) {
6276 if (bp->flags & ONE_PORT_FLAG)
6280 /* (24*1024 + val*4)/256 */
6281 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6284 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6285 high = low + 56; /* 14*1024/256 */
6287 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6288 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
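/* Both watermarks are in 256-byte BRB blocks, as the /256 arithmetic
 * in the comments above suggests; whatever the MTU-derived low mark
 * is, high sits a fixed 14KB (56 blocks) above it.
 */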
6291 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6293 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6294 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6295 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6296 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6298 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6299 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6300 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6301 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6303 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6304 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6306 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6308 /* configure PBF to work without PAUSE for an MTU of 9000 */
6309 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6311 /* update threshold */
6312 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6313 /* update init credit */
6314 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6317 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6319 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6322 /* tell the searcher where the T2 table is */
6323 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6325 wb_write[0] = U64_LO(bp->t2_mapping);
6326 wb_write[1] = U64_HI(bp->t2_mapping);
6327 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6328 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6329 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6330 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6332 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
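/* Free-list geometry for the searcher: 16K/64 = 256 entries, with
 * FIRSTFREE at the base of T2 and LASTFREE at the final 64-byte
 * entry; the 10 hash bits presumably select a 1024-bucket lookup.
 */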
6334 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6335 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6337 if (CHIP_IS_E1(bp)) {
6338 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6339 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6341 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6343 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6344 /* init aeu_mask_attn_func_0/1:
6345 * - SF mode: bits 3-7 are masked; only bits 0-2 are in use
6346 * - MF mode: bit 3 is masked; bits 0-2 are in use as in SF,
6347 * bits 4-7 are used for "per vn group attention" */
6348 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6349 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6351 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6352 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6353 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6354 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6355 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6357 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6359 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6361 if (CHIP_IS_E1H(bp)) {
6362 /* 0x2 disable e1hov, 0x1 enable */
6363 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6364 (IS_E1HMF(bp) ? 0x1 : 0x2));
6367 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6368 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6369 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6373 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6374 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6376 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6377 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6379 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6381 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6382 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6384 /* The GPIO should be swapped if the swap register is set and active */
6386 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6387 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6389 /* Select function upon port-swap configuration */
6391 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6392 aeu_gpio_mask = (swap_val && swap_override) ?
6393 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6394 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6396 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6397 aeu_gpio_mask = (swap_val && swap_override) ?
6398 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6399 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6401 val = REG_RD(bp, offset);
6402 /* add GPIO3 to group */
6403 val |= aeu_gpio_mask;
6404 REG_WR(bp, offset, val);
6408 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6409 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6410 /* add SPIO 5 to group 0 */
6412 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6413 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6414 val = REG_RD(bp, reg_addr);
6415 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6416 REG_WR(bp, reg_addr, val);
6424 bnx2x__link_reset(bp);
6429 #define ILT_PER_FUNC (768/2)
6430 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6431 /* the phys address is shifted right 12 bits, and a valid bit (1)
6432 is set in the 53rd bit;
6433 then, since this is a wide register(TM),
6434 we split it into two 32-bit writes
6436 #define ONCHIP_ADDR1(x) ((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
6437 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)(x) >> 44)))
6438 #define PXP_ONE_ILT(x) (((x) << 10) | (x))
6439 #define PXP_ILT_RANGE(f, l) (((l) << 10) | (f))
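/* Worked example (illustrative address): for a DMA address of
 * 0x12345678000, ONCHIP_ADDR1 yields 0x12345678 (address bits 12-43)
 * and ONCHIP_ADDR2 yields 0x00100000 (just the valid bit, since
 * address bits 44-63 are zero here); the two halves form one 64-bit
 * ILT entry when written as a wide-register pair.
 */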
6441 #define CNIC_ILT_LINES 0
6443 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6447 if (CHIP_IS_E1H(bp))
6448 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6450 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6452 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6455 static int bnx2x_init_func(struct bnx2x *bp)
6457 int port = BP_PORT(bp);
6458 int func = BP_FUNC(bp);
6462 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6464 /* set MSI reconfigure capability */
6465 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6466 val = REG_RD(bp, addr);
6467 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6468 REG_WR(bp, addr, val);
6470 i = FUNC_ILT_BASE(func);
6472 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6473 if (CHIP_IS_E1H(bp)) {
6474 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6475 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6477 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6478 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6481 if (CHIP_IS_E1H(bp)) {
6482 for (i = 0; i < 9; i++)
6483 bnx2x_init_block(bp,
6484 cm_blocks[i], FUNC0_STAGE + func);
6486 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6487 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6490 /* HC init per function */
6491 if (CHIP_IS_E1H(bp)) {
6492 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6494 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6495 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6497 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6499 /* Reset PCIE errors for debug */
6500 REG_WR(bp, 0x2114, 0xffffffff);
6501 REG_WR(bp, 0x2120, 0xffffffff);
6506 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6510 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6511 BP_FUNC(bp), load_code);
6514 mutex_init(&bp->dmae_mutex);
6515 rc = bnx2x_gunzip_init(bp);
6519 switch (load_code) {
6520 case FW_MSG_CODE_DRV_LOAD_COMMON:
6521 rc = bnx2x_init_common(bp);
6526 case FW_MSG_CODE_DRV_LOAD_PORT:
6528 rc = bnx2x_init_port(bp);
6533 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6535 rc = bnx2x_init_func(bp);
6541 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6545 if (!BP_NOMCP(bp)) {
6546 int func = BP_FUNC(bp);
6548 bp->fw_drv_pulse_wr_seq =
6549 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6550 DRV_PULSE_SEQ_MASK);
6551 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6554 /* this needs to be done before gunzip end */
6555 bnx2x_zero_def_sb(bp);
6556 for_each_queue(bp, i)
6557 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6560 bnx2x_gunzip_end(bp);
6565 static void bnx2x_free_mem(struct bnx2x *bp)
6568 #define BNX2X_PCI_FREE(x, y, size) \
6571 pci_free_consistent(bp->pdev, size, x, y); \
6577 #define BNX2X_FREE(x) \
6589 for_each_queue(bp, i) {
6592 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6593 bnx2x_fp(bp, i, status_blk_mapping),
6594 sizeof(struct host_status_block));
6597 for_each_rx_queue(bp, i) {
6599 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6600 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6601 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6602 bnx2x_fp(bp, i, rx_desc_mapping),
6603 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6605 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6606 bnx2x_fp(bp, i, rx_comp_mapping),
6607 sizeof(struct eth_fast_path_rx_cqe) *
6611 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6612 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6613 bnx2x_fp(bp, i, rx_sge_mapping),
6614 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6617 for_each_tx_queue(bp, i) {
6619 /* fastpath tx rings: tx_buf tx_desc */
6620 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6621 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6622 bnx2x_fp(bp, i, tx_desc_mapping),
6623 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6625 /* end of fastpath */
6627 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6628 sizeof(struct host_def_status_block));
6630 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6631 sizeof(struct bnx2x_slowpath));
6634 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6635 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6636 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6637 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6639 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6641 #undef BNX2X_PCI_FREE
6645 static int bnx2x_alloc_mem(struct bnx2x *bp)
6648 #define BNX2X_PCI_ALLOC(x, y, size) \
6650 x = pci_alloc_consistent(bp->pdev, size, y); \
6652 goto alloc_mem_err; \
6653 memset(x, 0, size); \
6656 #define BNX2X_ALLOC(x, size) \
6658 x = vmalloc(size); \
6660 goto alloc_mem_err; \
6661 memset(x, 0, size); \
6668 for_each_queue(bp, i) {
6669 bnx2x_fp(bp, i, bp) = bp;
6672 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6673 &bnx2x_fp(bp, i, status_blk_mapping),
6674 sizeof(struct host_status_block));
6677 for_each_rx_queue(bp, i) {
6679 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6680 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6681 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6682 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6683 &bnx2x_fp(bp, i, rx_desc_mapping),
6684 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6686 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6687 &bnx2x_fp(bp, i, rx_comp_mapping),
6688 sizeof(struct eth_fast_path_rx_cqe) *
6692 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6693 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6694 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6695 &bnx2x_fp(bp, i, rx_sge_mapping),
6696 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6699 for_each_tx_queue(bp, i) {
6701 /* fastpath tx rings: tx_buf tx_desc */
6702 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6703 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6704 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6705 &bnx2x_fp(bp, i, tx_desc_mapping),
6706 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6708 /* end of fastpath */
6710 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6711 sizeof(struct host_def_status_block));
6713 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6714 sizeof(struct bnx2x_slowpath));
6717 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6720 for (i = 0; i < 64*1024; i += 64) {
6721 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6722 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6725 /* allocate searcher T2 table;
6726 we allocate 1/4 of the T1 allocation for T2
6727 (which is not entered into the ILT) */
6728 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6731 for (i = 0; i < 16*1024; i += 64)
6732 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6734 /* now fixup the last line in the block to point to the next block */
6735 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
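/* Each 64-byte T2 entry carries a physical "next" pointer in its
 * trailing 8 bytes, so the loop above threads the block into a singly
 * linked free list, and the fixup then points the last entry back at
 * the base, closing the ring.
 */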
6737 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6738 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6740 /* QM queues (128*MAX_CONN) */
6741 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6744 /* Slow path ring */
6745 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6753 #undef BNX2X_PCI_ALLOC
6757 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6761 for_each_tx_queue(bp, i) {
6762 struct bnx2x_fastpath *fp = &bp->fp[i];
6764 u16 bd_cons = fp->tx_bd_cons;
6765 u16 sw_prod = fp->tx_pkt_prod;
6766 u16 sw_cons = fp->tx_pkt_cons;
6768 while (sw_cons != sw_prod) {
6769 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6775 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6779 for_each_rx_queue(bp, j) {
6780 struct bnx2x_fastpath *fp = &bp->fp[j];
6782 for (i = 0; i < NUM_RX_BD; i++) {
6783 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6784 struct sk_buff *skb = rx_buf->skb;
6789 pci_unmap_single(bp->pdev,
6790 pci_unmap_addr(rx_buf, mapping),
6791 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6796 if (!fp->disable_tpa)
6797 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6798 ETH_MAX_AGGREGATION_QUEUES_E1 :
6799 ETH_MAX_AGGREGATION_QUEUES_E1H);
6803 static void bnx2x_free_skbs(struct bnx2x *bp)
6805 bnx2x_free_tx_skbs(bp);
6806 bnx2x_free_rx_skbs(bp);
6809 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6813 free_irq(bp->msix_table[0].vector, bp->dev);
6814 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6815 bp->msix_table[0].vector);
6817 for_each_queue(bp, i) {
6818 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6819 "state %x\n", i, bp->msix_table[i + offset].vector,
6820 bnx2x_fp(bp, i, state));
6822 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6826 static void bnx2x_free_irq(struct bnx2x *bp)
6828 if (bp->flags & USING_MSIX_FLAG) {
6829 bnx2x_free_msix_irqs(bp);
6830 pci_disable_msix(bp->pdev);
6831 bp->flags &= ~USING_MSIX_FLAG;
6833 } else if (bp->flags & USING_MSI_FLAG) {
6834 free_irq(bp->pdev->irq, bp->dev);
6835 pci_disable_msi(bp->pdev);
6836 bp->flags &= ~USING_MSI_FLAG;
6839 free_irq(bp->pdev->irq, bp->dev);
6842 static int bnx2x_enable_msix(struct bnx2x *bp)
6844 int i, rc, offset = 1;
6847 bp->msix_table[0].entry = igu_vec;
6848 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6850 for_each_queue(bp, i) {
6851 igu_vec = BP_L_ID(bp) + offset + i;
6852 bp->msix_table[i + offset].entry = igu_vec;
6853 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6854 "(fastpath #%u)\n", i + offset, igu_vec, i);
6857 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6858 BNX2X_NUM_QUEUES(bp) + offset);
6860 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6864 bp->flags |= USING_MSIX_FLAG;
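/* From here on the vector layout is fixed: msix_table[0] serves the
 * slowpath status block and msix_table[1..N] map one-to-one onto the
 * fastpath queues, which is exactly what bnx2x_req_msix_irqs() below
 * assumes with its offset of 1.
 */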
6869 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6871 int i, rc, offset = 1;
6873 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6874 bp->dev->name, bp->dev);
6876 BNX2X_ERR("request sp irq failed\n");
6880 for_each_queue(bp, i) {
6881 struct bnx2x_fastpath *fp = &bp->fp[i];
6883 if (i < bp->num_rx_queues)
6884 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6886 sprintf(fp->name, "%s-tx-%d",
6887 bp->dev->name, i - bp->num_rx_queues);
6889 rc = request_irq(bp->msix_table[i + offset].vector,
6890 bnx2x_msix_fp_int, 0, fp->name, fp);
6892 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6893 bnx2x_free_msix_irqs(bp);
6897 fp->state = BNX2X_FP_STATE_IRQ;
6900 i = BNX2X_NUM_QUEUES(bp);
6901 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6903 bp->dev->name, bp->msix_table[0].vector,
6904 0, bp->msix_table[offset].vector,
6905 i - 1, bp->msix_table[offset + i - 1].vector);
6910 static int bnx2x_enable_msi(struct bnx2x *bp)
6914 rc = pci_enable_msi(bp->pdev);
6916 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6919 bp->flags |= USING_MSI_FLAG;
6924 static int bnx2x_req_irq(struct bnx2x *bp)
6926 unsigned long flags;
6929 if (bp->flags & USING_MSI_FLAG)
6932 flags = IRQF_SHARED;
6934 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6935 bp->dev->name, bp->dev);
6937 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6942 static void bnx2x_napi_enable(struct bnx2x *bp)
6946 for_each_rx_queue(bp, i)
6947 napi_enable(&bnx2x_fp(bp, i, napi));
6950 static void bnx2x_napi_disable(struct bnx2x *bp)
6954 for_each_rx_queue(bp, i)
6955 napi_disable(&bnx2x_fp(bp, i, napi));
6958 static void bnx2x_netif_start(struct bnx2x *bp)
6962 intr_sem = atomic_dec_and_test(&bp->intr_sem);
6963 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6966 if (netif_running(bp->dev)) {
6967 bnx2x_napi_enable(bp);
6968 bnx2x_int_enable(bp);
6969 if (bp->state == BNX2X_STATE_OPEN)
6970 netif_tx_wake_all_queues(bp->dev);
6975 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6977 bnx2x_int_disable_sync(bp, disable_hw);
6978 bnx2x_napi_disable(bp);
6979 netif_tx_disable(bp->dev);
6980 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6984 * Init service functions
6987 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6989 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6990 int port = BP_PORT(bp);
6993 * unicasts 0-31:port0 32-63:port1
6994 * multicast 64-127:port0 128-191:port1
6996 config->hdr.length = 2;
6997 config->hdr.offset = port ? 32 : 0;
6998 config->hdr.client_id = bp->fp->cl_id;
6999 config->hdr.reserved1 = 0;
7002 config->config_table[0].cam_entry.msb_mac_addr =
7003 swab16(*(u16 *)&bp->dev->dev_addr[0]);
7004 config->config_table[0].cam_entry.middle_mac_addr =
7005 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7006 config->config_table[0].cam_entry.lsb_mac_addr =
7007 swab16(*(u16 *)&bp->dev->dev_addr[4]);
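/* The swab16() pairs pack the MAC big-endian into the 16-bit CAM
 * fields: on a little-endian host, 00:11:22:33:44:55 becomes
 * msb 0x0011, middle 0x2233 and lsb 0x4455, matching the
 * %04x:%04x:%04x debug print below.
 */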
7008 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7010 config->config_table[0].target_table_entry.flags = 0;
7012 CAM_INVALIDATE(config->config_table[0]);
7013 config->config_table[0].target_table_entry.clients_bit_vector =
7014 cpu_to_le32(1 << BP_L_ID(bp));
7015 config->config_table[0].target_table_entry.vlan_id = 0;
7017 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7018 (set ? "setting" : "clearing"),
7019 config->config_table[0].cam_entry.msb_mac_addr,
7020 config->config_table[0].cam_entry.middle_mac_addr,
7021 config->config_table[0].cam_entry.lsb_mac_addr);
7024 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
7025 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
7026 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
7027 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7029 config->config_table[1].target_table_entry.flags =
7030 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7032 CAM_INVALIDATE(config->config_table[1]);
7033 config->config_table[1].target_table_entry.clients_bit_vector =
7034 cpu_to_le32(1 << BP_L_ID(bp));
7035 config->config_table[1].target_table_entry.vlan_id = 0;
7037 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7038 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7039 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7042 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7044 struct mac_configuration_cmd_e1h *config =
7045 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7047 /* CAM allocation for E1H
7048 * unicasts: by func number
7049 * multicast: 20+FUNC*20, 20 each
7051 config->hdr.length = 1;
7052 config->hdr.offset = BP_FUNC(bp);
7053 config->hdr.client_id = bp->fp->cl_id;
7054 config->hdr.reserved1 = 0;
7057 config->config_table[0].msb_mac_addr =
7058 swab16(*(u16 *)&bp->dev->dev_addr[0]);
7059 config->config_table[0].middle_mac_addr =
7060 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7061 config->config_table[0].lsb_mac_addr =
7062 swab16(*(u16 *)&bp->dev->dev_addr[4]);
7063 config->config_table[0].clients_bit_vector =
7064 cpu_to_le32(1 << BP_L_ID(bp));
7065 config->config_table[0].vlan_id = 0;
7066 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7068 config->config_table[0].flags = BP_PORT(bp);
7070 config->config_table[0].flags =
7071 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7073 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
7074 (set ? "setting" : "clearing"),
7075 config->config_table[0].msb_mac_addr,
7076 config->config_table[0].middle_mac_addr,
7077 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
7079 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7080 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7081 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7084 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7085 int *state_p, int poll)
7087 /* can take a while if any port is running */
7090 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7091 poll ? "polling" : "waiting", state, idx);
7096 bnx2x_rx_int(bp->fp, 10);
7097 /* if the index is different from 0,
7098 * the reply for some commands will
7099 * be on the non-default queue
7102 bnx2x_rx_int(&bp->fp[idx], 10);
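/* In poll mode the ramrod completion is reaped by hand: the default
 * queue is always drained, and the per-index queue as well when the
 * reply is expected there, since interrupts may not be serviced yet
 * at this stage.
 */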
7105 mb(); /* state is changed by bnx2x_sp_event() */
7106 if (*state_p == state) {
7107 #ifdef BNX2X_STOP_ON_ERROR
7108 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7120 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7121 poll ? "polling" : "waiting", state, idx);
7122 #ifdef BNX2X_STOP_ON_ERROR
7129 static int bnx2x_setup_leading(struct bnx2x *bp)
7133 /* reset IGU state */
7134 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7137 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7139 /* Wait for completion */
7140 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7145 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7147 struct bnx2x_fastpath *fp = &bp->fp[index];
7149 /* reset IGU state */
7150 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7153 fp->state = BNX2X_FP_STATE_OPENING;
7154 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7157 /* Wait for completion */
7158 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7162 static int bnx2x_poll(struct napi_struct *napi, int budget);
7164 static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7165 int *num_tx_queues_out)
7167 int _num_rx_queues = 0, _num_tx_queues = 0;
7169 switch (bp->multi_mode) {
7170 case ETH_RSS_MODE_DISABLED:
7175 case ETH_RSS_MODE_REGULAR:
7177 _num_rx_queues = min_t(u32, num_rx_queues,
7178 BNX2X_MAX_QUEUES(bp));
7180 _num_rx_queues = min_t(u32, num_online_cpus(),
7181 BNX2X_MAX_QUEUES(bp));
7184 _num_tx_queues = min_t(u32, num_tx_queues,
7185 BNX2X_MAX_QUEUES(bp));
7187 _num_tx_queues = min_t(u32, num_online_cpus(),
7188 BNX2X_MAX_QUEUES(bp));
7190 /* There must be no more Tx queues than Rx queues */
7191 if (_num_tx_queues > _num_rx_queues) {
7192 BNX2X_ERR("number of tx queues (%d) > "
7193 "number of rx queues (%d)"
7194 " defaulting to %d\n",
7195 _num_tx_queues, _num_rx_queues,
7197 _num_tx_queues = _num_rx_queues;
7208 *num_rx_queues_out = _num_rx_queues;
7209 *num_tx_queues_out = _num_tx_queues;
7212 static int bnx2x_set_int_mode(struct bnx2x *bp)
7219 bp->num_rx_queues = 1;
7220 bp->num_tx_queues = 1;
7221 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7226 /* Set interrupt mode according to bp->multi_mode value */
7227 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7228 &bp->num_tx_queues);
7230 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
7231 bp->num_rx_queues, bp->num_tx_queues);
7233 /* if we can't use MSI-X we only need one fp,
7234 * so try to enable MSI-X with the requested number of fp's
7235 * and fallback to MSI or legacy INTx with one fp
7237 rc = bnx2x_enable_msix(bp);
7239 /* failed to enable MSI-X */
7241 BNX2X_ERR("Multi requested but failed to "
7242 "enable MSI-X (rx %d tx %d), "
7243 "set number of queues to 1\n",
7244 bp->num_rx_queues, bp->num_tx_queues);
7245 bp->num_rx_queues = 1;
7246 bp->num_tx_queues = 1;
7250 bp->dev->real_num_tx_queues = bp->num_tx_queues;
7255 /* must be called with rtnl_lock */
7256 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7261 #ifdef BNX2X_STOP_ON_ERROR
7262 if (unlikely(bp->panic))
7266 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7268 rc = bnx2x_set_int_mode(bp);
7270 if (bnx2x_alloc_mem(bp))
7273 for_each_rx_queue(bp, i)
7274 bnx2x_fp(bp, i, disable_tpa) =
7275 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7277 for_each_rx_queue(bp, i)
7278 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7281 bnx2x_napi_enable(bp);
7283 if (bp->flags & USING_MSIX_FLAG) {
7284 rc = bnx2x_req_msix_irqs(bp);
7286 pci_disable_msix(bp->pdev);
7290 /* Fall back to INTx if we failed to enable MSI-X due to lack of
7291 memory (in bnx2x_set_int_mode()) */
7292 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7293 bnx2x_enable_msi(bp);
7295 rc = bnx2x_req_irq(bp);
7297 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7298 if (bp->flags & USING_MSI_FLAG)
7299 pci_disable_msi(bp->pdev);
7302 if (bp->flags & USING_MSI_FLAG) {
7303 bp->dev->irq = bp->pdev->irq;
7304 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7305 bp->dev->name, bp->pdev->irq);
7309 /* Send LOAD_REQUEST command to MCP.
7310 The MCP's reply gives the type of LOAD command:
7311 if this is the first port to be initialized,
7312 common blocks should be initialized, otherwise not
7314 if (!BP_NOMCP(bp)) {
7315 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7317 BNX2X_ERR("MCP response failure, aborting\n");
7321 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7322 rc = -EBUSY; /* other port in diagnostic mode */
7327 int port = BP_PORT(bp);
7329 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7330 load_count[0], load_count[1], load_count[2]);
7332 load_count[1 + port]++;
7333 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7334 load_count[0], load_count[1], load_count[2]);
7335 if (load_count[0] == 1)
7336 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7337 else if (load_count[1 + port] == 1)
7338 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7340 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
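/* Without an MCP, the driver emulates its arbitration with the
 * static load_count[] array: the first function up overall performs
 * COMMON init, the first on its port performs PORT init, and every
 * other function does FUNCTION-only init; the unload path further
 * down decrements the same counters to pick the matching reset scope.
 */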
7343 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7344 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7348 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7351 rc = bnx2x_init_hw(bp, load_code);
7353 BNX2X_ERR("HW init failed, aborting\n");
7357 /* Setup NIC internals and enable interrupts */
7358 bnx2x_nic_init(bp, load_code);
7360 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7361 (bp->common.shmem2_base))
7362 SHMEM2_WR(bp, dcc_support,
7363 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7364 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7366 /* Send LOAD_DONE command to MCP */
7367 if (!BP_NOMCP(bp)) {
7368 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7370 BNX2X_ERR("MCP response failure, aborting\n");
7376 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7378 rc = bnx2x_setup_leading(bp);
7380 BNX2X_ERR("Setup leading failed!\n");
7381 #ifndef BNX2X_STOP_ON_ERROR
7389 if (CHIP_IS_E1H(bp))
7390 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7391 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7392 bp->state = BNX2X_STATE_DISABLED;
7395 if (bp->state == BNX2X_STATE_OPEN) {
7396 for_each_nondefault_queue(bp, i) {
7397 rc = bnx2x_setup_multi(bp, i);
7403 bnx2x_set_mac_addr_e1(bp, 1);
7405 bnx2x_set_mac_addr_e1h(bp, 1);
7409 bnx2x_initial_phy_init(bp, load_mode);
7411 /* Start fast path */
7412 switch (load_mode) {
7414 if (bp->state == BNX2X_STATE_OPEN) {
7415 /* Tx queue should be only reenabled */
7416 netif_tx_wake_all_queues(bp->dev);
7418 /* Initialize the receive filter. */
7419 bnx2x_set_rx_mode(bp->dev);
7423 netif_tx_start_all_queues(bp->dev);
7424 if (bp->state != BNX2X_STATE_OPEN)
7425 netif_tx_disable(bp->dev);
7426 /* Initialize the receive filter. */
7427 bnx2x_set_rx_mode(bp->dev);
7431 /* Initialize the receive filter. */
7432 bnx2x_set_rx_mode(bp->dev);
7433 bp->state = BNX2X_STATE_DIAG;
7441 bnx2x__link_status_update(bp);
7443 /* start the timer */
7444 mod_timer(&bp->timer, jiffies + bp->current_interval);
7450 bnx2x_int_disable_sync(bp, 1);
7451 if (!BP_NOMCP(bp)) {
7452 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7453 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7456 /* Free SKBs, SGEs, TPA pool and driver internals */
7457 bnx2x_free_skbs(bp);
7458 for_each_rx_queue(bp, i)
7459 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7464 bnx2x_napi_disable(bp);
7465 for_each_rx_queue(bp, i)
7466 netif_napi_del(&bnx2x_fp(bp, i, napi));
7472 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7474 struct bnx2x_fastpath *fp = &bp->fp[index];
7477 /* halt the connection */
7478 fp->state = BNX2X_FP_STATE_HALTING;
7479 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7481 /* Wait for completion */
7482 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7484 if (rc) /* timeout */
7487 /* delete cfc entry */
7488 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7490 /* Wait for completion */
7491 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7496 static int bnx2x_stop_leading(struct bnx2x *bp)
7498 __le16 dsb_sp_prod_idx;
7499 /* if the other port is handling traffic,
7500 this can take a lot of time */
7506 /* Send HALT ramrod */
7507 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7508 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7510 /* Wait for completion */
7511 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7512 &(bp->fp[0].state), 1);
7513 if (rc) /* timeout */
7516 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7518 /* Send PORT_DELETE ramrod */
7519 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7521 /* Wait for the completion to arrive on the default status block;
7522 we are going to reset the chip anyway,
7523 so there is not much to do if this times out
7525 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7527 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7528 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7529 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7530 #ifdef BNX2X_STOP_ON_ERROR
7538 rmb(); /* Refresh the dsb_sp_prod */
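/* There is no fastpath state to poll for PORT_DEL, so completion is
 * detected indirectly: the default status block's producer index is
 * sampled before posting the ramrod, and the loop above waits for it
 * to move.
 */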
7540 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7541 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7546 static void bnx2x_reset_func(struct bnx2x *bp)
7548 int port = BP_PORT(bp);
7549 int func = BP_FUNC(bp);
7553 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7554 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7557 base = FUNC_ILT_BASE(func);
7558 for (i = base; i < base + ILT_PER_FUNC; i++)
7559 bnx2x_ilt_wr(bp, i, 0);
7562 static void bnx2x_reset_port(struct bnx2x *bp)
7564 int port = BP_PORT(bp);
7567 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7569 /* Do not rcv packets to BRB */
7570 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7571 /* Do not direct rcv packets that are not for MCP to the BRB */
7572 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7573 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7576 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7579 /* Check for BRB port occupancy */
7580 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7582 DP(NETIF_MSG_IFDOWN,
7583 "BRB1 is not empty %d blocks are occupied\n", val);
7585 /* TODO: Close Doorbell port? */
7588 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7590 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7591 BP_FUNC(bp), reset_code);
7593 switch (reset_code) {
7594 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7595 bnx2x_reset_port(bp);
7596 bnx2x_reset_func(bp);
7597 bnx2x_reset_common(bp);
7600 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7601 bnx2x_reset_port(bp);
7602 bnx2x_reset_func(bp);
7605 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7606 bnx2x_reset_func(bp);
7610 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7615 /* must be called with rtnl_lock */
7616 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7618 int port = BP_PORT(bp);
7622 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7624 bp->rx_mode = BNX2X_RX_MODE_NONE;
7625 bnx2x_set_storm_rx_mode(bp);
7627 bnx2x_netif_stop(bp, 1);
7629 del_timer_sync(&bp->timer);
7630 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7631 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7632 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7637 /* Wait until tx fastpath tasks complete */
7638 for_each_tx_queue(bp, i) {
7639 struct bnx2x_fastpath *fp = &bp->fp[i];
7642 while (bnx2x_has_tx_work_unload(fp)) {
7646 BNX2X_ERR("timeout waiting for queue[%d]\n",
7648 #ifdef BNX2X_STOP_ON_ERROR
7659 /* Give HW time to discard old tx messages */
7662 if (CHIP_IS_E1(bp)) {
7663 struct mac_configuration_cmd *config =
7664 bnx2x_sp(bp, mcast_config);
7666 bnx2x_set_mac_addr_e1(bp, 0);
7668 for (i = 0; i < config->hdr.length; i++)
7669 CAM_INVALIDATE(config->config_table[i]);
7671 config->hdr.length = i;
7672 if (CHIP_REV_IS_SLOW(bp))
7673 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7675 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7676 config->hdr.client_id = bp->fp->cl_id;
7677 config->hdr.reserved1 = 0;
7679 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7680 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7681 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7684 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7686 bnx2x_set_mac_addr_e1h(bp, 0);
7688 for (i = 0; i < MC_HASH_SIZE; i++)
7689 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7691 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7694 if (unload_mode == UNLOAD_NORMAL)
7695 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7697 else if (bp->flags & NO_WOL_FLAG)
7698 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7701 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7702 u8 *mac_addr = bp->dev->dev_addr;
7704 /* The MAC address is written to entries 1-4 to
7705 preserve entry 0, which is used by the PMF */
7706 u8 entry = (BP_E1HVN(bp) + 1)*8;
7708 val = (mac_addr[0] << 8) | mac_addr[1];
7709 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7711 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7712 (mac_addr[4] << 8) | mac_addr[5];
7713 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
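/* Each MAC_MATCH entry is 8 bytes (two 32-bit registers), so the
 * (vn + 1) * 8 offset places this function's address in one of
 * entries 1-4 while entry 0 stays reserved for the PMF, as noted
 * above.
 */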
7715 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7718 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7720 /* Close the multi and leading connections;
7721 completions for these ramrods are collected synchronously */
7722 for_each_nondefault_queue(bp, i)
7723 if (bnx2x_stop_multi(bp, i))
7726 rc = bnx2x_stop_leading(bp);
7728 BNX2X_ERR("Stop leading failed!\n");
7729 #ifdef BNX2X_STOP_ON_ERROR
7738 reset_code = bnx2x_fw_command(bp, reset_code);
7740 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7741 load_count[0], load_count[1], load_count[2]);
7743 load_count[1 + port]--;
7744 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7745 load_count[0], load_count[1], load_count[2]);
7746 if (load_count[0] == 0)
7747 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7748 else if (load_count[1 + port] == 0)
7749 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7751 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7754 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7755 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7756 bnx2x__link_reset(bp);
7758 /* Reset the chip */
7759 bnx2x_reset_chip(bp, reset_code);
7761 /* Report UNLOAD_DONE to MCP */
7763 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7767 /* Free SKBs, SGEs, TPA pool and driver internals */
7768 bnx2x_free_skbs(bp);
7769 for_each_rx_queue(bp, i)
7770 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7771 for_each_rx_queue(bp, i)
7772 netif_napi_del(&bnx2x_fp(bp, i, napi));
7775 bp->state = BNX2X_STATE_CLOSED;
7777 netif_carrier_off(bp->dev);
7782 static void bnx2x_reset_task(struct work_struct *work)
7784 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7786 #ifdef BNX2X_STOP_ON_ERROR
7787 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7788 " so reset not done to allow debug dump,\n"
7789 " you will need to reboot when done\n");
7795 if (!netif_running(bp->dev))
7796 goto reset_task_exit;
7798 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7799 bnx2x_nic_load(bp, LOAD_NORMAL);
7805 /* end of nic load/unload */
7810 * Init service functions
7813 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7816 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7817 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7818 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7819 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7820 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7821 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7822 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7823 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7825 BNX2X_ERR("Unsupported function index: %d\n", func);
7830 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7832 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7834 /* Flush all outstanding writes */
7837 /* Pretend to be function 0 */
7839 /* Flush the GRC transaction (in the chip) */
7840 new_val = REG_RD(bp, reg);
7842 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7847 /* From now we are in the "like-E1" mode */
7848 bnx2x_int_disable(bp);
7850 /* Flush all outstanding writes */
7853 /* Restore the original function settings */
7854 REG_WR(bp, reg, orig_func);
7855 new_val = REG_RD(bp, reg);
7856 if (new_val != orig_func) {
7857 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7858 orig_func, new_val);
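/* The "pretend" register makes this function's GRC accesses appear
 * to come from function 0, letting the E1H flow disable the HC
 * interrupt state UNDI left behind there; the read-back after each
 * write flushes the GRC transaction before the original function id
 * is restored.
 */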
7863 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7865 if (CHIP_IS_E1H(bp))
7866 bnx2x_undi_int_disable_e1h(bp, func);
7868 bnx2x_int_disable(bp);
7871 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7875 /* Check if there is any driver already loaded */
7876 val = REG_RD(bp, MISC_REG_UNPREPARED);
7878 /* Check if it is the UNDI driver;
7879 * the UNDI driver initializes the CID offset for the normal bell to 0x7
7881 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7882 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7884 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7886 int func = BP_FUNC(bp);
7890 /* clear the UNDI indication */
7891 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7893 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7895 /* try unload UNDI on port 0 */
7898 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7899 DRV_MSG_SEQ_NUMBER_MASK);
7900 reset_code = bnx2x_fw_command(bp, reset_code);
7902 /* if UNDI is loaded on the other port */
7903 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7905 /* send "DONE" for previous unload */
7906 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7908 /* unload UNDI on port 1 */
7911 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7912 DRV_MSG_SEQ_NUMBER_MASK);
7913 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7915 bnx2x_fw_command(bp, reset_code);
7918 /* now it's safe to release the lock */
7919 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7921 bnx2x_undi_int_disable(bp, func);
7923 /* close input traffic and wait for it */
7924 /* Do not rcv packets to BRB */
7926 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7927 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7928 /* Do not direct rcv packets that are not for MCP to the BRB */
7931 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7932 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7935 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7936 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7939 /* save NIG port swap info */
7940 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7941 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7944 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7947 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7949 /* take the NIG out of reset and restore swap values */
7951 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7952 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7953 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7954 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7956 /* send unload done to the MCP */
7957 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7959 /* restore our func and fw_seq */
7962 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7963 DRV_MSG_SEQ_NUMBER_MASK);
7966 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7970 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7972 u32 val, val2, val3, val4, id;
7975 /* Get the chip revision id and number. */
7976 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7977 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7978 id = ((val & 0xffff) << 16);
7979 val = REG_RD(bp, MISC_REG_CHIP_REV);
7980 id |= ((val & 0xf) << 12);
7981 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7982 id |= ((val & 0xff) << 4);
7983 val = REG_RD(bp, MISC_REG_BOND_ID);
7985 bp->common.chip_id = id;
7986 bp->link_params.chip_id = bp->common.chip_id;
7987 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7989 val = (REG_RD(bp, 0x2874) & 0x55);
7990 if ((bp->common.chip_id & 0x1) ||
7991 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7992 bp->flags |= ONE_PORT_FLAG;
7993 BNX2X_DEV_INFO("single port device\n");
7996 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7997 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7998 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7999 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8000 bp->common.flash_size, bp->common.flash_size);
8002 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8003 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
8004 bp->link_params.shmem_base = bp->common.shmem_base;
8005 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8006 bp->common.shmem_base, bp->common.shmem2_base);
8008 if (!bp->common.shmem_base ||
8009 (bp->common.shmem_base < 0xA0000) ||
8010 (bp->common.shmem_base >= 0xC0000)) {
8011 BNX2X_DEV_INFO("MCP not active\n");
8012 bp->flags |= NO_MCP_FLAG;
8016 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8017 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8018 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8019 BNX2X_ERR("BAD MCP validity signature\n");
8021 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8022 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8024 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8025 SHARED_HW_CFG_LED_MODE_MASK) >>
8026 SHARED_HW_CFG_LED_MODE_SHIFT);
8028 bp->link_params.feature_config_flags = 0;
8029 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8030 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8031 bp->link_params.feature_config_flags |=
8032 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8034 bp->link_params.feature_config_flags &=
8035 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8037 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8038 bp->common.bc_ver = val;
8039 BNX2X_DEV_INFO("bc_ver %X\n", val);
8040 if (val < BNX2X_BC_VER) {
8041 /* for now only warn;
8042 * later we might need to enforce this */
8043 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8044 " please upgrade BC\n", BNX2X_BC_VER, val);
8046 bp->link_params.feature_config_flags |=
8047 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8048 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8050 if (BP_E1HVN(bp) == 0) {
8051 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8052 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8054 /* no WOL capability for E1HVN != 0 */
8055 bp->flags |= NO_WOL_FLAG;
8057 BNX2X_DEV_INFO("%sWoL capable\n",
8058 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8060 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8061 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8062 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8063 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8065 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8066 val, val2, val3, val4);
8069 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8072 int port = BP_PORT(bp);
8075 switch (switch_cfg) {
8077 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8080 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8081 switch (ext_phy_type) {
8082 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8083 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8086 bp->port.supported |= (SUPPORTED_10baseT_Half |
8087 SUPPORTED_10baseT_Full |
8088 SUPPORTED_100baseT_Half |
8089 SUPPORTED_100baseT_Full |
8090 SUPPORTED_1000baseT_Full |
8091 SUPPORTED_2500baseX_Full |
8096 SUPPORTED_Asym_Pause);
8099 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8100 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8103 bp->port.supported |= (SUPPORTED_10baseT_Half |
8104 SUPPORTED_10baseT_Full |
8105 SUPPORTED_100baseT_Half |
8106 SUPPORTED_100baseT_Full |
8107 SUPPORTED_1000baseT_Full |
8112 SUPPORTED_Asym_Pause);
8116 BNX2X_ERR("NVRAM config error. "
8117 "BAD SerDes ext_phy_config 0x%x\n",
8118 bp->link_params.ext_phy_config);
8122 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8124 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8127 case SWITCH_CFG_10G:
8128 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8131 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8132 switch (ext_phy_type) {
8133 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8134 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8137 bp->port.supported |= (SUPPORTED_10baseT_Half |
8138 SUPPORTED_10baseT_Full |
8139 SUPPORTED_100baseT_Half |
8140 SUPPORTED_100baseT_Full |
8141 SUPPORTED_1000baseT_Full |
8142 SUPPORTED_2500baseX_Full |
8143 SUPPORTED_10000baseT_Full |
8148 SUPPORTED_Asym_Pause);
8151 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8152 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
8155 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8156 SUPPORTED_1000baseT_Full |
8160 SUPPORTED_Asym_Pause);
8163 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8164 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8167 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8168 SUPPORTED_2500baseX_Full |
8169 SUPPORTED_1000baseT_Full |
8173 SUPPORTED_Asym_Pause);
8176 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8177 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8180 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8183 SUPPORTED_Asym_Pause);
8186 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8187 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8190 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8191 SUPPORTED_1000baseT_Full |
8194 SUPPORTED_Asym_Pause);
8197 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8198 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8201 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8202 SUPPORTED_1000baseT_Full |
8206 SUPPORTED_Asym_Pause);
8209 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8210 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8213 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8214 SUPPORTED_1000baseT_Full |
8218 SUPPORTED_Asym_Pause);
8221 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8222 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8225 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8229 SUPPORTED_Asym_Pause);
8232 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8233 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8236 bp->port.supported |= (SUPPORTED_10baseT_Half |
8237 SUPPORTED_10baseT_Full |
8238 SUPPORTED_100baseT_Half |
8239 SUPPORTED_100baseT_Full |
8240 SUPPORTED_1000baseT_Full |
8241 SUPPORTED_10000baseT_Full |
8245 SUPPORTED_Asym_Pause);
8248 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8249 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8250 bp->link_params.ext_phy_config);
8254 BNX2X_ERR("NVRAM config error. "
8255 "BAD XGXS ext_phy_config 0x%x\n",
8256 bp->link_params.ext_phy_config);
8260 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8262 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8267 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
8268 bp->port.link_config);
8271 bp->link_params.phy_addr = bp->port.phy_addr;
8273 /* mask what we support according to speed_cap_mask */
8274 if (!(bp->link_params.speed_cap_mask &
8275 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
8276 bp->port.supported &= ~SUPPORTED_10baseT_Half;
8278 if (!(bp->link_params.speed_cap_mask &
8279 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
8280 bp->port.supported &= ~SUPPORTED_10baseT_Full;
8282 if (!(bp->link_params.speed_cap_mask &
8283 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
8284 bp->port.supported &= ~SUPPORTED_100baseT_Half;
8286 if (!(bp->link_params.speed_cap_mask &
8287 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
8288 bp->port.supported &= ~SUPPORTED_100baseT_Full;
8290 if (!(bp->link_params.speed_cap_mask &
8291 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8292 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8293 SUPPORTED_1000baseT_Full);
8295 if (!(bp->link_params.speed_cap_mask &
8296 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
8297 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
8299 if (!(bp->link_params.speed_cap_mask &
8300 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
8301 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
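/* At this point bp->port.supported is the intersection of what the
 * selected PHY can do (the switch above) and what the NVRAM speed
 * capability mask permits.
 */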
8303 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8306 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8308 bp->link_params.req_duplex = DUPLEX_FULL;
8310 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
8311 case PORT_FEATURE_LINK_SPEED_AUTO:
8312 if (bp->port.supported & SUPPORTED_Autoneg) {
8313 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8314 bp->port.advertising = bp->port.supported;
8317 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8319 if ((ext_phy_type ==
8320 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8322 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
8323 /* force 10G, no AN */
8324 bp->link_params.req_line_speed = SPEED_10000;
8325 bp->port.advertising =
8326 (ADVERTISED_10000baseT_Full |
8330 BNX2X_ERR("NVRAM config error. "
8331 "Invalid link_config 0x%x"
8332 " Autoneg not supported\n",
8333 bp->port.link_config);
8338 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8339 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8340 bp->link_params.req_line_speed = SPEED_10;
8341 bp->port.advertising = (ADVERTISED_10baseT_Full |
8344 BNX2X_ERR("NVRAM config error. "
8345 "Invalid link_config 0x%x"
8346 " speed_cap_mask 0x%x\n",
8347 bp->port.link_config,
8348 bp->link_params.speed_cap_mask);
8353 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8354 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8355 bp->link_params.req_line_speed = SPEED_10;
8356 bp->link_params.req_duplex = DUPLEX_HALF;
8357 bp->port.advertising = (ADVERTISED_10baseT_Half |
8360 BNX2X_ERR("NVRAM config error. "
8361 "Invalid link_config 0x%x"
8362 " speed_cap_mask 0x%x\n",
8363 bp->port.link_config,
8364 bp->link_params.speed_cap_mask);
8369 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8370 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8371 bp->link_params.req_line_speed = SPEED_100;
8372 bp->port.advertising = (ADVERTISED_100baseT_Full |
8375 BNX2X_ERR("NVRAM config error. "
8376 "Invalid link_config 0x%x"
8377 " speed_cap_mask 0x%x\n",
8378 bp->port.link_config,
8379 bp->link_params.speed_cap_mask);
8384 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8385 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8386 bp->link_params.req_line_speed = SPEED_100;
8387 bp->link_params.req_duplex = DUPLEX_HALF;
8388 bp->port.advertising = (ADVERTISED_100baseT_Half |
8391 BNX2X_ERR("NVRAM config error. "
8392 "Invalid link_config 0x%x"
8393 " speed_cap_mask 0x%x\n",
8394 bp->port.link_config,
8395 bp->link_params.speed_cap_mask);
8400 case PORT_FEATURE_LINK_SPEED_1G:
8401 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8402 bp->link_params.req_line_speed = SPEED_1000;
8403 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8406 BNX2X_ERR("NVRAM config error. "
8407 "Invalid link_config 0x%x"
8408 " speed_cap_mask 0x%x\n",
8409 bp->port.link_config,
8410 bp->link_params.speed_cap_mask);
8415 case PORT_FEATURE_LINK_SPEED_2_5G:
8416 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8417 bp->link_params.req_line_speed = SPEED_2500;
8418 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8421 BNX2X_ERR("NVRAM config error. "
8422 "Invalid link_config 0x%x"
8423 " speed_cap_mask 0x%x\n",
8424 bp->port.link_config,
8425 bp->link_params.speed_cap_mask);
8430 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8431 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8432 case PORT_FEATURE_LINK_SPEED_10G_KR:
8433 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8434 bp->link_params.req_line_speed = SPEED_10000;
8435 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8438 BNX2X_ERR("NVRAM config error. "
8439 "Invalid link_config 0x%x"
8440 " speed_cap_mask 0x%x\n",
8441 bp->port.link_config,
8442 bp->link_params.speed_cap_mask);
8448 BNX2X_ERR("NVRAM config error. "
8449 "BAD link speed link_config 0x%x\n",
8450 bp->port.link_config);
8451 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8452 bp->port.advertising = bp->port.supported;
8456 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8457 PORT_FEATURE_FLOW_CONTROL_MASK);
8458 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8459 !(bp->port.supported & SUPPORTED_Autoneg))
8460 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8462 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8463 " advertising 0x%x\n",
8464 bp->link_params.req_line_speed,
8465 bp->link_params.req_duplex,
8466 bp->link_params.req_flow_ctrl, bp->port.advertising);
8469 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8471 int port = BP_PORT(bp);
8477 bp->link_params.bp = bp;
8478 bp->link_params.port = port;
8480 bp->link_params.lane_config =
8481 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8482 bp->link_params.ext_phy_config =
8484 dev_info.port_hw_config[port].external_phy_config);
8485 /* BCM8727_NOC => BCM8727 with no over-current support */
8486 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8487 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8488 bp->link_params.ext_phy_config &=
8489 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8490 bp->link_params.ext_phy_config |=
8491 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8492 bp->link_params.feature_config_flags |=
8493 FEATURE_CONFIG_BCM8727_NOC;
8496 bp->link_params.speed_cap_mask =
8498 dev_info.port_hw_config[port].speed_capability_mask);
8500 bp->port.link_config =
8501 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8503 /* Get the 4-lane XGXS rx and tx config */
8504 for (i = 0; i < 2; i++) {
8506 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8507 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8508 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8511 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8512 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8513 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8516 /* If the device is capable of WoL, set the default state according to the HW */
8519 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8520 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8521 (config & PORT_FEATURE_WOL_ENABLED));
8523 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8524 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8525 bp->link_params.lane_config,
8526 bp->link_params.ext_phy_config,
8527 bp->link_params.speed_cap_mask, bp->port.link_config);
8529 bp->link_params.switch_cfg |= (bp->port.link_config &
8530 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8531 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8533 bnx2x_link_settings_requested(bp);
8536 * If connected directly, work with the internal PHY, otherwise, work
8537 * with the external PHY
8539 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8540 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8541 bp->mdio.prtad = bp->link_params.phy_addr;
8543 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8544 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8546 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
8548 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8549 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8550 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8551 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8552 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8553 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8554 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8555 bp->dev->dev_addr[5] = (u8)(val & 0xff);
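/* The MAC address is stored big-endian across two shmem words:
 * mac_upper carries bytes 0-1 in its low 16 bits and mac_lower
 * carries bytes 2-5.
 */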
8556 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8557 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! VN %d in single function mode,"
					  " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
/****************************************************************************
* ethtool service functions
****************************************************************************/

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
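/*
 * Note: the table sizes are counted in 32-bit registers, hence the
 * "regdump_len *= 4" before adding the byte-sized dump_hdr.  A wide-bus
 * entry of size S with R read_regs_count appears to expand to S * (1 + R)
 * registers in the dump.
 */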
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}
static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32
bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
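/*
 * Example (illustrative): reading 8 bytes at flash offset 0x100 issues
 * two dword commands, the first flagged MCPR_NVM_COMMAND_FIRST and the
 * second MCPR_NVM_COMMAND_LAST, each polled for MCPR_NVM_COMMAND_DONE:
 *
 *	u8 buf[8];
 *	int rc = bnx2x_nvram_read(bp, 0x100, buf, sizeof(buf));
 */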
static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))
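/*
 * Worked example: for a single-byte write to flash offset 0x102,
 * align_offset in bnx2x_nvram_write1() below is 0x100 and
 * BYTE_OFFSET(0x102) = 16, so the byte is merged at bit offset 16 of
 * the dword image read back from 0x100 before it is rewritten.
 */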
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
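/*
 * Note: the flash is programmed in NVRAM_PAGE_SIZE chunks; inside the
 * loop above a dword that ends a page gets MCPR_NVM_COMMAND_LAST and
 * the dword that starts the next page gets MCPR_NVM_COMMAND_FIRST
 * again, while the final dword of the whole buffer is always marked
 * LAST.
 */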
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED)) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}
#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
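/*
 * 0xf0*12 = 2880us: the HC timer appears to count in 12us ticks with a
 * 0xf0 maximal tick count, which is what bounds rx_ticks/tx_ticks below.
 */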
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
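/*
 * Each reg_tbl entry above is { offset on port 0, per-port stride,
 * writable-bits mask }: e.g. BRB1_REG_PAUSE_LOW_THRESHOLD_0 with stride
 * 4 is tested at offset0 + 4 on port 1, and only the bits in the mask
 * are compared after each 0x00000000/0xffffffff write-read cycle.
 */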
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
#define CRC32_RESIDUAL			0xdebb20e3
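/*
 * 0xdebb20e3 is the standard CRC-32 residue: running the little-endian
 * CRC over a block that already has its CRC appended yields this
 * constant, which is why bnx2x_test_nvram() below can validate each
 * region with a single ether_crc_le() pass.
 */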
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
	(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_rx_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_rx_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */
/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
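/*
 * The last entry of each RCQ page is a "next page" link rather than a
 * completion, so when the status-block index lands on it (low bits all
 * set) it is bumped past the link before being compared with the
 * driver's consumer.
 */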
/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* bnx2x_has_rx_work() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after bnx2x_has_rx_work and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!bnx2x_has_rx_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
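/*
 * "fix" is the distance in bytes between the checksum start offset
 * handed to the chip and the real transport header: a positive fix
 * means the sum covered fix extra bytes before the header, so their
 * partial sum is subtracted out; a negative fix adds the missed bytes
 * back in.  The result is returned byte-swapped, apparently to match
 * the byte order the parsing BD expects.
 */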
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
10815 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10816 /* check if packet requires linearization (packet is too fragmented)
10817 no need to check fragmentation if page size > 8K (there will be no
10818 violation to FW restrictions) */
10819 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10824 int first_bd_sz = 0;
10826 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10827 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10829 if (xmit_type & XMIT_GSO) {
10830 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10831 /* Check if LSO packet needs to be copied:
10832 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10833 int wnd_size = MAX_FETCH_BD - 3;
10834 /* Number of windows to check */
10835 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10840 /* Headers length */
10841 hlen = (int)(skb_transport_header(skb) - skb->data) +
10844 /* Amount of data (w/o headers) on linear part of SKB*/
10845 first_bd_sz = skb_headlen(skb) - hlen;
10847 wnd_sum = first_bd_sz;
10849 /* Calculate the first sum - it's special */
10850 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10852 skb_shinfo(skb)->frags[frag_idx].size;
10854 /* If there was data on linear skb data - check it */
10855 if (first_bd_sz > 0) {
10856 if (unlikely(wnd_sum < lso_mss)) {
10861 wnd_sum -= first_bd_sz;
10864 /* Others are easier: run through the frag list and
10865 check all windows */
10866 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10867 wnd_sum +=
10868 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10870 if (unlikely(wnd_sum < lso_mss)) {
10871 to_copy = 1;
10872 break;
10874 wnd_sum -=
10875 skb_shinfo(skb)->frags[wnd_idx].size;
10877 } else {
10878 /* in the non-LSO case a too fragmented packet should always
10879 be linearized */
10880 to_copy = 1;
10884 exit_lbl:
10885 if (unlikely(to_copy))
10886 DP(NETIF_MSG_TX_QUEUED,
10887 "Linearization IS REQUIRED for %s packet. "
10888 "num_frags %d hlen %d first_bd_sz %d\n",
10889 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10890 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10892 return to_copy;
10894 #endif
10896 /* called with netif_tx_lock
10897 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10898 * netif_wake_queue()
10899 */
10900 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10902 struct bnx2x *bp = netdev_priv(dev);
10903 struct bnx2x_fastpath *fp, *fp_stat;
10904 struct netdev_queue *txq;
10905 struct sw_tx_bd *tx_buf;
10906 struct eth_tx_start_bd *tx_start_bd;
10907 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10908 struct eth_tx_parse_bd *pbd = NULL;
10909 u16 pkt_prod, bd_prod;
10910 int nbd, fp_index;
10911 dma_addr_t mapping;
10912 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10913 int i;
10914 u8 hlen = 0;
10915 __le16 pkt_size = 0;
10917 #ifdef BNX2X_STOP_ON_ERROR
10918 if (unlikely(bp->panic))
10919 return NETDEV_TX_BUSY;
10920 #endif
10922 fp_index = skb_get_queue_mapping(skb);
10923 txq = netdev_get_tx_queue(dev, fp_index);
10925 fp = &bp->fp[fp_index + bp->num_rx_queues];
10926 fp_stat = &bp->fp[fp_index];
10928 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10929 fp_stat->eth_q_stats.driver_xoff++;
10930 netif_tx_stop_queue(txq);
10931 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10932 return NETDEV_TX_BUSY;
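/* Editor's note (added): the "+ 3" headroom above covers the BDs a
 * packet needs beyond its frags: one BD for the linear data plus two
 * for the parsing BD and the last BD - the same arithmetic as the
 * "MAX_FETCH_BD - 3" window used by the linearization check. */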
10935 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10936 " gso type %x xmit_type %x\n",
10937 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10938 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10940 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10941 /* First, check if we need to linearize the skb (due to FW
10942 restrictions). No need to check fragmentation if page size > 8K
10943 (there will be no violation to FW restrictions) */
10944 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10945 /* Statistics of linearization */
10946 bp->lin_cnt++;
10947 if (skb_linearize(skb) != 0) {
10948 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10949 "silently dropping this SKB\n");
10950 dev_kfree_skb_any(skb);
10951 return NETDEV_TX_OK;
10954 #endif
10956 /*
10957 Please read carefully. First we use one BD which we mark as start,
10958 then we have a parsing info BD (used for TSO or xsum),
10959 and only then we have the rest of the TSO BDs.
10960 (don't forget to mark the last one as last,
10961 and to unmap only AFTER you write to the BD ...)
10962 And above all, all pbd sizes are in words - NOT DWORDS!
10963 */
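/* Editor's note (added): the resulting BD chain for one packet is
 *
 *   start BD -> parsing BD -> [split header BD] -> data BD ... data BD
 *
 * where the split header BD appears only on the TSO path (when the
 * linear data is longer than the headers) and tx_start_bd->nbd counts
 * every BD in the chain. */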
10965 pkt_prod = fp->tx_pkt_prod++;
10966 bd_prod = TX_BD(fp->tx_bd_prod);
10968 /* get a tx_buf and first BD */
10969 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10970 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10972 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10973 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10974 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10976 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10978 /* remember the first BD of the packet */
10979 tx_buf->first_bd = fp->tx_bd_prod;
10980 tx_buf->skb = skb;
10981 tx_buf->flags = 0;
10983 DP(NETIF_MSG_TX_QUEUED,
10984 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10985 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10988 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10989 (bp->flags & HW_VLAN_TX_FLAG)) {
10990 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10991 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10992 } else
10994 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10996 /* turn on parsing and get a BD */
10997 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10998 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
11000 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11002 if (xmit_type & XMIT_CSUM) {
11003 hlen = (skb_network_header(skb) - skb->data) / 2;
11005 /* for now NS flag is not used in Linux */
11006 pbd->global_data =
11007 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11008 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11010 pbd->ip_hlen = (skb_transport_header(skb) -
11011 skb_network_header(skb)) / 2;
11013 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11015 pbd->total_hlen = cpu_to_le16(hlen);
11016 hlen = hlen*2;
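/* Editor's note (added): a worked example of the word-sized lengths
 * above for a plain TCP/IPv4 frame: the 14-byte Ethernet header gives
 * hlen = 7 words, ip_hlen = 20/2 = 10 words and tcp_hdrlen(skb)/2 = 10
 * words, so total_hlen = 27 words (54 bytes); hlen is then doubled
 * back to bytes for the header/payload split used by the TSO path. */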
11018 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11020 if (xmit_type & XMIT_CSUM_V4)
11021 tx_start_bd->bd_flags.as_bitfield |=
11022 ETH_TX_BD_FLAGS_IP_CSUM;
11023 else
11024 tx_start_bd->bd_flags.as_bitfield |=
11025 ETH_TX_BD_FLAGS_IPV6;
11027 if (xmit_type & XMIT_CSUM_TCP) {
11028 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11030 } else {
11031 s8 fix = SKB_CS_OFF(skb); /* signed! */
11033 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11035 DP(NETIF_MSG_TX_QUEUED,
11036 "hlen %d fix %d csum before fix %x\n",
11037 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11039 /* HW bug: fixup the CSUM */
11040 pbd->tcp_pseudo_csum =
11041 bnx2x_csum_fix(skb_transport_header(skb),
11042 SKB_CS(skb), fix);
11044 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11045 pbd->tcp_pseudo_csum);
11049 mapping = pci_map_single(bp->pdev, skb->data,
11050 skb_headlen(skb), PCI_DMA_TODEVICE);
11052 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11053 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11054 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11055 tx_start_bd->nbd = cpu_to_le16(nbd);
11056 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11057 pkt_size = tx_start_bd->nbytes;
11059 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
11060 " nbytes %d flags %x vlan %x\n",
11061 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11062 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11063 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11065 if (xmit_type & XMIT_GSO) {
11067 DP(NETIF_MSG_TX_QUEUED,
11068 "TSO packet len %d hlen %d total len %d tso size %d\n",
11069 skb->len, hlen, skb_headlen(skb),
11070 skb_shinfo(skb)->gso_size);
11072 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11074 if (unlikely(skb_headlen(skb) > hlen))
11075 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11076 hlen, bd_prod, ++nbd);
11078 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11079 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11080 pbd->tcp_flags = pbd_tcp_flags(skb);
11082 if (xmit_type & XMIT_GSO_V4) {
11083 pbd->ip_id = swab16(ip_hdr(skb)->id);
11084 pbd->tcp_pseudo_csum =
11085 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11086 ip_hdr(skb)->daddr,
11087 0, IPPROTO_TCP, 0));
11090 pbd->tcp_pseudo_csum =
11091 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11092 &ipv6_hdr(skb)->daddr,
11093 0, IPPROTO_TCP, 0));
11095 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
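/* Editor's note (added): the pseudo checksums above are deliberately
 * computed with a zero length argument (csum_tcpudp_magic /
 * csum_ipv6_magic called with len = 0): the FW patches each segment's
 * own length into the sum, which is what the
 * ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN flag advertises. */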
11097 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11099 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11100 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11102 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11103 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11104 if (total_pkt_bd == NULL)
11105 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11107 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11108 frag->size, PCI_DMA_TODEVICE);
11110 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11111 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11112 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11113 le16_add_cpu(&pkt_size, frag->size);
11115 DP(NETIF_MSG_TX_QUEUED,
11116 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11117 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11118 le16_to_cpu(tx_data_bd->nbytes));
11121 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11123 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11125 /* now send a tx doorbell, counting the next BD
11126 * if the packet contains or ends with it
11127 */
11128 if (TX_BD_POFF(bd_prod) < nbd)
11129 nbd++;
11131 if (total_pkt_bd != NULL)
11132 total_pkt_bd->total_pkt_bytes = pkt_size;
11134 if (pbd)
11135 DP(NETIF_MSG_TX_QUEUED,
11136 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11137 " tcp_flags %x xsum %x seq %u hlen %u\n",
11138 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11139 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11140 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11142 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
11144 /*
11145 * Make sure that the BD data is updated before updating the producer
11146 * since FW might read the BD right after the producer is updated.
11147 * This is only applicable for weak-ordered memory model archs such
11148 * as IA-64. The following barrier is also mandatory since FW
11149 * assumes packets must have BDs.
11150 */
11151 wmb();
11153 fp->tx_db.data.prod += nbd;
11154 barrier();
11155 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
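/* Editor's note (added): two different barriers are used around the
 * doorbell: wmb() orders the BD stores against the producer update for
 * weakly-ordered CPUs, while barrier() is only a compiler barrier that
 * keeps the tx_db.data.prod store from being reordered past the
 * tx_db.raw value used in the MMIO doorbell write. */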
11159 fp->tx_bd_prod += nbd;
11161 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11162 netif_tx_stop_queue(txq);
11163 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11164 if we put Tx into XOFF state. */
11165 smp_mb();
11166 fp_stat->eth_q_stats.driver_xoff++;
11167 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11168 netif_tx_wake_queue(txq);
11170 fp_stat->tx_pkt++;
11172 return NETDEV_TX_OK;
11175 /* called with rtnl_lock */
11176 static int bnx2x_open(struct net_device *dev)
11178 struct bnx2x *bp = netdev_priv(dev);
11180 netif_carrier_off(dev);
11182 bnx2x_set_power_state(bp, PCI_D0);
11184 return bnx2x_nic_load(bp, LOAD_OPEN);
11187 /* called with rtnl_lock */
11188 static int bnx2x_close(struct net_device *dev)
11190 struct bnx2x *bp = netdev_priv(dev);
11192 /* Unload the driver, release IRQs */
11193 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11194 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11195 if (!CHIP_REV_IS_SLOW(bp))
11196 bnx2x_set_power_state(bp, PCI_D3hot);
11198 return 0;
11201 /* called with netif_tx_lock from dev_mcast.c */
11202 static void bnx2x_set_rx_mode(struct net_device *dev)
11204 struct bnx2x *bp = netdev_priv(dev);
11205 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11206 int port = BP_PORT(bp);
11208 if (bp->state != BNX2X_STATE_OPEN) {
11209 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11210 return;
11213 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11215 if (dev->flags & IFF_PROMISC)
11216 rx_mode = BNX2X_RX_MODE_PROMISC;
11218 else if ((dev->flags & IFF_ALLMULTI) ||
11219 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11220 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11222 else { /* some multicasts */
11223 if (CHIP_IS_E1(bp)) {
11224 int i, old, offset;
11225 struct dev_mc_list *mclist;
11226 struct mac_configuration_cmd *config =
11227 bnx2x_sp(bp, mcast_config);
11229 for (i = 0, mclist = dev->mc_list;
11230 mclist && (i < dev->mc_count);
11231 i++, mclist = mclist->next) {
11233 config->config_table[i].
11234 cam_entry.msb_mac_addr =
11235 swab16(*(u16 *)&mclist->dmi_addr[0]);
11236 config->config_table[i].
11237 cam_entry.middle_mac_addr =
11238 swab16(*(u16 *)&mclist->dmi_addr[2]);
11239 config->config_table[i].
11240 cam_entry.lsb_mac_addr =
11241 swab16(*(u16 *)&mclist->dmi_addr[4]);
11242 config->config_table[i].cam_entry.flags =
11243 cpu_to_le16(port);
11244 config->config_table[i].
11245 target_table_entry.flags = 0;
11246 config->config_table[i].target_table_entry.
11247 clients_bit_vector =
11248 cpu_to_le32(1 << BP_L_ID(bp));
11249 config->config_table[i].
11250 target_table_entry.vlan_id = 0;
11252 DP(NETIF_MSG_IFUP,
11253 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11254 config->config_table[i].
11255 cam_entry.msb_mac_addr,
11256 config->config_table[i].
11257 cam_entry.middle_mac_addr,
11258 config->config_table[i].
11259 cam_entry.lsb_mac_addr);
11261 old = config->hdr.length;
11262 if (old > i) {
11263 for (; i < old; i++) {
11264 if (CAM_IS_INVALID(config->
11265 config_table[i])) {
11266 /* already invalidated */
11267 break;
11269 /* invalidate */
11270 CAM_INVALIDATE(config->
11271 config_table[i]);
11275 if (CHIP_REV_IS_SLOW(bp))
11276 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11278 offset = BNX2X_MAX_MULTICAST*(1 + port);
11280 config->hdr.length = i;
11281 config->hdr.offset = offset;
11282 config->hdr.client_id = bp->fp->cl_id;
11283 config->hdr.reserved1 = 0;
11285 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11286 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11287 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11288 0);
11289 } else { /* E1H */
11290 /* Accept one or more multicasts */
11291 struct dev_mc_list *mclist;
11292 u32 mc_filter[MC_HASH_SIZE];
11293 u32 crc, bit, regidx;
11296 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11298 for (i = 0, mclist = dev->mc_list;
11299 mclist && (i < dev->mc_count);
11300 i++, mclist = mclist->next) {
11302 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11303 mclist->dmi_addr);
11305 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11306 bit = (crc >> 24) & 0xff;
11307 regidx = bit >> 5;
11308 bit &= 0x1f;
11309 mc_filter[regidx] |= (1 << bit);
11312 for (i = 0; i < MC_HASH_SIZE; i++)
11313 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11314 mc_filter[i]);
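/* Editor's note (added): the E1H multicast filter is a 256-bit hash
 * table spread over MC_HASH_SIZE 32-bit registers. The top byte of the
 * crc32c of the MAC picks the bit: e.g. a top byte of 0x9a (154) gives
 * regidx = 154 >> 5 = 4 and sets bit 154 & 0x1f = 26 of register 4. */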
11318 bp->rx_mode = rx_mode;
11319 bnx2x_set_storm_rx_mode(bp);
11322 /* called with rtnl_lock */
11323 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11325 struct sockaddr *addr = p;
11326 struct bnx2x *bp = netdev_priv(dev);
11328 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11329 return -EINVAL;
11331 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11332 if (netif_running(dev)) {
11333 if (CHIP_IS_E1(bp))
11334 bnx2x_set_mac_addr_e1(bp, 1);
11335 else
11336 bnx2x_set_mac_addr_e1h(bp, 1);
11339 return 0;
11342 /* called with rtnl_lock */
11343 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11344 int devad, u16 addr)
11346 struct bnx2x *bp = netdev_priv(netdev);
11347 u16 value;
11348 int rc;
11349 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11351 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11352 prtad, devad, addr);
11354 if (prtad != bp->mdio.prtad) {
11355 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11356 prtad, bp->mdio.prtad);
11357 return -EINVAL;
11360 /* The HW expects different devad if CL22 is used */
11361 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11363 bnx2x_acquire_phy_lock(bp);
11364 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11365 devad, addr, &value);
11366 bnx2x_release_phy_lock(bp);
11367 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11369 if (!rc)
11370 rc = value;
11372 return rc;
11374 /* called with rtnl_lock */
11375 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11376 u16 addr, u16 value)
11378 struct bnx2x *bp = netdev_priv(netdev);
11379 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11380 int rc;
11382 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11383 " value 0x%x\n", prtad, devad, addr, value);
11385 if (prtad != bp->mdio.prtad) {
11386 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11387 prtad, bp->mdio.prtad);
11388 return -EINVAL;
11391 /* The HW expects different devad if CL22 is used */
11392 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11394 bnx2x_acquire_phy_lock(bp);
11395 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11396 devad, addr, value);
11397 bnx2x_release_phy_lock(bp);
11399 return rc;
11401 /* called with rtnl_lock */
11402 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11404 struct bnx2x *bp = netdev_priv(dev);
11405 struct mii_ioctl_data *mdio = if_mii(ifr);
11407 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11408 mdio->phy_id, mdio->reg_num, mdio->val_in);
11410 if (!netif_running(dev))
11411 return -EAGAIN;
11413 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11416 /* called with rtnl_lock */
11417 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11419 struct bnx2x *bp = netdev_priv(dev);
11420 int rc = 0;
11422 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11423 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11424 return -EINVAL;
11426 /* This does not race with packet allocation
11427 * because the actual alloc size is
11428 only updated as part of load
11429 */
11430 dev->mtu = new_mtu;
11432 if (netif_running(dev)) {
11433 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11434 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11437 return rc;
11440 static void bnx2x_tx_timeout(struct net_device *dev)
11442 struct bnx2x *bp = netdev_priv(dev);
11444 #ifdef BNX2X_STOP_ON_ERROR
11445 if (!bp->panic)
11446 bnx2x_panic();
11447 #endif
11448 /* This allows the netif to be shutdown gracefully before resetting */
11449 schedule_work(&bp->reset_task);
11453 /* called with rtnl_lock */
11454 static void bnx2x_vlan_rx_register(struct net_device *dev,
11455 struct vlan_group *vlgrp)
11457 struct bnx2x *bp = netdev_priv(dev);
11459 bp->vlgrp = vlgrp;
11461 /* Set flags according to the required capabilities */
11462 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11464 if (dev->features & NETIF_F_HW_VLAN_TX)
11465 bp->flags |= HW_VLAN_TX_FLAG;
11467 if (dev->features & NETIF_F_HW_VLAN_RX)
11468 bp->flags |= HW_VLAN_RX_FLAG;
11470 if (netif_running(dev))
11471 bnx2x_set_client_config(bp);
11476 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11477 static void poll_bnx2x(struct net_device *dev)
11479 struct bnx2x *bp = netdev_priv(dev);
11481 disable_irq(bp->pdev->irq);
11482 bnx2x_interrupt(bp->pdev->irq, dev);
11483 enable_irq(bp->pdev->irq);
11487 static const struct net_device_ops bnx2x_netdev_ops = {
11488 .ndo_open = bnx2x_open,
11489 .ndo_stop = bnx2x_close,
11490 .ndo_start_xmit = bnx2x_start_xmit,
11491 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11492 .ndo_set_mac_address = bnx2x_change_mac_addr,
11493 .ndo_validate_addr = eth_validate_addr,
11494 .ndo_do_ioctl = bnx2x_ioctl,
11495 .ndo_change_mtu = bnx2x_change_mtu,
11496 .ndo_tx_timeout = bnx2x_tx_timeout,
11498 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11500 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11501 .ndo_poll_controller = poll_bnx2x,
11505 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11506 struct net_device *dev)
11508 struct bnx2x *bp;
11509 int rc;
11511 SET_NETDEV_DEV(dev, &pdev->dev);
11512 bp = netdev_priv(dev);
11514 bp->dev = dev;
11515 bp->pdev = pdev;
11516 bp->flags = 0;
11517 bp->func = PCI_FUNC(pdev->devfn);
11519 rc = pci_enable_device(pdev);
11520 if (rc) {
11521 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11522 goto err_out;
11525 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11526 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11527 " aborting\n");
11528 rc = -ENODEV;
11529 goto err_out_disable;
11532 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11533 printk(KERN_ERR PFX "Cannot find second PCI device"
11534 " base address, aborting\n");
11535 rc = -ENODEV;
11536 goto err_out_disable;
11539 if (atomic_read(&pdev->enable_cnt) == 1) {
11540 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11541 if (rc) {
11542 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11543 " aborting\n");
11544 goto err_out_disable;
11547 pci_set_master(pdev);
11548 pci_save_state(pdev);
11551 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11552 if (bp->pm_cap == 0) {
11553 printk(KERN_ERR PFX "Cannot find power management"
11554 " capability, aborting\n");
11555 rc = -EIO;
11556 goto err_out_release;
11559 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11560 if (bp->pcie_cap == 0) {
11561 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11562 " aborting\n");
11563 rc = -EIO;
11564 goto err_out_release;
11567 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11568 bp->flags |= USING_DAC_FLAG;
11569 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11570 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11571 " failed, aborting\n");
11572 rc = -EIO;
11573 goto err_out_release;
11576 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11577 printk(KERN_ERR PFX "System does not support DMA,"
11578 " aborting\n");
11579 rc = -EIO;
11580 goto err_out_release;
11583 dev->mem_start = pci_resource_start(pdev, 0);
11584 dev->base_addr = dev->mem_start;
11585 dev->mem_end = pci_resource_end(pdev, 0);
11587 dev->irq = pdev->irq;
11589 bp->regview = pci_ioremap_bar(pdev, 0);
11590 if (!bp->regview) {
11591 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11592 rc = -ENOMEM;
11593 goto err_out_release;
11596 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11597 min_t(u64, BNX2X_DB_SIZE,
11598 pci_resource_len(pdev, 2)));
11599 if (!bp->doorbells) {
11600 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11601 rc = -ENOMEM;
11602 goto err_out_unmap;
11605 bnx2x_set_power_state(bp, PCI_D0);
11607 /* clean indirect addresses */
11608 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11609 PCICFG_VENDOR_ID_OFFSET);
11610 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11611 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11612 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11613 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11615 dev->watchdog_timeo = TX_TIMEOUT;
11617 dev->netdev_ops = &bnx2x_netdev_ops;
11618 dev->ethtool_ops = &bnx2x_ethtool_ops;
11619 dev->features |= NETIF_F_SG;
11620 dev->features |= NETIF_F_HW_CSUM;
11621 if (bp->flags & USING_DAC_FLAG)
11622 dev->features |= NETIF_F_HIGHDMA;
11623 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11624 dev->features |= NETIF_F_TSO6;
11626 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11627 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11629 dev->vlan_features |= NETIF_F_SG;
11630 dev->vlan_features |= NETIF_F_HW_CSUM;
11631 if (bp->flags & USING_DAC_FLAG)
11632 dev->vlan_features |= NETIF_F_HIGHDMA;
11633 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11634 dev->vlan_features |= NETIF_F_TSO6;
11637 /* get_port_hwinfo() will set prtad and mmds properly */
11638 bp->mdio.prtad = MDIO_PRTAD_NONE;
11639 bp->mdio.mmds = 0;
11640 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11641 bp->mdio.dev = dev;
11642 bp->mdio.mdio_read = bnx2x_mdio_read;
11643 bp->mdio.mdio_write = bnx2x_mdio_write;
11645 return 0;
11647 err_out_unmap:
11648 if (bp->regview) {
11649 iounmap(bp->regview);
11650 bp->regview = NULL;
11652 if (bp->doorbells) {
11653 iounmap(bp->doorbells);
11654 bp->doorbells = NULL;
11657 err_out_release:
11658 if (atomic_read(&pdev->enable_cnt) == 1)
11659 pci_release_regions(pdev);
11661 err_out_disable:
11662 pci_disable_device(pdev);
11663 pci_set_drvdata(pdev, NULL);
11665 err_out:
11666 return rc;
11669 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11670 int *width, int *speed)
11672 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11674 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11676 /* return value of 1=2.5GHz 2=5GHz */
11677 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11680 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11682 const struct firmware *firmware = bp->firmware;
11683 struct bnx2x_fw_file_hdr *fw_hdr;
11684 struct bnx2x_fw_file_section *sections;
11685 u32 offset, len, num_ops;
11686 u16 *ops_offsets;
11687 int i;
11688 const u8 *fw_ver;
11690 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11691 return -EINVAL;
11693 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11694 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11696 /* Make sure none of the offsets and sizes make us read beyond
11697 * the end of the firmware data */
11698 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11699 offset = be32_to_cpu(sections[i].offset);
11700 len = be32_to_cpu(sections[i].len);
11701 if (offset + len > firmware->size) {
11702 printk(KERN_ERR PFX "Section %d length is out of "
11703 "bounds\n", i);
11704 return -EINVAL;
11708 /* Likewise for the init_ops offsets */
11709 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11710 ops_offsets = (u16 *)(firmware->data + offset);
11711 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11713 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11714 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11715 printk(KERN_ERR PFX "Section offset %d is out of "
11716 "bounds\n", i);
11717 return -EINVAL;
11721 /* Check FW version */
11722 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11723 fw_ver = firmware->data + offset;
11724 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11725 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11726 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11727 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11728 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11729 " Should be %d.%d.%d.%d\n",
11730 fw_ver[0], fw_ver[1], fw_ver[2],
11731 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11732 BCM_5710_FW_MINOR_VERSION,
11733 BCM_5710_FW_REVISION_VERSION,
11734 BCM_5710_FW_ENGINEERING_VERSION);
11735 return -EINVAL;
11738 return 0;
11741 static void inline be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11744 const __be32 *source = (const __be32*)_source;
11745 u32 *target = (u32*)_target;
11746 u32 i;
11747 for (i = 0; i < n/4; i++)
11748 target[i] = be32_to_cpu(source[i]);
11751 /*
11752 Ops array is stored in the following format:
11753 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11754 */
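/* Editor's note (added): e.g. the eight raw bytes
 * 02 00 10 40 de ad be ef decode (per bnx2x_prep_ops() below) to
 * op = 0x02, offset = 0x001040, raw_data = 0xdeadbeef. */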
11755 static void inline bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11758 const __be32 *source = (const __be32*)_source;
11759 struct raw_op *target = (struct raw_op*)_target;
11760 u32 i, j, tmp;
11761 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11762 tmp = be32_to_cpu(source[j]);
11763 target[i].op = (tmp >> 24) & 0xff;
11764 target[i].offset = tmp & 0xffffff;
11765 target[i].raw_data = be32_to_cpu(source[j+1]);
11768 static void inline be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11771 u16 *target = (u16*)_target;
11772 const __be16 *source = (const __be16*)_source;
11773 u32 i;
11774 for (i = 0; i < n/2; i++)
11775 target[i] = be16_to_cpu(source[i]);
11778 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11779 do { \
11780 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11781 bp->arr = kmalloc(len, GFP_KERNEL); \
11782 if (!bp->arr) { \
11783 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11784 goto lbl; \
11785 } \
11786 func(bp->firmware->data + \
11787 be32_to_cpu(fw_hdr->arr.offset), \
11788 (u8*)bp->arr, len); \
11789 } while (0)
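/* Editor's note (added): the do { } while (0) wrapper makes the macro
 * expand to a single statement at its call sites, and the 'lbl'
 * parameter lets each invocation name the error label to jump to on a
 * failed kmalloc(), matching the unwind order in bnx2x_init_firmware(). */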
11792 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11794 char fw_file_name[40] = {0};
11795 int rc, offset;
11796 struct bnx2x_fw_file_hdr *fw_hdr;
11798 /* Create a FW file name */
11799 if (CHIP_IS_E1(bp))
11800 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11802 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11804 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11805 BCM_5710_FW_MAJOR_VERSION,
11806 BCM_5710_FW_MINOR_VERSION,
11807 BCM_5710_FW_REVISION_VERSION,
11808 BCM_5710_FW_ENGINEERING_VERSION);
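/* Editor's note (added): the resulting name has the form
 * "bnx2x-e1h-<major>.<minor>.<rev>.<eng>.fw", with the four numbers
 * taken from the BCM_5710_FW_*_VERSION constants the driver was built
 * against (the exact digits depend on the bundled firmware headers). */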
11810 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11812 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11813 if (rc) {
11814 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11815 goto request_firmware_exit;
11818 rc = bnx2x_check_firmware(bp);
11819 if (rc) {
11820 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11821 goto request_firmware_exit;
11824 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11826 /* Initialize the pointers to the init arrays */
11828 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11831 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11834 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11836 /* STORMs firmware */
11837 bp->tsem_int_table_data = bp->firmware->data +
11838 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11839 bp->tsem_pram_data = bp->firmware->data +
11840 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11841 bp->usem_int_table_data = bp->firmware->data +
11842 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11843 bp->usem_pram_data = bp->firmware->data +
11844 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11845 bp->xsem_int_table_data = bp->firmware->data +
11846 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11847 bp->xsem_pram_data = bp->firmware->data +
11848 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11849 bp->csem_int_table_data = bp->firmware->data +
11850 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11851 bp->csem_pram_data = bp->firmware->data +
11852 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11854 return 0;
11855 init_offsets_alloc_err:
11856 kfree(bp->init_ops);
11857 init_ops_alloc_err:
11858 kfree(bp->init_data);
11859 request_firmware_exit:
11860 release_firmware(bp->firmware);
11862 return rc;
11867 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11868 const struct pci_device_id *ent)
11870 struct net_device *dev = NULL;
11871 struct bnx2x *bp;
11872 int pcie_width, pcie_speed;
11873 int rc;
11875 /* dev zeroed in init_etherdev */
11876 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11877 if (!dev) {
11878 printk(KERN_ERR PFX "Cannot allocate net device\n");
11879 return -ENOMEM;
11882 bp = netdev_priv(dev);
11883 bp->msglevel = debug;
11885 pci_set_drvdata(pdev, dev);
11887 rc = bnx2x_init_dev(pdev, dev);
11888 if (rc < 0) {
11889 free_netdev(dev);
11890 return rc;
11893 rc = bnx2x_init_bp(bp);
11894 if (rc)
11895 goto init_one_exit;
11897 /* Set init arrays */
11898 rc = bnx2x_init_firmware(bp, &pdev->dev);
11899 if (rc) {
11900 printk(KERN_ERR PFX "Error loading firmware\n");
11901 goto init_one_exit;
11904 rc = register_netdev(dev);
11905 if (rc) {
11906 dev_err(&pdev->dev, "Cannot register net device\n");
11907 goto init_one_exit;
11910 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
11911 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
11912 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
11913 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11914 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
11915 dev->base_addr, bp->pdev->irq);
11916 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
11918 return 0;
11920 init_one_exit:
11921 if (bp->regview)
11922 iounmap(bp->regview);
11924 if (bp->doorbells)
11925 iounmap(bp->doorbells);
11927 free_netdev(dev);
11929 if (atomic_read(&pdev->enable_cnt) == 1)
11930 pci_release_regions(pdev);
11932 pci_disable_device(pdev);
11933 pci_set_drvdata(pdev, NULL);
11935 return rc;
11938 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11940 struct net_device *dev = pci_get_drvdata(pdev);
11943 if (!dev) {
11944 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11945 return;
11947 bp = netdev_priv(dev);
11949 unregister_netdev(dev);
11951 kfree(bp->init_ops_offsets);
11952 kfree(bp->init_ops);
11953 kfree(bp->init_data);
11954 release_firmware(bp->firmware);
11956 if (bp->regview)
11957 iounmap(bp->regview);
11959 if (bp->doorbells)
11960 iounmap(bp->doorbells);
11962 free_netdev(dev);
11964 if (atomic_read(&pdev->enable_cnt) == 1)
11965 pci_release_regions(pdev);
11967 pci_disable_device(pdev);
11968 pci_set_drvdata(pdev, NULL);
11971 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11973 struct net_device *dev = pci_get_drvdata(pdev);
11976 if (!dev) {
11977 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11978 return -ENODEV;
11980 bp = netdev_priv(dev);
11982 rtnl_lock();
11984 pci_save_state(pdev);
11986 if (!netif_running(dev)) {
11987 rtnl_unlock();
11988 return 0;
11991 netif_device_detach(dev);
11993 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11995 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
11997 rtnl_unlock();
11999 return 0;
12002 static int bnx2x_resume(struct pci_dev *pdev)
12004 struct net_device *dev = pci_get_drvdata(pdev);
12008 if (!dev) {
12009 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12010 return -ENODEV;
12012 bp = netdev_priv(dev);
12014 rtnl_lock();
12016 pci_restore_state(pdev);
12018 if (!netif_running(dev)) {
12019 rtnl_unlock();
12020 return 0;
12023 bnx2x_set_power_state(bp, PCI_D0);
12024 netif_device_attach(dev);
12026 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12028 rtnl_unlock();
12030 return rc;
12033 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12035 int i;
12037 bp->state = BNX2X_STATE_ERROR;
12039 bp->rx_mode = BNX2X_RX_MODE_NONE;
12041 bnx2x_netif_stop(bp, 0);
12043 del_timer_sync(&bp->timer);
12044 bp->stats_state = STATS_STATE_DISABLED;
12045 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12048 bnx2x_free_irq(bp);
12050 if (CHIP_IS_E1(bp)) {
12051 struct mac_configuration_cmd *config =
12052 bnx2x_sp(bp, mcast_config);
12054 for (i = 0; i < config->hdr.length; i++)
12055 CAM_INVALIDATE(config->config_table[i]);
12058 /* Free SKBs, SGEs, TPA pool and driver internals */
12059 bnx2x_free_skbs(bp);
12060 for_each_rx_queue(bp, i)
12061 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12062 for_each_rx_queue(bp, i)
12063 netif_napi_del(&bnx2x_fp(bp, i, napi));
12064 bnx2x_free_mem(bp);
12066 bp->state = BNX2X_STATE_CLOSED;
12068 netif_carrier_off(bp->dev);
12070 return 0;
12073 static void bnx2x_eeh_recover(struct bnx2x *bp)
12075 u32 val;
12077 mutex_init(&bp->port.phy_mutex);
12079 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12080 bp->link_params.shmem_base = bp->common.shmem_base;
12081 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12083 if (!bp->common.shmem_base ||
12084 (bp->common.shmem_base < 0xA0000) ||
12085 (bp->common.shmem_base >= 0xC0000)) {
12086 BNX2X_DEV_INFO("MCP not active\n");
12087 bp->flags |= NO_MCP_FLAG;
12088 return;
12091 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12092 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12093 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12094 BNX2X_ERR("BAD MCP validity signature\n");
12096 if (!BP_NOMCP(bp)) {
12097 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12098 & DRV_MSG_SEQ_NUMBER_MASK);
12099 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12103 /**
12104 * bnx2x_io_error_detected - called when PCI error is detected
12105 * @pdev: Pointer to PCI device
12106 * @state: The current pci connection state
12108 * This function is called after a PCI bus error affecting
12109 * this device has been detected.
12110 */
12111 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12112 pci_channel_state_t state)
12114 struct net_device *dev = pci_get_drvdata(pdev);
12115 struct bnx2x *bp = netdev_priv(dev);
12117 rtnl_lock();
12119 netif_device_detach(dev);
12121 if (state == pci_channel_io_perm_failure) {
12122 rtnl_unlock();
12123 return PCI_ERS_RESULT_DISCONNECT;
12126 if (netif_running(dev))
12127 bnx2x_eeh_nic_unload(bp);
12129 pci_disable_device(pdev);
12131 rtnl_unlock();
12133 /* Request a slot reset */
12134 return PCI_ERS_RESULT_NEED_RESET;
12137 /**
12138 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12139 * @pdev: Pointer to PCI device
12141 * Restart the card from scratch, as if from a cold-boot.
12142 */
12143 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12145 struct net_device *dev = pci_get_drvdata(pdev);
12146 struct bnx2x *bp = netdev_priv(dev);
12148 rtnl_lock();
12150 if (pci_enable_device(pdev)) {
12151 dev_err(&pdev->dev,
12152 "Cannot re-enable PCI device after reset\n");
12153 rtnl_unlock();
12154 return PCI_ERS_RESULT_DISCONNECT;
12157 pci_set_master(pdev);
12158 pci_restore_state(pdev);
12160 if (netif_running(dev))
12161 bnx2x_set_power_state(bp, PCI_D0);
12163 rtnl_unlock();
12165 return PCI_ERS_RESULT_RECOVERED;
12168 /**
12169 * bnx2x_io_resume - called when traffic can start flowing again
12170 * @pdev: Pointer to PCI device
12172 * This callback is called when the error recovery driver tells us that
12173 * it's OK to resume normal operation.
12174 */
12175 static void bnx2x_io_resume(struct pci_dev *pdev)
12177 struct net_device *dev = pci_get_drvdata(pdev);
12178 struct bnx2x *bp = netdev_priv(dev);
12180 rtnl_lock();
12182 bnx2x_eeh_recover(bp);
12184 if (netif_running(dev))
12185 bnx2x_nic_load(bp, LOAD_NORMAL);
12187 netif_device_attach(dev);
12189 rtnl_unlock();
12192 static struct pci_error_handlers bnx2x_err_handler = {
12193 .error_detected = bnx2x_io_error_detected,
12194 .slot_reset = bnx2x_io_slot_reset,
12195 .resume = bnx2x_io_resume,
12198 static struct pci_driver bnx2x_pci_driver = {
12199 .name = DRV_MODULE_NAME,
12200 .id_table = bnx2x_pci_tbl,
12201 .probe = bnx2x_init_one,
12202 .remove = __devexit_p(bnx2x_remove_one),
12203 .suspend = bnx2x_suspend,
12204 .resume = bnx2x_resume,
12205 .err_handler = &bnx2x_err_handler,
12208 static int __init bnx2x_init(void)
12210 int ret;
12212 printk(KERN_INFO "%s", version);
12214 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12215 if (bnx2x_wq == NULL) {
12216 printk(KERN_ERR PFX "Cannot create workqueue\n");
12217 return -ENOMEM;
12220 ret = pci_register_driver(&bnx2x_pci_driver);
12221 if (ret) {
12222 printk(KERN_ERR PFX "Cannot register driver\n");
12223 destroy_workqueue(bnx2x_wq);
12226 return ret;
12228 static void __exit bnx2x_cleanup(void)
12230 pci_unregister_driver(&bnx2x_pci_driver);
12232 destroy_workqueue(bnx2x_wq);
12235 module_init(bnx2x_init);
12236 module_exit(bnx2x_cleanup);